code stringlengths 2k 1.04M | repo_path stringlengths 5 517 | parsed_code stringlengths 0 1.04M | quality_prob float64 0.02 0.95 | learning_prob float64 0.02 0.93 |
|---|---|---|---|---|
# Smoke test for zincbase KB node/edge attributes: attribute get/set,
# filtering, neighbour lists, watch callbacks, and attributes supplied at
# store() time. `context` is the project-local sys.path shim used by the
# test suite.
import context
from zincbase import KB
kb = KB()
kb.seed(555)
# --- Basic facts, and node attribute access by attribute and by item. ---
kb.store('person(tom)')
kb.store('person(shamala)')
kb.store('knows(tom, shamala)')
assert kb.neighbors('tom') == [('shamala', [{'pred': 'knows'}])]
kb.node('tom')['grains'] = 0
tom = kb.node('tom')
assert tom.grains == 0
# Missing attributes read as None rather than raising.
assert tom.i_dont_exist is None
assert tom['i_dont_exist'] is None
kb.node('shamala').grains = 4
shamala = kb.node('shamala')
assert 'grains' in shamala
assert 'grains' in shamala.attrs
assert shamala.grains == 4
# Attribute-style and item-style mutation must stay in sync.
shamala.grains += 1
assert shamala.grains == 5
assert shamala['grains'] == 5
shamala['grains'] += 1
assert shamala['grains'] == 6
# --- kb.filter: predicate over nodes, optionally restricted to candidates. ---
kb.store('person(jeraca)')
kb.node('jeraca').grains = 3
zero_grains = list(kb.filter(lambda x: x['grains'] == 0))
assert len(zero_grains) == 1
assert zero_grains[0] == 'tom'
assert zero_grains[0] != 'shamala'
# A predicate over an attribute no node has must match nothing.
zero_anything = list(kb.filter(lambda x: x['anything'] == 0))
assert len(zero_anything) == 0
more_grains = kb.filter(lambda x: x['grains'] >= 3)
assert next(more_grains) in ['shamala', 'jeraca']
assert next(more_grains) in ['shamala', 'jeraca']
more_grains = kb.filter(lambda x: x['grains'] >= 3, candidate_nodes=['shamala'])
as_list = list(more_grains)
assert as_list == ['shamala']
# An empty candidate list yields an empty result, not the whole graph.
more_grains = kb.filter(lambda x: x['grains'] >= 3, candidate_nodes=[])
as_list = list(more_grains)
assert as_list == []
some_or_no_grains = kb.filter(lambda x: x['grains'] >= -1, candidate_nodes=['tom', 'shamala'])
as_list = list(some_or_no_grains)
assert len(as_list) == 2
assert as_list[0] in ['tom', 'shamala']
assert as_list[1] in ['tom', 'shamala']
assert as_list[0] != as_list[1]
nodes = kb.filter(lambda x: True)
as_list = list(nodes)
assert len(as_list) == 3
# --- Neighbour lists: (neighbor_name, [edge dicts]) tuples. ---
jeraca = kb.node('jeraca')
assert len(jeraca.neighbors) == 0
shamala = kb.node('shamala')
assert len(shamala.neighbors) == 0
tom = kb.node('tom')
assert len(tom.neighbors) == 1
assert tom.neighbors[0][0] == 'shamala'
assert len(tom.neighbors[0][1]) == 1
assert tom.neighbors[0][1][0]['pred'] == 'knows'
# --- Watches: callback fires on attribute change until removed. ---
fn_was_called = False
def watch_fn(node, prev_val):
    # Watch callback: receives the node and the attribute's previous value.
    global fn_was_called
    fn_was_called = True
    assert prev_val == 0
    assert node.grains == 1
    assert len(node.neighbors) == 1
    assert kb.node(node.neighbors[0][0]) == 'shamala'
nights_watch = tom.watch('grains', watch_fn)
tom.grains += 1
assert fn_was_called
fn_was_called = False
# Removing by watch handle silences the callback...
tom.remove_watch(nights_watch)
tom.grains += 1
assert not fn_was_called
# ...and removing by attribute name silences all watches on that attribute.
nights_watch = tom.watch('grains', watch_fn)
tom.remove_watch('grains')
tom.grains += 1
assert not fn_was_called
# --- Attributes supplied at store() time, for nodes and edges. ---
kb.store('node(i_am_node)', node_attributes=[{'foo': 'bar'}])
new_node = kb.node('i_am_node')
assert new_node.foo == 'bar'
new_node.foo = 'baz'
new_node = kb.node('i_am_node')
assert new_node.foo == 'baz'
kb.store('connected_nodes(3, 4)', node_attributes=[{'x': 3}, {'x': 4}], edge_attributes={'power_level': 3})
_3 = kb.node(3)
_4 = kb.node(4)
assert _3.x == 3
assert _4.x == 4
assert kb.edge(3, 'connected_nodes', 4).power_level == 3
kb.edge(3, 'connected_nodes', 4).power_level = 'high'
assert kb.edge(3, 'connected_nodes', 4).power_level == 'high'
# --- Bulk load from CSV, then nodes()/edges() iteration and filtering. ---
kb = KB()
kb.from_csv('./assets/countries_s1_train.csv', delimiter='\t')
kb.node('mali').zig = 123
called = False
for node in kb.nodes():
    if node == 'mali':
        called = True
        assert called == True
        assert node.attrs['zig'] == 123
assert called
mali = list(kb.nodes(lambda x: x == 'mali'))
assert len(mali) == 1
assert mali[0].zig == 123
assert 'zig' in mali[0]
# Internal bookkeeping must not leak into the public attribute view.
assert '_watches' not in mali[0]
edges = list(kb.edges())
assert len(edges) == 1111
edges = list(kb.edges(lambda x: x.nodes[0] == 'mali'))
assert len(edges) == 8
kb.store('itisin(mali, western_africa)', edge_attributes={'def_want_visit': True})
edges = list(kb.edges(lambda x: x.pred == 'itisin' and x.nodes[0] == 'mali'))
assert edges[0]['def_want_visit'] == True
print('All attribute tests passed.') | test/test_attr.py | import context
# Smoke test for zincbase KB node/edge attributes: attribute get/set,
# filtering, neighbour lists, watch callbacks, and attributes supplied at
# store() time.
from zincbase import KB
kb = KB()
kb.seed(555)
# --- Basic facts, and node attribute access by attribute and by item. ---
kb.store('person(tom)')
kb.store('person(shamala)')
kb.store('knows(tom, shamala)')
assert kb.neighbors('tom') == [('shamala', [{'pred': 'knows'}])]
kb.node('tom')['grains'] = 0
tom = kb.node('tom')
assert tom.grains == 0
# Missing attributes read as None rather than raising.
assert tom.i_dont_exist is None
assert tom['i_dont_exist'] is None
kb.node('shamala').grains = 4
shamala = kb.node('shamala')
assert 'grains' in shamala
assert 'grains' in shamala.attrs
assert shamala.grains == 4
# Attribute-style and item-style mutation must stay in sync.
shamala.grains += 1
assert shamala.grains == 5
assert shamala['grains'] == 5
shamala['grains'] += 1
assert shamala['grains'] == 6
# --- kb.filter: predicate over nodes, optionally restricted to candidates. ---
kb.store('person(jeraca)')
kb.node('jeraca').grains = 3
zero_grains = list(kb.filter(lambda x: x['grains'] == 0))
assert len(zero_grains) == 1
assert zero_grains[0] == 'tom'
assert zero_grains[0] != 'shamala'
# A predicate over an attribute no node has must match nothing.
zero_anything = list(kb.filter(lambda x: x['anything'] == 0))
assert len(zero_anything) == 0
more_grains = kb.filter(lambda x: x['grains'] >= 3)
assert next(more_grains) in ['shamala', 'jeraca']
assert next(more_grains) in ['shamala', 'jeraca']
more_grains = kb.filter(lambda x: x['grains'] >= 3, candidate_nodes=['shamala'])
as_list = list(more_grains)
assert as_list == ['shamala']
# An empty candidate list yields an empty result, not the whole graph.
more_grains = kb.filter(lambda x: x['grains'] >= 3, candidate_nodes=[])
as_list = list(more_grains)
assert as_list == []
some_or_no_grains = kb.filter(lambda x: x['grains'] >= -1, candidate_nodes=['tom', 'shamala'])
as_list = list(some_or_no_grains)
assert len(as_list) == 2
assert as_list[0] in ['tom', 'shamala']
assert as_list[1] in ['tom', 'shamala']
assert as_list[0] != as_list[1]
nodes = kb.filter(lambda x: True)
as_list = list(nodes)
assert len(as_list) == 3
# --- Neighbour lists: (neighbor_name, [edge dicts]) tuples. ---
jeraca = kb.node('jeraca')
assert len(jeraca.neighbors) == 0
shamala = kb.node('shamala')
assert len(shamala.neighbors) == 0
tom = kb.node('tom')
assert len(tom.neighbors) == 1
assert tom.neighbors[0][0] == 'shamala'
assert len(tom.neighbors[0][1]) == 1
assert tom.neighbors[0][1][0]['pred'] == 'knows'
# --- Watches: callback fires on attribute change until removed. ---
fn_was_called = False
def watch_fn(node, prev_val):
    # Watch callback: receives the node and the attribute's previous value.
    global fn_was_called
    fn_was_called = True
    assert prev_val == 0
    assert node.grains == 1
    assert len(node.neighbors) == 1
    assert kb.node(node.neighbors[0][0]) == 'shamala'
nights_watch = tom.watch('grains', watch_fn)
tom.grains += 1
assert fn_was_called
fn_was_called = False
# Removing by watch handle silences the callback...
tom.remove_watch(nights_watch)
tom.grains += 1
assert not fn_was_called
# ...and removing by attribute name silences all watches on that attribute.
nights_watch = tom.watch('grains', watch_fn)
tom.remove_watch('grains')
tom.grains += 1
assert not fn_was_called
# --- Attributes supplied at store() time, for nodes and edges. ---
kb.store('node(i_am_node)', node_attributes=[{'foo': 'bar'}])
new_node = kb.node('i_am_node')
assert new_node.foo == 'bar'
new_node.foo = 'baz'
new_node = kb.node('i_am_node')
assert new_node.foo == 'baz'
kb.store('connected_nodes(3, 4)', node_attributes=[{'x': 3}, {'x': 4}], edge_attributes={'power_level': 3})
_3 = kb.node(3)
_4 = kb.node(4)
assert _3.x == 3
assert _4.x == 4
assert kb.edge(3, 'connected_nodes', 4).power_level == 3
kb.edge(3, 'connected_nodes', 4).power_level = 'high'
assert kb.edge(3, 'connected_nodes', 4).power_level == 'high'
# --- Bulk load from CSV, then nodes()/edges() iteration and filtering. ---
kb = KB()
kb.from_csv('./assets/countries_s1_train.csv', delimiter='\t')
kb.node('mali').zig = 123
called = False
for node in kb.nodes():
    if node == 'mali':
        called = True
        assert called == True
        assert node.attrs['zig'] == 123
assert called
mali = list(kb.nodes(lambda x: x == 'mali'))
assert len(mali) == 1
assert mali[0].zig == 123
assert 'zig' in mali[0]
# Internal bookkeeping must not leak into the public attribute view.
assert '_watches' not in mali[0]
edges = list(kb.edges())
assert len(edges) == 1111
edges = list(kb.edges(lambda x: x.nodes[0] == 'mali'))
assert len(edges) == 8
kb.store('itisin(mali, western_africa)', edge_attributes={'def_want_visit': True})
edges = list(kb.edges(lambda x: x.pred == 'itisin' and x.nodes[0] == 'mali'))
assert edges[0]['def_want_visit'] == True
print('All attribute tests passed.') | 0.431105 | 0.53279 |
from __future__ import division
import collections
import collections.abc
import math
import numbers
import random
import types
import numpy as np
import scipy.ndimage as ndimage
import torch
from PIL import Image, ImageOps, ImageFilter
try:
    import accimage
except ImportError:
    accimage = None
class Compose(object):
    """Chain several transforms into one callable applied left to right.
    Args:
        transforms (list of ``Transform`` objects): list of transforms to compose.
    Example:
        transforms.Compose([
            transforms.CenterCrop(10),
            transforms.ToTensor(),
        ])
    """
    def __init__(self, transforms):
        self.transforms = transforms
    def __call__(self, img):
        # Feed the output of each transform into the next one.
        result = img
        for transform in self.transforms:
            result = transform(result)
        return result
class ToTensor(object):
    """Convert a ``PIL.Image`` or ``numpy.ndarray`` to tensor.
    Converts a PIL.Image or numpy.ndarray (H x W x C) in the range
    [0, 255] to a torch.FloatTensor of shape (C x H x W) in the range [0.0, 1.0].
    """
    def __call__(self, pic):
        """
        Args:
            pic (PIL.Image or numpy.ndarray): Image to be converted to tensor.
        Returns:
            Tensor: Converted image.
        """
        if isinstance(pic, np.ndarray):
            # handle numpy array: HWC -> CHW (assumes a 3-D array — TODO confirm
            # callers never pass 2-D grayscale here).
            img = torch.from_numpy(pic.transpose((2, 0, 1)))
            # backward compatibility: ndarrays are always rescaled to [0, 1]
            # regardless of dtype.
            return img.float().div(255)
        if accimage is not None and isinstance(pic, accimage.Image):
            # accimage decodes straight into a preallocated float32 CHW buffer.
            nppic = np.zeros([pic.channels, pic.height, pic.width], dtype=np.float32)
            pic.copyto(nppic)
            return torch.from_numpy(nppic)
        # handle PIL Image
        if pic.mode == 'I':
            # 32-bit signed integer pixels.
            img = torch.from_numpy(np.array(pic, np.int32, copy=False))
        elif pic.mode == 'I;16':
            # 16-bit pixels.
            img = torch.from_numpy(np.array(pic, np.int16, copy=False))
        else:
            # Any other mode: read the raw image buffer byte-for-byte.
            img = torch.ByteTensor(torch.ByteStorage.from_buffer(pic.tobytes()))
        # PIL image mode: 1, L, P, I, F, RGB, YCbCr, RGBA, CMYK
        if pic.mode == 'YCbCr':
            nchannel = 3
        elif pic.mode == 'I;16':
            nchannel = 1
        else:
            nchannel = len(pic.mode)
        # pic.size is (width, height); the flat buffer is laid out H x W x C.
        img = img.view(pic.size[1], pic.size[0], nchannel)
        # put it from HWC to CHW format
        # yikes, this transpose takes 80% of the loading time/CPU
        img = img.transpose(0, 1).transpose(0, 2).contiguous()
        if isinstance(img, torch.ByteTensor):
            # uint8 images are rescaled to [0, 1]; 'I'/'I;16' images keep
            # their raw integer values.
            return img.float().div(255)
        else:
            return img
class ToPILImage(object):
    """Convert a tensor to PIL Image.
    Converts a torch.*Tensor of shape C x H x W or a numpy ndarray of shape
    H x W x C to a PIL.Image while preserving the value range.
    """
    def __call__(self, pic):
        """
        Args:
            pic (Tensor or numpy.ndarray): Image to be converted to PIL.Image.
        Returns:
            PIL.Image: Image converted to PIL.Image.
        """
        npimg = pic
        mode = None
        if isinstance(pic, torch.FloatTensor):
            # Float tensors are assumed to be in [0, 1] and rescaled to
            # [0, 255]. NOTE(review): a DoubleTensor is NOT rescaled and falls
            # through to the dtype checks below — confirm this is intended.
            pic = pic.mul(255).byte()
        if torch.is_tensor(pic):
            # CHW tensor -> HWC ndarray, the layout PIL expects.
            npimg = np.transpose(pic.numpy(), (1, 2, 0))
        assert isinstance(npimg, np.ndarray), 'pic should be Tensor or ndarray'
        if npimg.shape[2] == 1:
            # Single channel: drop the channel axis and pick a grayscale
            # mode from the dtype. NOTE(review): the uint8/int16 checks are
            # plain `if`s while int32/float32 form an if/elif — order matters
            # here only because the dtypes are mutually exclusive.
            npimg = npimg[:, :, 0]
            if npimg.dtype == np.uint8:
                mode = 'L'
            if npimg.dtype == np.int16:
                mode = 'I;16'
            if npimg.dtype == np.int32:
                mode = 'I'
            elif npimg.dtype == np.float32:
                mode = 'F'
        else:
            # Multi-channel: only uint8 data is handled, and it is labelled
            # RGB unconditionally (a 4-channel uint8 array would mismatch).
            if npimg.dtype == np.uint8:
                mode = 'RGB'
        assert mode is not None, '{} is not supported'.format(npimg.dtype)
        return Image.fromarray(npimg, mode=mode)
class Normalize(object):
    """Normalize a tensor image with per-channel mean and standard deviation.
    Given mean: (R, G, B) and std: (R, G, B), each channel is updated in
    place as channel = (channel - mean) / std.
    Args:
        mean (sequence): Sequence of means for R, G, B channels respectively.
        std (sequence): Sequence of standard deviations for R, G, B channels
        respectively.
    """
    def __init__(self, mean, std):
        self.mean = mean
        self.std = std
    def __call__(self, tensor):
        """
        Args:
            tensor (Tensor): Tensor image of size (C, H, W) to be normalized.
        Returns:
            Tensor: The same tensor, normalized in place.
        """
        # TODO: make efficient
        for channel, channel_mean, channel_std in zip(tensor, self.mean, self.std):
            channel.sub_(channel_mean).div_(channel_std)
        return tensor
class Scale(object):
    """Rescale the input PIL.Image to the given size.
    Args:
        size (sequence or int): Desired output size. If size is a sequence like
            (w, h), output size will be matched to this. If size is an int,
            smaller edge of the image will be matched to this number.
            i.e, if height > width, then image will be rescaled to
            (size * height / width, size)
        interpolation (int, optional): Desired interpolation. Default is
            ``PIL.Image.BILINEAR``
    """
    def __init__(self, size, interpolation=Image.BILINEAR):
        # Bug fix: `collections.Iterable` was removed in Python 3.10; the
        # ABC has lived in `collections.abc` since Python 3.3.
        assert isinstance(size, int) or (isinstance(size, collections.abc.Iterable) and len(size) == 2)
        self.size = size
        self.interpolation = interpolation
    def __call__(self, img):
        """
        Args:
            img (PIL.Image): Image to be scaled.
        Returns:
            PIL.Image: Rescaled image.
        """
        if not isinstance(self.size, int):
            # Explicit (w, h) target: resize directly.
            return img.resize(self.size, self.interpolation)
        w, h = img.size
        # Smaller edge already matches: no-op.
        if (w <= h and w == self.size) or (h <= w and h == self.size):
            return img
        if w < h:
            # Width is the smaller edge; scale height proportionally.
            ow = self.size
            oh = int(self.size * h / w)
        else:
            oh = self.size
            ow = int(self.size * w / h)
        return img.resize((ow, oh), self.interpolation)
class CenterCrop(object):
    """Crop the given PIL.Image at its center.
    Args:
        size (sequence or int): Desired output size of the crop. If size is an
            int instead of sequence like (h, w), a square crop (size, size) is
            made.
    """
    def __init__(self, size):
        if isinstance(size, numbers.Number):
            side = int(size)
            self.size = (side, side)
        else:
            self.size = size
    def __call__(self, img):
        """
        Args:
            img (PIL.Image): Image to be cropped.
        Returns:
            PIL.Image: Cropped image.
        """
        width, height = img.size
        target_h, target_w = self.size
        # Anchor the crop box so it is centred in both dimensions.
        left = int(round((width - target_w) / 2.))
        top = int(round((height - target_h) / 2.))
        return img.crop((left, top, left + target_w, top + target_h))
class Pad(object):
    """Pad the given PIL.Image on all sides with the given "pad" value.
    Args:
        padding (int or sequence): Padding on each border. If a sequence of
            length 4, it is used to pad left, top, right and bottom borders respectively.
        fill: Pixel fill value. Default is 0.
    """
    def __init__(self, padding, fill=0):
        # Validate the configuration up front so misuse fails at
        # construction time rather than on the first image.
        assert isinstance(padding, numbers.Number)
        assert isinstance(fill, (numbers.Number, str, tuple))
        self.padding = padding
        self.fill = fill
    def __call__(self, img):
        """
        Args:
            img (PIL.Image): Image to be padded.
        Returns:
            PIL.Image: Padded image.
        """
        return ImageOps.expand(img, border=self.padding, fill=self.fill)
class Lambda(object):
    """Wrap a user-supplied function so it can be used as a transform.
    Args:
        lambd (function): Lambda/function to be used for transform.
    """
    def __init__(self, lambd):
        # Reject anything that is not a plain function/lambda.
        assert isinstance(lambd, types.LambdaType)
        self.lambd = lambd
    def __call__(self, img):
        # Delegate directly to the wrapped callable.
        return self.lambd(img)
class RandomCrop(object):
    """Crop the given PIL.Image at a random location.
    Args:
        size (sequence or int): Desired output size of the crop. If size is an
            int instead of sequence like (h, w), a square crop (size, size) is
            made.
        padding (int or sequence, optional): Optional padding on each border
            of the image. Default is 0, i.e no padding. If a sequence of length
            4 is provided, it is used to pad left, top, right, bottom borders
            respectively.
    """
    def __init__(self, size, padding=0):
        if isinstance(size, numbers.Number):
            side = int(size)
            self.size = (side, side)
        else:
            self.size = size
        self.padding = padding
    def __call__(self, img):
        """
        Args:
            img (PIL.Image): Image to be cropped.
        Returns:
            PIL.Image: Cropped image.
        """
        if self.padding > 0:
            img = ImageOps.expand(img, border=self.padding, fill=0)
        width, height = img.size
        target_h, target_w = self.size
        # Exact fit: nothing to randomise.
        if (width, height) == (target_w, target_h):
            return img
        left = random.randint(0, width - target_w)
        top = random.randint(0, height - target_h)
        return img.crop((left, top, left + target_w, top + target_h))
class RandomHorizontalFlip(object):
    """Horizontally flip the given PIL.Image with probability ``flip_prob``."""
    def __init__(self, flip_prob=0.5):
        self.flip_prob = flip_prob
    def __call__(self, img):
        """
        Args:
            img (PIL.Image): Image to be flipped.
        Returns:
            PIL.Image: Randomly flipped image.
        """
        should_flip = random.random() < self.flip_prob
        if not should_flip:
            return img
        return img.transpose(Image.FLIP_LEFT_RIGHT)
class RandomSizedCrop(object):
    """Crop the given PIL.Image to a random size and aspect ratio.
    A crop covering a random 0.08 to 1.0 fraction of the original area, with
    aspect ratio drawn from 3/4 to 4/3, is taken and resized to
    ``size`` x ``size``. Popularised by the Inception networks.
    Args:
        size: size of the smaller edge
        interpolation: Default: PIL.Image.BILINEAR
    """
    def __init__(self, size, interpolation=Image.BILINEAR):
        self.size = size
        self.interpolation = interpolation
    def __call__(self, img):
        # Up to 10 attempts to sample a crop that fits inside the image.
        for _ in range(10):
            full_area = img.size[0] * img.size[1]
            target_area = random.uniform(0.08, 1.0) * full_area
            aspect_ratio = random.uniform(3. / 4, 4. / 3)
            crop_w = int(round(math.sqrt(target_area * aspect_ratio)))
            crop_h = int(round(math.sqrt(target_area / aspect_ratio)))
            # Swap orientation half of the time.
            if random.random() < 0.5:
                crop_w, crop_h = crop_h, crop_w
            if crop_w > img.size[0] or crop_h > img.size[1]:
                continue
            left = random.randint(0, img.size[0] - crop_w)
            top = random.randint(0, img.size[1] - crop_h)
            patch = img.crop((left, top, left + crop_w, top + crop_h))
            assert (patch.size == (crop_w, crop_h))
            return patch.resize((self.size, self.size), self.interpolation)
        # Fallback after 10 failed attempts: scale then centre-crop.
        scale = Scale(self.size, interpolation=self.interpolation)
        crop = CenterCrop(self.size)
        return crop(scale(img))
class CenterCropWithOffset(object):
    """Centre-crop with a fixed offset plus a random jitter ("joggle").
    The (x_size, y_size) crop window is anchored at the image centre,
    shifted by (x_offset, y_offset), then jittered by up to +/-x_joggle /
    +/-y_joggle pixels per axis.
    Args:
        x_size, y_size: width and height of the crop window.
        x_offset, y_offset: fixed shift of the window from the image centre.
        x_joggle, y_joggle: maximum magnitude of the random jitter per axis.
        ignore_fault: if True, re-anchor the window at the top/left edge and
            shrink it when it does not fit; if False, raise RuntimeError.
    """
    def __init__(self, x_size, y_size, x_offset, y_offset, x_joggle, y_joggle, ignore_fault=False):
        self.x_size = x_size
        self.y_size = y_size
        self.x_offset = x_offset
        self.y_offset = y_offset
        self.x_joggle = x_joggle
        self.y_joggle = y_joggle
        self.ignore_fault = ignore_fault
    def __call__(self, img):
        """
        Args:
            img (PIL.Image): Image to be cropped.
        Returns:
            PIL.Image: Cropped image.
        """
        w, h = img.size
        th = self.y_size
        tw = self.x_size
        # Jitter: uniform magnitude in [0, joggle] times a random factor in
        # [-1, 1]. NOTE(review): x1/y1 remain floats here, so the crop box is
        # passed to PIL with float coordinates — confirm callers expect that.
        x1 = int(round((w - tw) / 2.)) + self.x_offset + random.uniform(0, self.x_joggle) * (
            random.uniform(0, 1) - 0.5) * 2
        y1 = int(round((h - th) / 2.)) + self.y_offset + random.uniform(0, self.y_joggle) * (
            random.uniform(0, 1) - 0.5) * 2
        # Clamp the window to the image bounds.
        x1 = max(0, x1)
        x2 = min(x1 + tw, w)
        y1 = max(0, y1)
        y2 = min(y1 + th, h)
        if y2 - y1 != self.y_size:
            if self.ignore_fault:
                # Re-anchor at the top edge and shrink the height to fit.
                y1 = 0
                th = y2
            else:
                raise (RuntimeError('(data_transformer)Size Error:y2-y1 != self.y_size:' +
                                    str(y2 - y1) + 'vs' + str(self.y_size)))
        if x2 - x1 != self.x_size:
            if self.ignore_fault:
                # Re-anchor at the left edge and shrink the width to fit.
                x1 = 0
                tw = x2
            else:
                raise (RuntimeError('(data_transformer)Size Error:x2-x1 != self.x_size:' +
                                    str(x2 - x1) + 'vs' + str(self.x_size)))
        return img.crop((x1, y1, x1 + tw, y1 + th))
class GammaCorrection(object):
    """Randomly apply one of three gamma-correction style augmentations.
    With probability ``ratio`` the image is transformed by one of:
      0: a uniform gamma curve over the whole image,
      1: a horizontal gamma gradient (gamma ramps across the columns),
      2: a gamma curve applied to one randomly chosen channel.
    Args:
        ratio: probability of applying any correction at all.
        low_0, high_0: gamma range for the uniform correction (type 0).
        low_1, high_1: gamma range for the horizontal gradient (type 1).
        low_2, high_2: gamma range for the single-channel correction (type 2).
        projection_type: None to pick a type at random, an int to force one
            type, or a list of ints to pick randomly among those.
    """
    def __init__(self, ratio=0, low_0=0.5, high_0=1.8, low_1=0.5, high_1=3, low_2=0.5, high_2=1, projection_type=None):
        self.ratio = ratio
        self.low_0 = low_0
        self.high_0 = high_0
        self.low_1 = low_1
        self.high_1 = high_1
        self.low_2 = low_2
        self.high_2 = high_2
        self.projection_type = projection_type
    def GammaCorrection_uniform(self, img, fGamma):
        """Apply the same gamma lookup table to every pixel and channel."""
        data = np.array(img)
        lut = [max(min(math.pow(i / 255.0, fGamma) * 255.0, 255), 0) for i in range(256)]
        lut = np.array(lut)
        data[:, :, :] = lut[data[:, :, :]]
        return Image.fromarray(data)
    def GammaCorrection_Horizontal(self, img, low, high):
        """Apply a gamma ramp from ``low`` to ``high`` across the columns.
        The ramp direction (left-to-right vs right-to-left) is chosen at
        random.
        """
        data = np.array(img)
        step = (high - low) / data.shape[1]
        orientationFlag = int(random.uniform(0, 1) + 0.5)
        if orientationFlag == 1:
            gamma = low
            for j in range(data.shape[1]):
                lut = [max(min(math.pow(i / 255.0, gamma) * 255.0, 255), 0) for i in range(256)]
                lut = np.array(lut)
                for i in range(data.shape[0]):
                    data[i, j, :] = lut[data[i, j, :]]
                gamma += step
        else:
            gamma = low
            for j in range(data.shape[1]):
                lut = [max(min(math.pow(i / 255.0, gamma) * 255.0, 255), 0) for i in range(256)]
                lut = np.array(lut)
                for i in range(data.shape[0]):
                    data[i, data.shape[1] - 1 - j, :] = lut[data[i, data.shape[1] - 1 - j, :]]
                gamma += step
        return Image.fromarray(data)
    def GammaCorrection_channel(self, img, fGamma):
        """Apply the gamma lookup table to one randomly chosen channel."""
        data = np.array(img)
        lut = [max(min(math.pow(i / 255.0, fGamma) * 255.0, 255), 0) for i in range(256)]
        lut = np.array(lut)
        ind_channel = int(random.uniform(0, 2) + 0.5)
        data[:, :, ind_channel] = lut[data[:, :, ind_channel]]
        return Image.fromarray(data)
    def __call__(self, img):
        # Skip the augmentation entirely with probability (1 - ratio).
        if random.uniform(0, 1) >= self.ratio:
            return img
        if self.projection_type is None:
            projection_type = int(random.uniform(0, 2) + 0.5)
        elif isinstance(self.projection_type, list):
            random.shuffle(self.projection_type)
            projection_type = self.projection_type[0]
        else:
            # Bug fix: a plain-int projection_type previously left the local
            # variable unbound (UnboundLocalError). Mirror BlurProjection's
            # dispatch and use the configured value directly.
            projection_type = self.projection_type
        if projection_type == 0:
            gamma = random.uniform(self.low_0, self.high_0)
            img = self.GammaCorrection_uniform(img, gamma)
        elif projection_type == 1:
            low = self.low_1
            high = random.uniform(low, self.high_1)
            img = self.GammaCorrection_Horizontal(img, low, high)
        elif projection_type == 2:
            gamma = random.uniform(self.low_2, self.high_2)
            img = self.GammaCorrection_channel(img, gamma)
        else:
            raise (RuntimeError('projection_type should be in [0,1,2]' + str(self.projection_type)))
        return img
class BlurProjection(object):
    """Randomly blur an image with one of three methods.
    With probability ``ratio`` the image is degraded by one of:
      0: Gaussian blur with a random radius,
      1: down-sampling then up-sampling (resolution loss),
      2: convolution with a linear motion-blur PSF kernel.
    Args:
        ratio: probability of applying any blur at all.
        guassian_low, guassian_high: radius range for the Gaussian blur.
        downsample_low, downsample_high: scale-factor range for method 1.
        psf_len_low, psf_len_high: kernel-length range for the motion blur.
        psf_ang_low, psf_ang_high: angle range (degrees) for the motion blur.
        projection_type: None to pick a method at random, an int to force
            one, or a list of ints to choose among.
    """
    def __init__(self, ratio=0, guassian_low=0, guassian_high=3, downsample_low=0.25, downsample_high=5, psf_len_low=2,
                 psf_len_high=5, psf_ang_low=1, psf_ang_high=180, projection_type=None):
        self.ratio = ratio
        self.guassian_low = guassian_low
        self.guassian_high = guassian_high
        self.downsample_low = downsample_low
        self.downsample_high = downsample_high
        self.psf_len_low = psf_len_low
        self.psf_len_high = psf_len_high
        self.psf_ang_low = psf_ang_low
        self.psf_ang_high = psf_ang_high
        self.projection_type = projection_type
    def Gaussian(self, img, radius):
        # Plain PIL Gaussian blur.
        img = img.filter(ImageFilter.GaussianBlur(radius=radius))
        return img
    def DownSample(self, img, downRate):
        # NOTE(review): img.size is (width, height), so "rows"/"cols" are
        # really (width, height); harmless because the resize round-trips
        # back to the original size.
        rows, cols = img.size
        rows_new = int(rows * downRate)
        cols_new = int(cols * downRate)
        img = img.resize((rows_new, cols_new), resample=Image.BILINEAR)
        img = img.resize((rows, cols), resample=Image.BILINEAR)
        return img
    def Psf(self, img, len_, angle):
        """Convolve the image with a linear motion-blur point-spread
        function of length ``len_`` at ``angle`` degrees."""
        EPS = 0.00000001
        # Exact multiples of 90 degrees would degenerate; nudge them.
        if int(angle) % 90 == 0:
            angle -= 1
        half = len_ / 2
        # NOTE(review): with true division (see the __future__ import at the
        # top of the file), (angle / 180) * 180 == angle, so alpha is always
        # ~0 and the angle parameter has almost no effect. This looks like a
        # Python 2 integer-division leftover (angle % 180 was likely
        # intended) — confirm before relying on the blur angle.
        alpha = (angle - (angle / 180) * 180) / 180 * math.pi
        cosalpha = math.cos(alpha)
        sinalpha = math.sin(alpha)
        if cosalpha < 0:
            xsign = -1
        else:
            if angle == 90:
                xsign = 0
            else:
                xsign = 1
        psfwdt = 1
        # Half-kernel extent along x and y.
        sx = int(abs(half * cosalpha + psfwdt * xsign - len_ * EPS) + 0.5)
        sy = int(abs(half * sinalpha + psfwdt - len_ * EPS) + 0.5)
        psf1 = np.zeros((sy, sx))
        psf2 = np.zeros((sy * 2, sx * 2))
        # row = 2 * sy
        # col = 2 * sx
        # Build one quadrant of the PSF: signed distance of each cell from
        # the blur line, tapered near the end of the stroke.
        for i in range(sy):
            for j in range(sx):
                psf1[i, j] = i * abs(cosalpha) - j * sinalpha
                rad = math.sqrt(i * i + j * j)
                if rad >= half and abs(psf1[i, j]) <= psfwdt:
                    tmp = half - abs((j + psf1[i, j] * sinalpha) / (cosalpha + EPS))
                    psf1[i, j] = math.sqrt(psf1[i, j] * psf1[i, j] + tmp * tmp)
        # Mirror the quadrant into the opposite corner of the full kernel
        # and zero the other two quadrants.
        psf2[:sy, :sx] = psf1[:sy, :sx]
        for i in range(sy):
            for j in range(sx):
                psf2[2 * sy - 1 - i, 2 * sx - 1 - j] = psf1[i, j]
                psf2[sy + i][j] = 0
                psf2[i][sx + j] = 0
        # Normalise so the kernel preserves overall brightness.
        sum_ = psf2.sum()
        psf2 = psf2 / (sum_ + EPS)
        if cosalpha > 0:
            # Flip the kernel vertically for strokes pointing the other way.
            for i in range(sy):
                tmp = np.copy(psf2[i])
                psf2[i] = np.copy(psf2[2 * sy - i - 1])
                psf2[2 * sy - i - 1] = np.copy(tmp)
        data = np.array(img)
        # Convolve each colour channel separately.
        data[:, :, 0] = ndimage.convolve(data[:, :, 0], psf2)
        data[:, :, 1] = ndimage.convolve(data[:, :, 1], psf2)
        data[:, :, 2] = ndimage.convolve(data[:, :, 2], psf2)
        return Image.fromarray(data)
    def __call__(self, img):
        # Skip the augmentation entirely with probability (1 - ratio).
        if random.uniform(0, 1) >= self.ratio:
            return img
        if self.projection_type is None:
            # NOTE(review): int(uniform(0, 2) + 0.5) picks 0/1/2 with
            # probabilities 0.25/0.5/0.25, not uniformly — confirm intended.
            projection_type = int(random.uniform(0, 2) + 0.5)
        elif isinstance(self.projection_type, list):
            random.shuffle(self.projection_type)
            projection_type = self.projection_type[0]
        else:
            projection_type = self.projection_type
        if projection_type == 0:
            radius = random.uniform(self.guassian_low, self.guassian_high)
            img = self.Gaussian(img, radius)
        elif projection_type == 1:
            downsample = random.uniform(self.downsample_low, self.downsample_high)
            img = self.DownSample(img, downsample)
        elif projection_type == 2:
            len_ = random.uniform(self.psf_len_low, self.psf_len_high)
            ang = random.uniform(self.psf_ang_low, self.psf_ang_high)
            img = self.Psf(img, len_, ang)
        else:
            raise (RuntimeError('projection_type should be in [0,1,2]' +
                                str(self.projection_type)))
        return img
class ConvertGray(object):
    """Randomly convert an image to grayscale, kept as 3-channel RGB.
    Args:
        ratio: probability of applying the conversion.
    """
    def __init__(self, ratio=0):
        self.ratio = ratio
    def __call__(self, img):
        apply_it = random.uniform(0, 1) < self.ratio
        if apply_it:
            # Round-trip through 'L' so the output stays 3-channel.
            return img.convert('L').convert('RGB')
        return img
class MultiCropTransform(object):
    """ Create multi crops of the same image.
    Each view is produced by calling ``transform`` on the input; when
    ``ori_transform`` is given, the first view is produced with it instead.
    NOTE(review): with ``ori_transform`` set, n == 2 yields 2 views but
    n > 2 yields 1 + (n - 1) = n views — confirm the n == 2 special case
    against callers.
    """
    def __init__(self, transform, ori_transform=None, n=2):
        self.transform = transform
        self.ori_transform = ori_transform
        self.n = n
    def __call__(self, x):
        out = []
        if self.ori_transform is None:
            # Plain case: n independent applications of the same transform.
            for i in range(self.n):
                out.append(self.transform(x))
        else:
            # First view uses the "original" transform.
            out.append(self.ori_transform(x))
            if self.n > 2:
                for i in range(self.n - 1):
                    out.append(self.transform(x))
            else:
                out.append(self.transform(x))
return out | util/data_transformer.py | from __future__ import division
import torch
import math
import random
from PIL import Image, ImageOps, ImageFilter
try:
import accimage
except ImportError:
accimage = None
import scipy.ndimage as ndimage
import numpy as np
import numbers
import types
import collections
class Compose(object):
    """Chain several transforms into one callable applied left to right.
    Args:
        transforms (list of ``Transform`` objects): list of transforms to compose.
    Example:
        transforms.Compose([
            transforms.CenterCrop(10),
            transforms.ToTensor(),
        ])
    """
    def __init__(self, transforms):
        self.transforms = transforms
    def __call__(self, img):
        # Feed the output of each transform into the next one.
        result = img
        for transform in self.transforms:
            result = transform(result)
        return result
class ToTensor(object):
    """Convert a ``PIL.Image`` or ``numpy.ndarray`` to tensor.
    Converts a PIL.Image or numpy.ndarray (H x W x C) in the range
    [0, 255] to a torch.FloatTensor of shape (C x H x W) in the range [0.0, 1.0].
    """
    def __call__(self, pic):
        """
        Args:
            pic (PIL.Image or numpy.ndarray): Image to be converted to tensor.
        Returns:
            Tensor: Converted image.
        """
        if isinstance(pic, np.ndarray):
            # handle numpy array: HWC -> CHW (assumes a 3-D array — TODO confirm
            # callers never pass 2-D grayscale here).
            img = torch.from_numpy(pic.transpose((2, 0, 1)))
            # backward compatibility: ndarrays are always rescaled to [0, 1]
            # regardless of dtype.
            return img.float().div(255)
        if accimage is not None and isinstance(pic, accimage.Image):
            # accimage decodes straight into a preallocated float32 CHW buffer.
            nppic = np.zeros([pic.channels, pic.height, pic.width], dtype=np.float32)
            pic.copyto(nppic)
            return torch.from_numpy(nppic)
        # handle PIL Image
        if pic.mode == 'I':
            # 32-bit signed integer pixels.
            img = torch.from_numpy(np.array(pic, np.int32, copy=False))
        elif pic.mode == 'I;16':
            # 16-bit pixels.
            img = torch.from_numpy(np.array(pic, np.int16, copy=False))
        else:
            # Any other mode: read the raw image buffer byte-for-byte.
            img = torch.ByteTensor(torch.ByteStorage.from_buffer(pic.tobytes()))
        # PIL image mode: 1, L, P, I, F, RGB, YCbCr, RGBA, CMYK
        if pic.mode == 'YCbCr':
            nchannel = 3
        elif pic.mode == 'I;16':
            nchannel = 1
        else:
            nchannel = len(pic.mode)
        # pic.size is (width, height); the flat buffer is laid out H x W x C.
        img = img.view(pic.size[1], pic.size[0], nchannel)
        # put it from HWC to CHW format
        # yikes, this transpose takes 80% of the loading time/CPU
        img = img.transpose(0, 1).transpose(0, 2).contiguous()
        if isinstance(img, torch.ByteTensor):
            # uint8 images are rescaled to [0, 1]; 'I'/'I;16' images keep
            # their raw integer values.
            return img.float().div(255)
        else:
            return img
class ToPILImage(object):
    """Convert a tensor to PIL Image.
    Converts a torch.*Tensor of shape C x H x W or a numpy ndarray of shape
    H x W x C to a PIL.Image while preserving the value range.
    """
    def __call__(self, pic):
        """
        Args:
            pic (Tensor or numpy.ndarray): Image to be converted to PIL.Image.
        Returns:
            PIL.Image: Image converted to PIL.Image.
        """
        npimg = pic
        mode = None
        if isinstance(pic, torch.FloatTensor):
            # Float tensors are assumed to be in [0, 1] and rescaled to
            # [0, 255]. NOTE(review): a DoubleTensor is NOT rescaled and falls
            # through to the dtype checks below — confirm this is intended.
            pic = pic.mul(255).byte()
        if torch.is_tensor(pic):
            # CHW tensor -> HWC ndarray, the layout PIL expects.
            npimg = np.transpose(pic.numpy(), (1, 2, 0))
        assert isinstance(npimg, np.ndarray), 'pic should be Tensor or ndarray'
        if npimg.shape[2] == 1:
            # Single channel: drop the channel axis and pick a grayscale
            # mode from the dtype. NOTE(review): the uint8/int16 checks are
            # plain `if`s while int32/float32 form an if/elif — order matters
            # here only because the dtypes are mutually exclusive.
            npimg = npimg[:, :, 0]
            if npimg.dtype == np.uint8:
                mode = 'L'
            if npimg.dtype == np.int16:
                mode = 'I;16'
            if npimg.dtype == np.int32:
                mode = 'I'
            elif npimg.dtype == np.float32:
                mode = 'F'
        else:
            # Multi-channel: only uint8 data is handled, and it is labelled
            # RGB unconditionally (a 4-channel uint8 array would mismatch).
            if npimg.dtype == np.uint8:
                mode = 'RGB'
        assert mode is not None, '{} is not supported'.format(npimg.dtype)
        return Image.fromarray(npimg, mode=mode)
class Normalize(object):
    """Normalize a tensor image with per-channel mean and standard deviation.
    Given mean: (R, G, B) and std: (R, G, B), each channel is updated in
    place as channel = (channel - mean) / std.
    Args:
        mean (sequence): Sequence of means for R, G, B channels respectively.
        std (sequence): Sequence of standard deviations for R, G, B channels
        respectively.
    """
    def __init__(self, mean, std):
        self.mean = mean
        self.std = std
    def __call__(self, tensor):
        """
        Args:
            tensor (Tensor): Tensor image of size (C, H, W) to be normalized.
        Returns:
            Tensor: The same tensor, normalized in place.
        """
        # TODO: make efficient
        for channel, channel_mean, channel_std in zip(tensor, self.mean, self.std):
            channel.sub_(channel_mean).div_(channel_std)
        return tensor
class Scale(object):
    """Rescale the input PIL.Image to the given size.
    Args:
        size (sequence or int): Desired output size. If size is a sequence like
            (w, h), output size will be matched to this. If size is an int,
            smaller edge of the image will be matched to this number.
            i.e, if height > width, then image will be rescaled to
            (size * height / width, size)
        interpolation (int, optional): Desired interpolation. Default is
            ``PIL.Image.BILINEAR``
    """
    def __init__(self, size, interpolation=Image.BILINEAR):
        # Bug fix: `collections.Iterable` was removed in Python 3.10; the
        # ABC has lived in `collections.abc` since Python 3.3.
        assert isinstance(size, int) or (isinstance(size, collections.abc.Iterable) and len(size) == 2)
        self.size = size
        self.interpolation = interpolation
    def __call__(self, img):
        """
        Args:
            img (PIL.Image): Image to be scaled.
        Returns:
            PIL.Image: Rescaled image.
        """
        if not isinstance(self.size, int):
            # Explicit (w, h) target: resize directly.
            return img.resize(self.size, self.interpolation)
        w, h = img.size
        # Smaller edge already matches: no-op.
        if (w <= h and w == self.size) or (h <= w and h == self.size):
            return img
        if w < h:
            # Width is the smaller edge; scale height proportionally.
            ow = self.size
            oh = int(self.size * h / w)
        else:
            oh = self.size
            ow = int(self.size * w / h)
        return img.resize((ow, oh), self.interpolation)
class CenterCrop(object):
    """Crop the given PIL.Image at its center.
    Args:
        size (sequence or int): Desired output size of the crop. If size is an
            int instead of sequence like (h, w), a square crop (size, size) is
            made.
    """
    def __init__(self, size):
        if isinstance(size, numbers.Number):
            side = int(size)
            self.size = (side, side)
        else:
            self.size = size
    def __call__(self, img):
        """
        Args:
            img (PIL.Image): Image to be cropped.
        Returns:
            PIL.Image: Cropped image.
        """
        width, height = img.size
        target_h, target_w = self.size
        # Anchor the crop box so it is centred in both dimensions.
        left = int(round((width - target_w) / 2.))
        top = int(round((height - target_h) / 2.))
        return img.crop((left, top, left + target_w, top + target_h))
class Pad(object):
    """Pad the given PIL.Image on all sides with the given "pad" value.
    Args:
        padding (int or sequence): Padding on each border. If a sequence of
            length 4, it is used to pad left, top, right and bottom borders respectively.
        fill: Pixel fill value. Default is 0.
    """
    def __init__(self, padding, fill=0):
        # Validate the configuration up front so misuse fails at
        # construction time rather than on the first image.
        assert isinstance(padding, numbers.Number)
        assert isinstance(fill, (numbers.Number, str, tuple))
        self.padding = padding
        self.fill = fill
    def __call__(self, img):
        """
        Args:
            img (PIL.Image): Image to be padded.
        Returns:
            PIL.Image: Padded image.
        """
        return ImageOps.expand(img, border=self.padding, fill=self.fill)
class Lambda(object):
    """Wrap a user-supplied function so it can be used as a transform.
    Args:
        lambd (function): Lambda/function to be used for transform.
    """
    def __init__(self, lambd):
        # Reject anything that is not a plain function/lambda.
        assert isinstance(lambd, types.LambdaType)
        self.lambd = lambd
    def __call__(self, img):
        # Delegate directly to the wrapped callable.
        return self.lambd(img)
class RandomCrop(object):
    """Crop the given PIL.Image at a random location.
    Args:
        size (sequence or int): Desired output size of the crop. If size is an
            int instead of sequence like (h, w), a square crop (size, size) is
            made.
        padding (int or sequence, optional): Optional padding on each border
            of the image. Default is 0, i.e no padding. If a sequence of length
            4 is provided, it is used to pad left, top, right, bottom borders
            respectively.
    """
    def __init__(self, size, padding=0):
        if isinstance(size, numbers.Number):
            side = int(size)
            self.size = (side, side)
        else:
            self.size = size
        self.padding = padding
    def __call__(self, img):
        """
        Args:
            img (PIL.Image): Image to be cropped.
        Returns:
            PIL.Image: Cropped image.
        """
        if self.padding > 0:
            img = ImageOps.expand(img, border=self.padding, fill=0)
        width, height = img.size
        target_h, target_w = self.size
        # Exact fit: nothing to randomise.
        if (width, height) == (target_w, target_h):
            return img
        left = random.randint(0, width - target_w)
        top = random.randint(0, height - target_h)
        return img.crop((left, top, left + target_w, top + target_h))
class RandomHorizontalFlip(object):
    """Horizontally flip the given PIL.Image with probability ``flip_prob``."""
    def __init__(self, flip_prob=0.5):
        self.flip_prob = flip_prob
    def __call__(self, img):
        """
        Args:
            img (PIL.Image): Image to be flipped.
        Returns:
            PIL.Image: Randomly flipped image.
        """
        should_flip = random.random() < self.flip_prob
        if not should_flip:
            return img
        return img.transpose(Image.FLIP_LEFT_RIGHT)
class RandomSizedCrop(object):
"""Crop the given PIL.Image to random size and aspect ratio.
A crop of random size of (0.08 to 1.0) of the original size and a random
aspect ratio of 3/4 to 4/3 of the original aspect ratio is made. This crop
is finally resized to given size.
This is popularly used to train the Inception networks.
Args:
size: size of the smaller edge
interpolation: Default: PIL.Image.BILINEAR
"""
def __init__(self, size, interpolation=Image.BILINEAR):
self.size = size
self.interpolation = interpolation
def __call__(self, img):
for attempt in range(10):
area = img.size[0] * img.size[1]
target_area = random.uniform(0.08, 1.0) * area
aspect_ratio = random.uniform(3. / 4, 4. / 3)
w = int(round(math.sqrt(target_area * aspect_ratio)))
h = int(round(math.sqrt(target_area / aspect_ratio)))
if random.random() < 0.5:
w, h = h, w
if w <= img.size[0] and h <= img.size[1]:
x1 = random.randint(0, img.size[0] - w)
y1 = random.randint(0, img.size[1] - h)
img = img.crop((x1, y1, x1 + w, y1 + h))
assert (img.size == (w, h))
return img.resize((self.size, self.size), self.interpolation)
# Fallback
scale = Scale(self.size, interpolation=self.interpolation)
crop = CenterCrop(self.size)
return crop(scale(img))
class CenterCropWithOffset(object):
"""Crops the given PIL.Image at the center.
Args:
size (sequence or int): Desired output size of the crop. If size is an
int instead of sequence like (h, w), a square crop (size, size) is
made.
"""
def __init__(self, x_size, y_size, x_offset, y_offset, x_joggle, y_joggle, ignore_fault=False):
self.x_size = x_size
self.y_size = y_size
self.x_offset = x_offset
self.y_offset = y_offset
self.x_joggle = x_joggle
self.y_joggle = y_joggle
self.ignore_fault = ignore_fault
def __call__(self, img):
"""
Args:
img (PIL.Image): Image to be cropped.
Returns:
PIL.Image: Cropped image.
"""
w, h = img.size
th = self.y_size
tw = self.x_size
x1 = int(round((w - tw) / 2.)) + self.x_offset + random.uniform(0, self.x_joggle) * (
random.uniform(0, 1) - 0.5) * 2
y1 = int(round((h - th) / 2.)) + self.y_offset + random.uniform(0, self.y_joggle) * (
random.uniform(0, 1) - 0.5) * 2
x1 = max(0, x1)
x2 = min(x1 + tw, w)
y1 = max(0, y1)
y2 = min(y1 + th, h)
if y2 - y1 != self.y_size:
if self.ignore_fault:
y1 = 0
th = y2
else:
raise (RuntimeError('(data_transformer)Size Error:y2-y1 != self.y_size:' +
str(y2 - y1) + 'vs' + str(self.y_size)))
if x2 - x1 != self.x_size:
if self.ignore_fault:
x1 = 0
tw = x2
else:
raise (RuntimeError('(data_transformer)Size Error:x2-x1 != self.x_size:' +
str(x2 - x1) + 'vs' + str(self.x_size)))
return img.crop((x1, y1, x1 + tw, y1 + th))
class GammaCorrection(object):
"""GammaCorrection
Args:
ratio: ratio to use the projection
low_0, high_0. different type of parameter
"""
def __init__(self, ratio=0, low_0=0.5, high_0=1.8, low_1=0.5, high_1=3, low_2=0.5, high_2=1, projection_type=None):
self.ratio = ratio
self.low_0 = low_0
self.high_0 = high_0
self.low_1 = low_1
self.high_1 = high_1
self.low_2 = low_2
self.high_2 = high_2
self.projection_type = projection_type
def GammaCorrection_uniform(self, img, fGamma):
data = np.array(img)
lut = [max(min(math.pow(i / 255.0, fGamma) * 255.0, 255), 0) for i in range(256)]
lut = np.array(lut)
data[:, :, :] = lut[data[:, :, :]]
return Image.fromarray(data)
def GammaCorrection_Horizontal(self, img, low, high):
data = np.array(img)
step = (high - low) / data.shape[1]
orientationFlag = int(random.uniform(0, 1) + 0.5)
if orientationFlag == 1:
gamma = low
for j in range(data.shape[1]):
lut = [max(min(math.pow(i / 255.0, gamma) * 255.0, 255), 0) for i in range(256)]
lut = np.array(lut)
for i in range(data.shape[0]):
data[i, j, :] = lut[data[i, j, :]]
gamma += step
else:
gamma = low
for j in range(data.shape[1]):
lut = [max(min(math.pow(i / 255.0, gamma) * 255.0, 255), 0) for i in range(256)]
lut = np.array(lut)
for i in range(data.shape[0]):
data[i, data.shape[1] - 1 - j, :] = lut[data[i, data.shape[1] - 1 - j, :]]
gamma += step
return Image.fromarray(data)
def GammaCorrection_channel(self, img, fGamma):
data = np.array(img)
lut = [max(min(math.pow(i / 255.0, fGamma) * 255.0, 255), 0) for i in range(256)]
lut = np.array(lut)
ind_channel = int(random.uniform(0, 2) + 0.5)
data[:, :, ind_channel] = lut[data[:, :, ind_channel]]
return Image.fromarray(data)
def __call__(self, img):
if random.uniform(0, 1) >= self.ratio:
return img
if self.projection_type is None:
projection_type = int(random.uniform(0, 2) + 0.5)
if isinstance(self.projection_type, list):
random.shuffle(self.projection_type)
projection_type = self.projection_type[0]
if projection_type == 0:
gamma = random.uniform(self.low_0, self.high_0)
img = self.GammaCorrection_uniform(img, gamma)
elif projection_type == 1:
low = self.low_1
high = random.uniform(low, self.high_1)
img = self.GammaCorrection_Horizontal(img, low, high)
elif projection_type == 2:
gamma = random.uniform(self.low_2, self.high_2)
img = self.GammaCorrection_channel(img, gamma)
else:
raise (RuntimeError('projection_type should be in [0,1,2]' + str(self.projection_type)))
return img
class BlurProjection(object):
"""BlurProjection
Args:
ratio: ratio to use the projection
"""
def __init__(self, ratio=0, guassian_low=0, guassian_high=3, downsample_low=0.25, downsample_high=5, psf_len_low=2,
psf_len_high=5, psf_ang_low=1, psf_ang_high=180, projection_type=None):
self.ratio = ratio
self.guassian_low = guassian_low
self.guassian_high = guassian_high
self.downsample_low = downsample_low
self.downsample_high = downsample_high
self.psf_len_low = psf_len_low
self.psf_len_high = psf_len_high
self.psf_ang_low = psf_ang_low
self.psf_ang_high = psf_ang_high
self.projection_type = projection_type
def Gaussian(self, img, radius):
img = img.filter(ImageFilter.GaussianBlur(radius=radius))
return img
def DownSample(self, img, downRate):
rows, cols = img.size
rows_new = int(rows * downRate)
cols_new = int(cols * downRate)
img = img.resize((rows_new, cols_new), resample=Image.BILINEAR)
img = img.resize((rows, cols), resample=Image.BILINEAR)
return img
def Psf(self, img, len_, angle):
EPS = 0.00000001
if int(angle) % 90 == 0:
angle -= 1
half = len_ / 2
alpha = (angle - (angle / 180) * 180) / 180 * math.pi
cosalpha = math.cos(alpha)
sinalpha = math.sin(alpha)
if cosalpha < 0:
xsign = -1
else:
if angle == 90:
xsign = 0
else:
xsign = 1
psfwdt = 1
sx = int(abs(half * cosalpha + psfwdt * xsign - len_ * EPS) + 0.5)
sy = int(abs(half * sinalpha + psfwdt - len_ * EPS) + 0.5)
psf1 = np.zeros((sy, sx))
psf2 = np.zeros((sy * 2, sx * 2))
# row = 2 * sy
# col = 2 * sx
for i in range(sy):
for j in range(sx):
psf1[i, j] = i * abs(cosalpha) - j * sinalpha
rad = math.sqrt(i * i + j * j)
if rad >= half and abs(psf1[i, j]) <= psfwdt:
tmp = half - abs((j + psf1[i, j] * sinalpha) / (cosalpha + EPS))
psf1[i, j] = math.sqrt(psf1[i, j] * psf1[i, j] + tmp * tmp)
psf2[:sy, :sx] = psf1[:sy, :sx]
for i in range(sy):
for j in range(sx):
psf2[2 * sy - 1 - i, 2 * sx - 1 - j] = psf1[i, j]
psf2[sy + i][j] = 0
psf2[i][sx + j] = 0
sum_ = psf2.sum()
psf2 = psf2 / (sum_ + EPS)
if cosalpha > 0:
for i in range(sy):
tmp = np.copy(psf2[i])
psf2[i] = np.copy(psf2[2 * sy - i - 1])
psf2[2 * sy - i - 1] = np.copy(tmp)
data = np.array(img)
data[:, :, 0] = ndimage.convolve(data[:, :, 0], psf2)
data[:, :, 1] = ndimage.convolve(data[:, :, 1], psf2)
data[:, :, 2] = ndimage.convolve(data[:, :, 2], psf2)
return Image.fromarray(data)
def __call__(self, img):
if random.uniform(0, 1) >= self.ratio:
return img
if self.projection_type is None:
projection_type = int(random.uniform(0, 2) + 0.5)
elif isinstance(self.projection_type, list):
random.shuffle(self.projection_type)
projection_type = self.projection_type[0]
else:
projection_type = self.projection_type
if projection_type == 0:
radius = random.uniform(self.guassian_low, self.guassian_high)
img = self.Gaussian(img, radius)
elif projection_type == 1:
downsample = random.uniform(self.downsample_low, self.downsample_high)
img = self.DownSample(img, downsample)
elif projection_type == 2:
len_ = random.uniform(self.psf_len_low, self.psf_len_high)
ang = random.uniform(self.psf_ang_low, self.psf_ang_high)
img = self.Psf(img, len_, ang)
else:
raise (RuntimeError('projection_type should be in [0,1,2]' +
str(self.projection_type)))
return img
class ConvertGray(object):
def __init__(self, ratio=0):
self.ratio = ratio
def __call__(self, img):
if random.uniform(0, 1) >= self.ratio:
return img
return img.convert('L').convert('RGB')
class MultiCropTransform(object):
""" Create multi crops of the same image """
def __init__(self, transform, ori_transform=None, n=2):
self.transform = transform
self.ori_transform = ori_transform
self.n = n
def __call__(self, x):
out = []
if self.ori_transform is None:
for i in range(self.n):
out.append(self.transform(x))
else:
out.append(self.ori_transform(x))
if self.n > 2:
for i in range(self.n - 1):
out.append(self.transform(x))
else:
out.append(self.transform(x))
return out | 0.896707 | 0.473596 |
"""Module containing the BoxResidues class and the command line interface."""
import argparse
from biobb_common.generic.biobb_object import BiobbObject
from biobb_common.configuration import settings
from biobb_common.tools import file_utils as fu
from biobb_common.tools.file_utils import launchlogger
from biobb_vs.utils.common import *
class BoxResidues(BiobbObject):
"""
| biobb_vs BoxResidues
| This class sets the center and the size of a rectangular parallelepiped box around a set of residues.
| Sets the center and the size of a rectangular parallelepiped box around a selection of residues found in a given PDB. The residue identifiers that compose the selection (i.e. binding site) are provided by a property list.
Args:
input_pdb_path (str): PDB protein structure for which the box will be build. Its size and center will be set around the 'resid_list' property once mapped against this PDB. File type: input. `Sample file <https://github.com/bioexcel/biobb_vs/raw/master/biobb_vs/test/data/utils/input_box_residues.pdb>`_. Accepted formats: pdb (edam:format_1476).
output_pdb_path (str): PDB including the annotation of the box center and size as REMARKs. File type: output. `Sample file <https://github.com/bioexcel/biobb_vs/raw/master/biobb_vs/test/reference/utils/ref_output_box_residues.pdb>`_. Accepted formats: pdb (edam:format_1476).
properties (dic - Python dictionary object containing the tool parameters, not input/output files):
* **resid_list** (*list*) - (None) List with all the residue numbers to form a cavity or binding site. Mandatory property.
* **offset** (*float*) - (2.0) [0.1~1000|0.1] Extra distance (Angstroms) between the last residue atom and the box boundary.
* **box_coordinates** (*bool*) - (False) Add box coordinates as 8 ATOM records.
* **residue_offset** (*int*) - (0) [0~1000|1] Residue id offset.
* **remove_tmp** (*bool*) - (True) [WF property] Remove temporal files.
* **restart** (*bool*) - (False) [WF property] Do not execute if output files exist.
Examples:
This is a use example of how to use the building block from Python::
from biobb_vs.utils.box_residues import box_residues
prop = {
'resid_list': [718, 743, 745, 762, 766, 796, 790, 791, 793, 794, 788],
'offset': 2,
'box_coordinates': True
}
box_residues(input_pdb_path='/path/to/myStructure.pdb',
output_pdb_path='/path/to/newBox.pdb',
properties=prop)
Info:
* wrapped_software:
* name: In house using Biopython
* version: >=1.76
* license: Apache-2.0
* ontology:
* name: EDAM
* schema: http://edamontology.org/EDAM.owl
"""
def __init__(self, input_pdb_path, output_pdb_path,
properties=None, **kwargs) -> None:
properties = properties or {}
# Call parent class constructor
super().__init__(properties)
# Input/Output files
self.io_dict = {
"in": { "input_pdb_path": input_pdb_path },
"out": { "output_pdb_path": output_pdb_path }
}
# Properties specific for BB
self.resid_list = properties.get('resid_list', [])
self.offset = float(properties.get('offset', 2.0))
self.box_coordinates = float(properties.get('box_coordinates', False))
self.residue_offset = properties.get('residue_offset', 0)
self.properties = properties
# Check the properties
self.check_properties(properties)
def check_data_params(self, out_log, err_log):
""" Checks all the input/output paths and parameters """
self.io_dict["in"]["input_pdb_path"] = check_input_path(self.io_dict["in"]["input_pdb_path"],"input_pdb_path", self.out_log, self.__class__.__name__)
self.io_dict["out"]["output_pdb_path"] = check_output_path(self.io_dict["out"]["output_pdb_path"],"output_pdb_path", False, self.out_log, self.__class__.__name__)
@launchlogger
def launch(self) -> int:
"""Execute the :class:`BoxResidues <utils.box_residues.BoxResidues>` utils.box_residues.BoxResidues object."""
# check input/output paths and parameters
self.check_data_params(self.out_log, self.err_log)
# Setup Biobb
if self.check_restart(): return 0
self.stage_files()
# Parse structure
fu.log('Loading input PDB structure %s' % (self.io_dict["in"]["input_pdb_path"]), self.out_log, self.global_log)
structure_name = PurePath(self.io_dict["in"]["input_pdb_path"]).name
parser = Bio.PDB.PDBParser(QUIET = True)
structPDB = parser.get_structure(structure_name, self.io_dict["in"]["input_pdb_path"])
if len(structPDB):
structPDB = structPDB[0]
## Mapping residue structure into input structure
fu.log('Mapping residue structure into input structure', self.out_log, self.global_log)
# Listing residues to be selected from the residue structure
residPDB_res_list = []
for residPDB_res in self.resid_list:
if self.residue_offset:
residPDB_res_list.append((' ', residPDB_res+self.residue_offset, ' '))
else:
residPDB_res_list.append((' ', residPDB_res, ' '))
selection_res_list = []
selection_atoms_num = 0
for struct_chain in structPDB:
for struct_res in struct_chain:
if struct_res.get_id() in residPDB_res_list:
selection_res_list.append(struct_res)
selection_atoms_num += len(struct_res.get_list())
if len(selection_res_list) == 0:
fu.log(self.__class__.__name__ + ': Cannot match any of the residues listed in [%s] into %s' % (', '.join(str(v) for v in self.resid_list), self.io_dict["in"]["input_pdb_path"]), self.out_log)
raise SystemExit(self.__class__.__name__ + ': Cannot match any of the residues listed in [%s] into %s' % (', '.join(str(v) for v in self.resid_list), self.io_dict["in"]["input_pdb_path"]))
elif len(selection_res_list) != len(residPDB_res_list):
fu.log('Cannot match all the residues listed in %s into %s. Found %s out of %s' % (', '.join(str(v) for v in self.resid_list),self.io_dict["in"]["input_pdb_path"], len(selection_res_list),len(residPDB_res_list)), self.out_log)
else:
fu.log('Selection of residues successfully matched', self.out_log, self.global_log)
## Compute binding site box size
# compute box center
selection_box_center = sum(atom.coord for res in selection_res_list for atom in res.get_atoms()) / selection_atoms_num
fu.log('Binding site center (Angstroms): %10.3f%10.3f%10.3f' % (selection_box_center[0],selection_box_center[1],selection_box_center[2]), self.out_log, self.global_log)
# compute box size
selection_coords_max = np.amax([atom.coord for res in selection_res_list for atom in res.get_atoms()],axis=0)
selection_box_size = selection_coords_max - selection_box_center
if self.offset:
selection_box_size = [c + self.offset for c in selection_box_size]
fu.log('Binding site size (Angstroms): %10.3f%10.3f%10.3f' % (selection_box_size[0],selection_box_size[1],selection_box_size[2]), self.out_log, self.global_log)
# compute volume
vol = np.prod(selection_box_size) * 2**3
fu.log('Volume (cubic Angstroms): %.0f' % (vol), self.out_log, self.global_log)
# add box details as PDB remarks
remarks = "REMARK BOX CENTER:%10.3f%10.3f%10.3f" % (selection_box_center[0],selection_box_center[1],selection_box_center[2])
remarks += " SIZE:%10.3f%10.3f%10.3f" % (selection_box_size[0],selection_box_size[1],selection_box_size[2])
selection_box_coords_txt = ""
# add (optional) box coordinates as 8 ATOM records
if self.box_coordinates:
fu.log('Adding box coordinates', self.out_log, self.global_log)
selection_box_coords_txt = get_box_coordinates(selection_box_center,selection_box_size)
with open(self.io_dict["out"]["output_pdb_path"], 'w') as f:
f.seek(0, 0)
f.write(remarks.rstrip('\r\n') + '\n' + selection_box_coords_txt)
fu.log('Saving output PDB file (with box setting annotations): %s' % (self.io_dict["out"]["output_pdb_path"]), self.out_log, self.global_log)
return 0
def box_residues(input_pdb_path: str, output_pdb_path: str, properties: dict = None, **kwargs) -> int:
"""Execute the :class:`BoxResidues <utils.box_residues.BoxResidues>` class and
execute the :meth:`launch() <utils.box_residues.BoxResidues.launch>` method."""
return BoxResidues(input_pdb_path=input_pdb_path,
output_pdb_path=output_pdb_path,
properties=properties, **kwargs).launch()
def main():
"""Command line execution of this building block. Please check the command line documentation."""
parser = argparse.ArgumentParser(description="Sets the center and the size of a rectangular parallelepiped box around a selection of residues found in a given PDB.", formatter_class=lambda prog: argparse.RawTextHelpFormatter(prog, width=99999))
parser.add_argument('--config', required=False, help='Configuration file')
# Specific args of each building block
required_args = parser.add_argument_group('required arguments')
required_args.add_argument('--input_pdb_path', required=True, help='PDB protein structure for which the box will be build. Its size and center will be set around the \'resid_list\' property once mapped against this PDB. Accepted formats: pdb.')
required_args.add_argument('--output_pdb_path', required=True, help='PDB including the annotation of the box center and size as REMARKs. Accepted formats: pdb.')
args = parser.parse_args()
args.config = args.config or "{}"
properties = settings.ConfReader(config=args.config).get_prop_dic()
# Specific call of each building block
box_residues(input_pdb_path=args.input_pdb_path,
output_pdb_path=args.output_pdb_path,
properties=properties)
if __name__ == '__main__':
main() | biobb_vs/utils/box_residues.py |
"""Module containing the BoxResidues class and the command line interface."""
import argparse
from biobb_common.generic.biobb_object import BiobbObject
from biobb_common.configuration import settings
from biobb_common.tools import file_utils as fu
from biobb_common.tools.file_utils import launchlogger
from biobb_vs.utils.common import *
class BoxResidues(BiobbObject):
"""
| biobb_vs BoxResidues
| This class sets the center and the size of a rectangular parallelepiped box around a set of residues.
| Sets the center and the size of a rectangular parallelepiped box around a selection of residues found in a given PDB. The residue identifiers that compose the selection (i.e. binding site) are provided by a property list.
Args:
input_pdb_path (str): PDB protein structure for which the box will be build. Its size and center will be set around the 'resid_list' property once mapped against this PDB. File type: input. `Sample file <https://github.com/bioexcel/biobb_vs/raw/master/biobb_vs/test/data/utils/input_box_residues.pdb>`_. Accepted formats: pdb (edam:format_1476).
output_pdb_path (str): PDB including the annotation of the box center and size as REMARKs. File type: output. `Sample file <https://github.com/bioexcel/biobb_vs/raw/master/biobb_vs/test/reference/utils/ref_output_box_residues.pdb>`_. Accepted formats: pdb (edam:format_1476).
properties (dic - Python dictionary object containing the tool parameters, not input/output files):
* **resid_list** (*list*) - (None) List with all the residue numbers to form a cavity or binding site. Mandatory property.
* **offset** (*float*) - (2.0) [0.1~1000|0.1] Extra distance (Angstroms) between the last residue atom and the box boundary.
* **box_coordinates** (*bool*) - (False) Add box coordinates as 8 ATOM records.
* **residue_offset** (*int*) - (0) [0~1000|1] Residue id offset.
* **remove_tmp** (*bool*) - (True) [WF property] Remove temporal files.
* **restart** (*bool*) - (False) [WF property] Do not execute if output files exist.
Examples:
This is a use example of how to use the building block from Python::
from biobb_vs.utils.box_residues import box_residues
prop = {
'resid_list': [718, 743, 745, 762, 766, 796, 790, 791, 793, 794, 788],
'offset': 2,
'box_coordinates': True
}
box_residues(input_pdb_path='/path/to/myStructure.pdb',
output_pdb_path='/path/to/newBox.pdb',
properties=prop)
Info:
* wrapped_software:
* name: In house using Biopython
* version: >=1.76
* license: Apache-2.0
* ontology:
* name: EDAM
* schema: http://edamontology.org/EDAM.owl
"""
def __init__(self, input_pdb_path, output_pdb_path,
properties=None, **kwargs) -> None:
properties = properties or {}
# Call parent class constructor
super().__init__(properties)
# Input/Output files
self.io_dict = {
"in": { "input_pdb_path": input_pdb_path },
"out": { "output_pdb_path": output_pdb_path }
}
# Properties specific for BB
self.resid_list = properties.get('resid_list', [])
self.offset = float(properties.get('offset', 2.0))
self.box_coordinates = float(properties.get('box_coordinates', False))
self.residue_offset = properties.get('residue_offset', 0)
self.properties = properties
# Check the properties
self.check_properties(properties)
def check_data_params(self, out_log, err_log):
""" Checks all the input/output paths and parameters """
self.io_dict["in"]["input_pdb_path"] = check_input_path(self.io_dict["in"]["input_pdb_path"],"input_pdb_path", self.out_log, self.__class__.__name__)
self.io_dict["out"]["output_pdb_path"] = check_output_path(self.io_dict["out"]["output_pdb_path"],"output_pdb_path", False, self.out_log, self.__class__.__name__)
@launchlogger
def launch(self) -> int:
"""Execute the :class:`BoxResidues <utils.box_residues.BoxResidues>` utils.box_residues.BoxResidues object."""
# check input/output paths and parameters
self.check_data_params(self.out_log, self.err_log)
# Setup Biobb
if self.check_restart(): return 0
self.stage_files()
# Parse structure
fu.log('Loading input PDB structure %s' % (self.io_dict["in"]["input_pdb_path"]), self.out_log, self.global_log)
structure_name = PurePath(self.io_dict["in"]["input_pdb_path"]).name
parser = Bio.PDB.PDBParser(QUIET = True)
structPDB = parser.get_structure(structure_name, self.io_dict["in"]["input_pdb_path"])
if len(structPDB):
structPDB = structPDB[0]
## Mapping residue structure into input structure
fu.log('Mapping residue structure into input structure', self.out_log, self.global_log)
# Listing residues to be selected from the residue structure
residPDB_res_list = []
for residPDB_res in self.resid_list:
if self.residue_offset:
residPDB_res_list.append((' ', residPDB_res+self.residue_offset, ' '))
else:
residPDB_res_list.append((' ', residPDB_res, ' '))
selection_res_list = []
selection_atoms_num = 0
for struct_chain in structPDB:
for struct_res in struct_chain:
if struct_res.get_id() in residPDB_res_list:
selection_res_list.append(struct_res)
selection_atoms_num += len(struct_res.get_list())
if len(selection_res_list) == 0:
fu.log(self.__class__.__name__ + ': Cannot match any of the residues listed in [%s] into %s' % (', '.join(str(v) for v in self.resid_list), self.io_dict["in"]["input_pdb_path"]), self.out_log)
raise SystemExit(self.__class__.__name__ + ': Cannot match any of the residues listed in [%s] into %s' % (', '.join(str(v) for v in self.resid_list), self.io_dict["in"]["input_pdb_path"]))
elif len(selection_res_list) != len(residPDB_res_list):
fu.log('Cannot match all the residues listed in %s into %s. Found %s out of %s' % (', '.join(str(v) for v in self.resid_list),self.io_dict["in"]["input_pdb_path"], len(selection_res_list),len(residPDB_res_list)), self.out_log)
else:
fu.log('Selection of residues successfully matched', self.out_log, self.global_log)
## Compute binding site box size
# compute box center
selection_box_center = sum(atom.coord for res in selection_res_list for atom in res.get_atoms()) / selection_atoms_num
fu.log('Binding site center (Angstroms): %10.3f%10.3f%10.3f' % (selection_box_center[0],selection_box_center[1],selection_box_center[2]), self.out_log, self.global_log)
# compute box size
selection_coords_max = np.amax([atom.coord for res in selection_res_list for atom in res.get_atoms()],axis=0)
selection_box_size = selection_coords_max - selection_box_center
if self.offset:
selection_box_size = [c + self.offset for c in selection_box_size]
fu.log('Binding site size (Angstroms): %10.3f%10.3f%10.3f' % (selection_box_size[0],selection_box_size[1],selection_box_size[2]), self.out_log, self.global_log)
# compute volume
vol = np.prod(selection_box_size) * 2**3
fu.log('Volume (cubic Angstroms): %.0f' % (vol), self.out_log, self.global_log)
# add box details as PDB remarks
remarks = "REMARK BOX CENTER:%10.3f%10.3f%10.3f" % (selection_box_center[0],selection_box_center[1],selection_box_center[2])
remarks += " SIZE:%10.3f%10.3f%10.3f" % (selection_box_size[0],selection_box_size[1],selection_box_size[2])
selection_box_coords_txt = ""
# add (optional) box coordinates as 8 ATOM records
if self.box_coordinates:
fu.log('Adding box coordinates', self.out_log, self.global_log)
selection_box_coords_txt = get_box_coordinates(selection_box_center,selection_box_size)
with open(self.io_dict["out"]["output_pdb_path"], 'w') as f:
f.seek(0, 0)
f.write(remarks.rstrip('\r\n') + '\n' + selection_box_coords_txt)
fu.log('Saving output PDB file (with box setting annotations): %s' % (self.io_dict["out"]["output_pdb_path"]), self.out_log, self.global_log)
return 0
def box_residues(input_pdb_path: str, output_pdb_path: str, properties: dict = None, **kwargs) -> int:
"""Execute the :class:`BoxResidues <utils.box_residues.BoxResidues>` class and
execute the :meth:`launch() <utils.box_residues.BoxResidues.launch>` method."""
return BoxResidues(input_pdb_path=input_pdb_path,
output_pdb_path=output_pdb_path,
properties=properties, **kwargs).launch()
def main():
"""Command line execution of this building block. Please check the command line documentation."""
parser = argparse.ArgumentParser(description="Sets the center and the size of a rectangular parallelepiped box around a selection of residues found in a given PDB.", formatter_class=lambda prog: argparse.RawTextHelpFormatter(prog, width=99999))
parser.add_argument('--config', required=False, help='Configuration file')
# Specific args of each building block
required_args = parser.add_argument_group('required arguments')
required_args.add_argument('--input_pdb_path', required=True, help='PDB protein structure for which the box will be build. Its size and center will be set around the \'resid_list\' property once mapped against this PDB. Accepted formats: pdb.')
required_args.add_argument('--output_pdb_path', required=True, help='PDB including the annotation of the box center and size as REMARKs. Accepted formats: pdb.')
args = parser.parse_args()
args.config = args.config or "{}"
properties = settings.ConfReader(config=args.config).get_prop_dic()
# Specific call of each building block
box_residues(input_pdb_path=args.input_pdb_path,
output_pdb_path=args.output_pdb_path,
properties=properties)
if __name__ == '__main__':
main() | 0.910658 | 0.509825 |
import argparse
import ipaddress
import logging
import os
import re
import sys
from typing import Optional, Tuple

from ssl_certinfo import __author__, __email__, __version__, ssl_certinfo, validation
from ssl_certinfo.ssl_certinfo import OutputFormat
# Version/license banner printed by main() when --version is given.
VERSION = rf"""
ssl_certinfo {__version__}
Copyright (C) 2020 {__author__} ({__email__})
License Apache-2.0: <http://www.apache.org/licenses/LICENSE-2.0>.
"""
def check_hostname_or_ip_address(value):
    """Argparse type validator for host arguments.

    Accepts a hostname, a single ip address, an ip network (CIDR) or an
    ip range; returns *value* unchanged on success and raises
    ``argparse.ArgumentTypeError`` otherwise.
    """
    accepted = (
        validation.is_valid_hostname(value)
        or validation.is_valid_ip_address(value)
        or validation.is_valid_ip_network(value)
        or validation.is_valid_ip_range(value)
    )
    if not accepted:
        raise argparse.ArgumentTypeError(
            "%s is not a valid hostname or ip address" % value
        )
    return value
def check_proxy_url(value):
    """Argparse type validator for the --proxy option.

    Delegates to ``parse_proxy_url`` and converts its ``ValueError``
    into an ``argparse.ArgumentTypeError`` so argparse reports a usage
    error; returns the parsed (protocol, host, port) tuple otherwise.
    """
    try:
        return parse_proxy_url(value)
    except ValueError:
        raise argparse.ArgumentTypeError("%s is not a valid proxy url" % value)
def parse_proxy_url(proxyurl: str) -> Optional[Tuple[str, str, int]]:
    """Split a proxy url into its (protocol, host, port) components.

    Args:
        proxyurl: url of the form ``[protocol://]host[:port]`` where the
            protocol may be http, https or socks.

    Returns:
        A ``(protocol, host, port)`` tuple, with the protocol defaulting
        to ``"http"`` and the port defaulting to 3128; ``None`` when
        *proxyurl* is the empty string.

    Raises:
        ValueError: if the url cannot be parsed, the host part is neither
            a valid hostname nor a valid ip address, or the port is not
            in the range 1-65535.
    """
    if proxyurl == "":
        return None
    proto = host = port = ""
    match = re.match(r"^((http[s]?|socks):\/\/)?([^:\/\s]+)(:(\d+))?$", proxyurl)
    if match:
        x, proto, host, x, port = match.groups(default="")
    else:
        locallogger = logging.getLogger("validate.parse_proxy_url")
        locallogger.debug("Not a valid proxy url: {}".format(proxyurl))
        raise ValueError("Not a valid proxy url: {}".format(proxyurl))
    if not proto:
        proto = "http"
    if (
        host
        and not validation.is_valid_hostname(host)
        and not validation.is_valid_ip_address(host)
    ):
        # Bug fix: report the offending host, not the (unrelated) port.
        raise ValueError("Not a valid hostname or ip address: {}".format(host))
    if not port:
        port = 3128
    elif not (0 < int(port) <= 65535):
        raise ValueError("Invalid port number: {}".format(port))
    return proto, host, int(port)
def check_positive(value):
    """Argparse type validator: convert *value* to a strictly positive int.

    Raises ``argparse.ArgumentTypeError`` when *value* is not an int or
    is zero/negative.
    """
    try:
        number = int(value)
    except ValueError:
        raise argparse.ArgumentTypeError("%s is not an int value" % value)
    if number <= 0:
        raise argparse.ArgumentTypeError("%s is not a positive int value" % value)
    return number
def check_valid_port(value):
    """Argparse type validator: accept TCP port numbers in [1, 65535].

    Raises ``argparse.ArgumentTypeError`` for non-integers and for
    integers outside the valid port range.
    """
    try:
        port = int(value)
    except ValueError:
        raise argparse.ArgumentTypeError("%s is an invalid port number" % value)
    if not 0 < port <= 65535:
        raise argparse.ArgumentTypeError("%s is an invalid port number" % value)
    return port
def expand_hosts(hostlist):
    """Expand ip ranges and networks in *hostlist* into single addresses.

    Hostnames and single ip addresses are kept as-is; "start - end"
    ranges and CIDR networks are expanded to one entry per address.
    Elements matching none of the recognized forms are dropped silently.
    """
    expanded = []
    for entry in hostlist:
        if validation.is_valid_hostname(entry) or validation.is_valid_ip_address(
            entry
        ):
            expanded.append(entry)
            continue
        if validation.is_valid_ip_range(entry):
            start, end = re.split(r" *- *", entry)
            try:
                addr = ipaddress.ip_address(start)
                stop = ipaddress.ip_address(end)
            except ValueError:
                continue
            logging.debug("Expanding ip address range " + entry)
            while addr <= stop:
                expanded.append(str(addr))
                addr = addr + 1
            continue
        try:
            net = ipaddress.ip_network(entry, False)
        except ValueError:
            continue
        logging.debug("Expanding ip network " + entry)
        expanded.extend(str(ipaddr) for ipaddr in net)
    return expanded
def create_parser():
    """Build the argument parser for the ssl_certinfo command line tool.

    Defines the positional host list, the connection options
    (-p/--port, -t/--timeout, -x/--proxy), the mutually exclusive
    verbosity switches (-v/-q) and a mutually exclusive group of output
    format flags (table/json/yaml/csv/raw) defaulting to table output.
    """
    parser = argparse.ArgumentParser(
        description="Collect information about SSL certificates from a set of hosts"
    )
    parser.add_argument(
        "-V",
        "--version",
        action="store_true",
        dest="displayVersion",
        help="display version information and exit",
    )
    verb_group = parser.add_mutually_exclusive_group()
    verb_group.add_argument(
        "-v",
        "--verbose",
        action="count",
        dest="verbosity",
        default=0,
        help="verbose output (repeat for increased verbosity)",
    )
    verb_group.add_argument(
        "-q",
        "--quiet",
        action="store_const",
        const=-1,
        default=0,
        dest="verbosity",
        help="quiet output (show errors only)",
    )
    parser.add_argument(
        "host",
        nargs="*",
        type=check_hostname_or_ip_address,
        help="Connect to HOST",
    )
    parser.add_argument(
        "-p",
        "--port",
        default=443,
        type=check_valid_port,
        # Fixed help text: "connnect" typo and range — check_valid_port
        # rejects port 0, so the documented range starts at 1.
        help="TCP port to connect to [1-65535]",
    )
    parser.add_argument(
        "-t",
        "--timeout",
        default=5,
        type=check_positive,
        help="Maximum time allowed for connection",
    )
    parser.add_argument(
        "-x",
        "--proxy",
        default=get_proxy_from_env(),
        type=check_proxy_url,
        help="Use the specified proxy",
        metavar="[protocol://]host[:port]",
    )
    # One store_const flag per output format; TABLE is the shared default.
    output_format = parser.add_mutually_exclusive_group()
    format_flags = [
        ("-T", "--table", OutputFormat.TABLE, "Print results in table format"),
        ("-j", "--json", OutputFormat.JSON, "Print results in JSON format"),
        ("-y", "--yaml", OutputFormat.YAML, "Print results in YAML format"),
        ("-c", "--csv", OutputFormat.CSV, "Print results in CSV format"),
        ("-r", "--raw", OutputFormat.RAW, "Print results in raw format"),
    ]
    for short_flag, long_flag, const, helptext in format_flags:
        output_format.add_argument(
            short_flag,
            long_flag,
            action="store_const",
            const=const,
            default=OutputFormat.TABLE,
            dest="outform",
            help=helptext,
        )
    return parser
def get_proxy_from_env():
locallogger = logging.getLogger("cli.get_proxy_from_env")
env_keys = [
"http_proxy",
"HTTP_PROXY",
"https_proxy",
"HTTPS_PROXY",
]
env = os.environ
for key in env_keys:
if key in env:
locallogger.debug(
"Environment variable {} found with value: {}".format(key, env[key])
)
return env[key]
locallogger.debug("No proxy environment variable found.")
return ""
def setup_logging(verbosity):
base_loglevel = 30
verbosity = min(verbosity, 2)
loglevel = base_loglevel - (verbosity * 10)
logging.basicConfig(level=loglevel, format="%(levelname)s\t%(message)s")
def main():
"""Console script for ssl_certinfo."""
args = create_parser().parse_args()
if args.displayVersion:
print(VERSION)
return 0
setup_logging(args.verbosity)
logging.info("Arguments: " + str(args))
ssl_certinfo.process_hosts(
expand_hosts(args.host), args.port, args.timeout, args.outform, args.proxy
)
return 0
if __name__ == "__main__":
sys.exit(main()) # pragma: no cover | ssl_certinfo/cli.py | import argparse
import ipaddress
import logging
import os
import re
import sys
from typing import Tuple
from ssl_certinfo import __author__, __email__, __version__, ssl_certinfo, validation
from ssl_certinfo.ssl_certinfo import OutputFormat
VERSION = rf"""
ssl_certinfo {__version__}
Copyright (C) 2020 {__author__} ({__email__})
License Apache-2.0: <http://www.apache.org/licenses/LICENSE-2.0>.
"""
def check_hostname_or_ip_address(value):
"""Validate argparse type hostname/ip address."""
if (
not validation.is_valid_hostname(value)
and not validation.is_valid_ip_address(value)
and not validation.is_valid_ip_network(value)
and not validation.is_valid_ip_range(value)
):
raise argparse.ArgumentTypeError(
"%s is not a valid hostname or ip address" % value
)
return value
def check_proxy_url(value):
"""Validate if parameter is a valid proxy url."""
try:
parsed = parse_proxy_url(value)
except ValueError:
raise argparse.ArgumentTypeError("%s is not a valid proxy url" % value)
return parsed
def parse_proxy_url(proxyurl) -> Tuple[str, str, int]:
if proxyurl == "":
return None
proto = host = port = ""
match = re.match(r"^((http[s]?|socks):\/\/)?([^:\/\s]+)(:(\d+))?$", proxyurl)
if match:
x, proto, host, x, port = match.groups(default="")
else:
locallogger = logging.getLogger("validate.parse_proxy_url")
locallogger.debug("Not a valid proxy url: {}".format(proxyurl))
raise ValueError("Not a valid proxy url: {}".format(proxyurl))
if not proto:
proto = "http"
if (
host
and not validation.is_valid_hostname(host)
and not validation.is_valid_ip_address(host)
):
raise ValueError("Not a valid hostname or ip address: {}".format(port))
if not port:
port = 3128
elif not (0 < int(port) <= 65535):
raise ValueError("Invalid port number: {}".format(port))
return proto, host, int(port)
def check_positive(value):
"""Validate argparse type positive integer."""
try:
ivalue = int(value)
except ValueError:
raise argparse.ArgumentTypeError("%s is not an int value" % value)
if ivalue <= 0:
raise argparse.ArgumentTypeError("%s is not a positive int value" % value)
return ivalue
def check_valid_port(value):
"""Validate argparse type TCP port number."""
try:
ivalue = int(value)
except ValueError:
raise argparse.ArgumentTypeError("%s is an invalid port number" % value)
if ivalue <= 0 or ivalue > 65535:
raise argparse.ArgumentTypeError("%s is an invalid port number" % value)
return ivalue
def expand_hosts(hostlist):
result = []
for elem in hostlist:
if validation.is_valid_hostname(elem) or validation.is_valid_ip_address(elem):
result.append(elem)
elif validation.is_valid_ip_range(elem):
separator = re.compile(r" *- *")
(start, end) = separator.split(elem)
try:
start_addr = ipaddress.ip_address(start)
end_addr = ipaddress.ip_address(end)
except ValueError:
pass
else:
logging.debug("Expanding ip address range " + elem)
current_ipaddr = start_addr
while current_ipaddr <= end_addr:
result.append(str(current_ipaddr))
current_ipaddr = current_ipaddr + 1
else:
try:
net = ipaddress.ip_network(elem, False)
except ValueError:
pass
else:
logging.debug("Expanding ip network " + elem)
for ipaddr in net:
result.append(str(ipaddr))
return result
def create_parser():
"""Create ArgParser."""
parser = argparse.ArgumentParser(
description="Collect information about SSL certificates from a set of hosts"
)
parser.add_argument(
"-V",
"--version",
action="store_true",
dest="displayVersion",
help="display version information and exit",
)
verb_group = parser.add_mutually_exclusive_group()
verb_group.add_argument(
"-v",
"--verbose",
action="count",
dest="verbosity",
default=0,
help="verbose output (repeat for increased verbosity)",
)
verb_group.add_argument(
"-q",
"--quiet",
action="store_const",
const=-1,
default=0,
dest="verbosity",
help="quiet output (show errors only)",
)
parser.add_argument(
"host", nargs="*", type=check_hostname_or_ip_address, help="Connect to HOST",
)
parser.add_argument(
"-p",
"--port",
default=443,
type=check_valid_port,
help="TCP port to connnect to [0-65535]",
)
parser.add_argument(
"-t",
"--timeout",
default=5,
type=check_positive,
help="Maximum time allowed for connection",
)
parser.add_argument(
"-x",
"--proxy",
default=get_proxy_from_env(),
type=check_proxy_url,
help="Use the specified proxy",
metavar="[protocol://]host[:port]",
)
output_format = parser.add_mutually_exclusive_group()
output_format.add_argument(
"-T",
"--table",
action="store_const",
const=OutputFormat.TABLE,
default=OutputFormat.TABLE,
dest="outform",
help="Print results in table format",
)
output_format.add_argument(
"-j",
"--json",
action="store_const",
const=OutputFormat.JSON,
default=OutputFormat.TABLE,
dest="outform",
help="Print results in JSON format",
)
output_format.add_argument(
"-y",
"--yaml",
action="store_const",
const=OutputFormat.YAML,
default=OutputFormat.TABLE,
dest="outform",
help="Print results in YAML format",
)
output_format.add_argument(
"-c",
"--csv",
action="store_const",
const=OutputFormat.CSV,
default=OutputFormat.TABLE,
dest="outform",
help="Print results in CSV format",
)
output_format.add_argument(
"-r",
"--raw",
action="store_const",
const=OutputFormat.RAW,
default=OutputFormat.TABLE,
dest="outform",
help="Print results in raw format",
)
return parser
def get_proxy_from_env():
locallogger = logging.getLogger("cli.get_proxy_from_env")
env_keys = [
"http_proxy",
"HTTP_PROXY",
"https_proxy",
"HTTPS_PROXY",
]
env = os.environ
for key in env_keys:
if key in env:
locallogger.debug(
"Environment variable {} found with value: {}".format(key, env[key])
)
return env[key]
locallogger.debug("No proxy environment variable found.")
return ""
def setup_logging(verbosity):
base_loglevel = 30
verbosity = min(verbosity, 2)
loglevel = base_loglevel - (verbosity * 10)
logging.basicConfig(level=loglevel, format="%(levelname)s\t%(message)s")
def main():
"""Console script for ssl_certinfo."""
args = create_parser().parse_args()
if args.displayVersion:
print(VERSION)
return 0
setup_logging(args.verbosity)
logging.info("Arguments: " + str(args))
ssl_certinfo.process_hosts(
expand_hosts(args.host), args.port, args.timeout, args.outform, args.proxy
)
return 0
if __name__ == "__main__":
sys.exit(main()) # pragma: no cover | 0.558086 | 0.173323 |
import unittest
import ..src.common as common_funcs
import ..src.global_settings as global_settings
import ..src.logger as logger
glob = global_settings.settings()
common = common_funcs.init(glob)
glob.log = logger.start_logging("UNITTEST", "unittest" + "_" + glob.time_str + ".log", glob)
class TestBuilder(unittest.TestCase):
def test_check_for_previous_install(self):
self.assertEqual
def generate_build_report(self):
self.assertEqual
def build_code(self):
self.assertEqual
class TestCommon(unittest.TestCase):
def test_rel_path(self):
# Path empty
self.assertEqual(self.common.rel_path(""), "")
# Relative path
self.assertEqual(self.common.rel_path("some/rel/path"), "some/rel/path")
# Absolute path
self.assertEqual(self.common.rel_path(self.glob.stg['config_path']), "$TOPDIR/config")
def test_find_exact(self):
# File is path
self.assertEqual(self.common.find_exact(self.glob.stg['module_path'] + self.glob.stg['sl'] + "builder.lua", self.glob.stg['module_path']),
self.glob.stg['module_path'] + self.glob.stg['sl'] + "builder.lua")
# Match found
self.assertEqual(self.common.find_exact("slurm.template", self.glob.stg['template_path']), "/scratch/06280/mcawood/bench-framework/templates/sched/slurm.template")
# No match found
self.assertEqual(self.common.find_exact("somefile", self.glob.stg['module_path']), "")
def test_find_partial(self):
# File is path
self.assertEqual(self.common.find_partial(self.glob.stg['module_path'] + self.glob.stg['sl'] + "builder.lua", self.glob.stg['module_path']),
self.glob.stg['module_path'] + self.glob.stg['sl'] + "builder.lua")
# Multiple matches found
self.assertEqual(self.common.find_partial("slurm", self.glob.stg['template_path']), "/scratch/06280/mcawood/bench-framework/templates/sched/slurm.template")
# No matches found
self.assertEqual(self.common.find_partial("gasdgasd", self.glob.stg['template_path']), "")
def test_file_owner(self):
self.assertEqual(self.common.file_owner("/etc/hosts"), "root")
def test_check_module_exists(self):
# Lacking version
self.assertEqual(self.common.check_module_exists("xalt", ""), "xalt/2.8")
# Contains version
self.assertEqual(self.common.check_module_exists("xalt/2.8", ""), "xalt/2.8")
# Invalid module
with self.assertRaises(SystemExit):
self.common.check_module_exists("some_nonexistant_module", "")
# Module use
self.assertEqual(self.common.check_module_exists("mistral", "/scratch/01255/siliu/modulefiles/"), "mistral/2.13.5")
def test_get_module_label(self):
# Module contains slash
self.assertEqual(self.common.get_module_label("intel/18.0.2"), "intel18")
# No slash
self.assertEqual(self.common.get_module_label("intel"), "intel")
def test_get_subdirs(self):
self.assertEqual(self.common.get_subdirs(self.glob.stg['config_path']), ['bench', 'build', 'sched'])
def test_get_installed(self):
self.assertIsNotNone(self.common.get_installed())
def test_check_if_installed(self):
self.assertEqual(self.common.check_if_installed('lammps'))
# def test_(self):
# self.assertEqual(self.common.())
if __name__ == '__main__':
unittest.main() | dev/func_tests.py | import unittest
import ..src.common as common_funcs
import ..src.global_settings as global_settings
import ..src.logger as logger
glob = global_settings.settings()
common = common_funcs.init(glob)
glob.log = logger.start_logging("UNITTEST", "unittest" + "_" + glob.time_str + ".log", glob)
class TestBuilder(unittest.TestCase):
def test_check_for_previous_install(self):
self.assertEqual
def generate_build_report(self):
self.assertEqual
def build_code(self):
self.assertEqual
class TestCommon(unittest.TestCase):
def test_rel_path(self):
# Path empty
self.assertEqual(self.common.rel_path(""), "")
# Relative path
self.assertEqual(self.common.rel_path("some/rel/path"), "some/rel/path")
# Absolute path
self.assertEqual(self.common.rel_path(self.glob.stg['config_path']), "$TOPDIR/config")
def test_find_exact(self):
# File is path
self.assertEqual(self.common.find_exact(self.glob.stg['module_path'] + self.glob.stg['sl'] + "builder.lua", self.glob.stg['module_path']),
self.glob.stg['module_path'] + self.glob.stg['sl'] + "builder.lua")
# Match found
self.assertEqual(self.common.find_exact("slurm.template", self.glob.stg['template_path']), "/scratch/06280/mcawood/bench-framework/templates/sched/slurm.template")
# No match found
self.assertEqual(self.common.find_exact("somefile", self.glob.stg['module_path']), "")
def test_find_partial(self):
# File is path
self.assertEqual(self.common.find_partial(self.glob.stg['module_path'] + self.glob.stg['sl'] + "builder.lua", self.glob.stg['module_path']),
self.glob.stg['module_path'] + self.glob.stg['sl'] + "builder.lua")
# Multiple matches found
self.assertEqual(self.common.find_partial("slurm", self.glob.stg['template_path']), "/scratch/06280/mcawood/bench-framework/templates/sched/slurm.template")
# No matches found
self.assertEqual(self.common.find_partial("gasdgasd", self.glob.stg['template_path']), "")
def test_file_owner(self):
self.assertEqual(self.common.file_owner("/etc/hosts"), "root")
def test_check_module_exists(self):
# Lacking version
self.assertEqual(self.common.check_module_exists("xalt", ""), "xalt/2.8")
# Contains version
self.assertEqual(self.common.check_module_exists("xalt/2.8", ""), "xalt/2.8")
# Invalid module
with self.assertRaises(SystemExit):
self.common.check_module_exists("some_nonexistant_module", "")
# Module use
self.assertEqual(self.common.check_module_exists("mistral", "/scratch/01255/siliu/modulefiles/"), "mistral/2.13.5")
def test_get_module_label(self):
# Module contains slash
self.assertEqual(self.common.get_module_label("intel/18.0.2"), "intel18")
# No slash
self.assertEqual(self.common.get_module_label("intel"), "intel")
def test_get_subdirs(self):
self.assertEqual(self.common.get_subdirs(self.glob.stg['config_path']), ['bench', 'build', 'sched'])
def test_get_installed(self):
self.assertIsNotNone(self.common.get_installed())
def test_check_if_installed(self):
self.assertEqual(self.common.check_if_installed('lammps'))
# def test_(self):
# self.assertEqual(self.common.())
if __name__ == '__main__':
unittest.main() | 0.377196 | 0.259472 |
from scrapy.http import Request
from scrapy.spider import Spider
from scrapy.selector import Selector
from scrapy.contrib.linkextractors import LinkExtractor
from neweggs.processors import NeweggProcessor
class NeweggSpider(Spider):
name = 'newegg'
allowed_domains = ['newegg.com']
start_urls = [
[
'http://www.newegg.com/Game-Guides/SubCategory/ID-643',
'http://www.newegg.com/Game-Guides/SubCategory/ID-643/Page-%s'
],
[
'http://www.newegg.com/Gaming/SubCategory/ID-3190',
'http://www.newegg.com/Gaming/SubCategory/ID-3190/Page-%s'
],
[
'http://www.newegg.com/Downloadable-Games/SubCategory/ID-438',
'http://www.newegg.com/Downloadable-Games/SubCategory/ID-438/Page-%s'
]
]
image_tpl = 'http://images10.newegg.com/NeweggImage/productimage/%s'
meta_page = 'newegg_spider_page'
meta_url_tpl = 'newegg_url_template'
def start_requests(self):
for url in self.start_urls:
yield Request(url[0], meta={self.meta_url_tpl: url[1]},
callback=self.parse_category)
def parse_category(self, response):
# parse list
for link in LinkExtractor(
restrict_xpaths='//*[@class="itemCell"]//*[@class="itemDescription"]/..'
).extract_links(response):
yield Request(link.url, callback=self.parse_details)
# turn the page
if self.meta_page in response.meta:
page = response.meta[self.meta_page] + 1
else:
page = 2
# next list page
if response.xpath('//*[@class="pagination "]//a[starts-with(@href,"javascript:Biz.Common.Pagination") and text()=%s]' % page):
yield Request(response.meta[self.meta_url_tpl] % page, callback=self.parse_category,
meta={
self.meta_page: page,
self.meta_url_tpl: response.meta[self.meta_url_tpl]
})
def parse_details(self, response):
"""Parse product details into item.
@url http://www.newegg.com/Product/Product.aspx?Item=N82E16832205158
@returns items 1 1
@scrapes title image url price
"""
l = NeweggProcessor(response=response, image_tpl=self.image_tpl)
l.selector = Selector(response)
l.add_xpath('title', '//h1/span[@itemprop="name"]/text()')
l.add_xpath('image', '//script', re=r'"imageName":"([\d-]+.\w+)"')
l.add_value('url', response.url)
l.add_xpath('price', '//script', re=r"site_currency:'(\w+)',")
l.add_xpath(
'price', '//script', re=r"product_sale_price:\['(\d+.\d+)'\],")
yield l.load_item() | neweggs/spiders/newegg.py | from scrapy.http import Request
from scrapy.spider import Spider
from scrapy.selector import Selector
from scrapy.contrib.linkextractors import LinkExtractor
from neweggs.processors import NeweggProcessor
class NeweggSpider(Spider):
name = 'newegg'
allowed_domains = ['newegg.com']
start_urls = [
[
'http://www.newegg.com/Game-Guides/SubCategory/ID-643',
'http://www.newegg.com/Game-Guides/SubCategory/ID-643/Page-%s'
],
[
'http://www.newegg.com/Gaming/SubCategory/ID-3190',
'http://www.newegg.com/Gaming/SubCategory/ID-3190/Page-%s'
],
[
'http://www.newegg.com/Downloadable-Games/SubCategory/ID-438',
'http://www.newegg.com/Downloadable-Games/SubCategory/ID-438/Page-%s'
]
]
image_tpl = 'http://images10.newegg.com/NeweggImage/productimage/%s'
meta_page = 'newegg_spider_page'
meta_url_tpl = 'newegg_url_template'
def start_requests(self):
for url in self.start_urls:
yield Request(url[0], meta={self.meta_url_tpl: url[1]},
callback=self.parse_category)
def parse_category(self, response):
# parse list
for link in LinkExtractor(
restrict_xpaths='//*[@class="itemCell"]//*[@class="itemDescription"]/..'
).extract_links(response):
yield Request(link.url, callback=self.parse_details)
# turn the page
if self.meta_page in response.meta:
page = response.meta[self.meta_page] + 1
else:
page = 2
# next list page
if response.xpath('//*[@class="pagination "]//a[starts-with(@href,"javascript:Biz.Common.Pagination") and text()=%s]' % page):
yield Request(response.meta[self.meta_url_tpl] % page, callback=self.parse_category,
meta={
self.meta_page: page,
self.meta_url_tpl: response.meta[self.meta_url_tpl]
})
def parse_details(self, response):
"""Parse product details into item.
@url http://www.newegg.com/Product/Product.aspx?Item=N82E16832205158
@returns items 1 1
@scrapes title image url price
"""
l = NeweggProcessor(response=response, image_tpl=self.image_tpl)
l.selector = Selector(response)
l.add_xpath('title', '//h1/span[@itemprop="name"]/text()')
l.add_xpath('image', '//script', re=r'"imageName":"([\d-]+.\w+)"')
l.add_value('url', response.url)
l.add_xpath('price', '//script', re=r"site_currency:'(\w+)',")
l.add_xpath(
'price', '//script', re=r"product_sale_price:\['(\d+.\d+)'\],")
yield l.load_item() | 0.478773 | 0.129018 |
import numpy as np
import pandas as pd
import os
import time
import sys
from build_arrays import *
from features import *
from util import *
from config import *
np.set_printoptions(suppress=True)
if __name__ == '__main__':
# Load chosen worm & model information:
home = config['home']
dataset = config['dataset']
seed_nuclei = config['seed_nuclei']
experiment_name = config['experiment_name']
model = config['model']
start_idx = config['start_idx']
end_idx = config['end_idx']
time_limit = config['time_limit']
num_save = config['num_save']
initial_cost = config['initial_cost']
resources_path = os.path.join(home, 'exacthgm', 'Resources', model)
data_path = os.path.join(home, 'exacthgm', 'Data')
key_path = os.path.join(data_path, 'keys.npy')
array_path = os.path.join(data_path, 'arrays.npy')
keys_all = np.load(key_path, allow_pickle=True)
arrays_all = np.load(array_path, allow_pickle=True)
keys = keys_all[dataset]
arrays = arrays_all[dataset]
max_frames = len(keys)
end_idx = min(end_idx, max_frames)
print('\n')
print('Start Index:', start_idx)
print('End Index:', end_idx,'\n')
for i in range(start_idx, end_idx):
t = keys[i]
nuclei_arr = arrays[i]
n = nuclei_arr.shape[0]
m = int(n/2)
# Define the correct lattice
tl = [(i,i+1) for i in range(0,n,2)][::-1]
# Find the point in embryogenesis:
interval = get_time(t,.05)
print('Index:', i)
print('Key:', np.round(t,2))
print('Interval:', interval)
print('n:', n)
print('Dataset:', dataset)
print('Model:', model)
print('Seeded nuclei:', seed_nuclei)
print('Initial cost:', initial_cost, '\n')
pairs = get_P_MP(nuclei_arr)
P = subset_P_seeds(pairs, seed_nuclei, tl)
P = order_tail_pairs(P, nuclei_arr)
n_tails = len(P[0])
best_cost = initial_cost
best_seqs = np.zeros((num_save, m, 2),'int')
best_costs = initial_cost*np.ones(num_save)
if model == 'QAP':
if n == 20:
op_means_widths = os.path.join(resources_path, 'means_widths.npy')
op_sigma_invs_widths = os.path.join(resources_path, 'sigma_invs_widths.npy')
op_means_pairs = os.path.join(resources_path, 'means_pairs.npy')
op_sigma_invs_pairs = os.path.join(resources_path, 'sigma_invs_pairs.npy')
elif n == 22:
op_means_widths = os.path.join(resources_path, 'means_widths_Q.npy')
op_sigma_invs_widths = os.path.join(resources_path, 'sigma_invs_widths_Q.npy')
op_means_pairs = os.path.join(resources_path, 'means_pairs_Q.npy')
op_sigma_invs_pairs = os.path.join(resources_path, 'sigma_invs_pairs_Q.npy')
means_widths = np.load(op_means_widths)
sigma_invs_widths = np.load(op_sigma_invs_widths)
means_pairs = np.load(op_means_pairs)
sigma_invs_pairs = np.load(op_sigma_invs_pairs)
# This dataset
this_means_widths = means_widths[dataset]
this_sigma_invs_widths = sigma_invs_widths[dataset]
this_means_pairs = means_pairs[dataset]
this_sigma_invs_pairs = sigma_invs_pairs[dataset]
this_interval_means_widths = this_means_widths[interval][0]
this_interval_sigma_invs_widths = this_sigma_invs_widths[interval][0]
this_interval_means_pairs = this_means_pairs[interval]
this_interval_sigma_invs_pairs = this_sigma_invs_pairs[interval]
H = get_H_QAP(nuclei_arr, P, this_interval_means_pairs, this_interval_sigma_invs_pairs)
connections = order_H(P,H)
start_time = time.time()
for j in range(n_tails):
run_time = time.time() - start_time
source = P[0][j]
print('Tail:', j+1, 'of', n_tails, '-', source)
print('Best Cost:', best_cost)
HGM = Exact_HGM(nuclei_arr, H, connections, best_cost, source, start_time, time_limit, num_save, best_seqs, best_costs)
EHGM = QAP(nuclei_arr, H, connections, best_cost, source, start_time, time_limit, num_save, this_interval_means_widths, this_interval_sigma_invs_widths, best_seqs, best_costs)
EHGM.Begin_Search()
best_seqs = EHGM.best_seqs
best_costs = EHGM.best_costs
best_cost = best_costs[0]
elif model == 'Pairs':
if n == 20:
op_means_pairs = os.path.join(resources_path, 'means_pairs.npy')
op_sigma_invs_pairs = os.path.join(resources_path, 'sigma_invs_pairs.npy')
op_means_triples = os.path.join(resources_path, 'means_triples.npy')
op_sigma_invs_triples = os.path.join(resources_path, 'sigma_invs_triples.npy')
elif n == 22:
op_means_pairs = os.path.join(resources_path, 'means_pairs_Q.npy')
op_sigma_invs_pairs = os.path.join(resources_path, 'sigma_invs_pairs_Q.npy')
op_means_triples = os.path.join(resources_path, 'means_triples_Q.npy')
op_sigma_invs_triples = os.path.join(resources_path, 'sigma_invs_triples_Q.npy')
means_pairs = np.load(op_means_pairs)
sigma_invs_pairs = np.load(op_sigma_invs_pairs)
means_triples = np.load(op_means_triples)
sigma_invs_triples = np.load(op_sigma_invs_triples)
# This dataset
this_means_pairs = means_pairs[dataset]
this_sigma_invs_pairs = sigma_invs_pairs[dataset]
this_means_triples = means_triples[dataset]
this_sigma_invs_triples = sigma_invs_triples[dataset]
this_interval_means_pairs = this_means_pairs[interval]
this_interval_sigma_invs_pairs = this_sigma_invs_pairs[interval]
this_interval_means_triples = this_means_triples[interval]
this_interval_sigma_invs_triples = this_sigma_invs_triples[interval]
H = get_H_Pairs(nuclei_arr, P, this_interval_means_pairs, this_interval_sigma_invs_pairs)
connections = order_H(P,H)
start_time = time.time()
for j in range(n_tails):
run_time = time.time() - start_time
source = P[0][j]
print('Tail:', j+1, 'of', n_tails, '-', source)
print('Best Cost:', best_cost)
HGM = Exact_HGM(nuclei_arr, H, connections, best_cost, source, start_time, time_limit, num_save, best_seqs, best_costs)
EHGM = Pairs(nuclei_arr, H, connections, best_cost, source, start_time, time_limit, num_save, this_interval_means_triples, this_interval_sigma_invs_triples, best_seqs, best_costs)
EHGM.Begin_Search()
best_seqs = EHGM.best_seqs
best_costs = EHGM.best_costs
best_cost = best_costs[0]
elif model == 'Full':
resources_path = os.path.join(home, 'exacthgm', 'Resources')
if n == 20:
ip_means_pairs = os.path.join(resources_path, 'Pairs', 'means_pairs.npy')
ip_sigma_invs_pairs = os.path.join(resources_path, 'Pairs', 'sigma_invs_pairs.npy')
ip_means_seq = os.path.join(resources_path, 'Full', 'means_sequence.npy')
ip_sigma_invs_seq = os.path.join(resources_path, 'Full', 'sigma_invs_sequence.npy')
ip_mean_cost_estimates = os.path.join(resources_path, 'Full', 'mean_cost_estimates.npy')
ip_sigma_inv_cost_estimates = os.path.join(resources_path, 'Full', 'sigma_inv_cost_estimates.npy')
elif n == 22:
ip_means_pairs = os.path.join(resources_path, 'Pairs', 'means_pairs_Q.npy')
ip_sigma_invs_pairs = os.path.join(resources_path, 'Pairs', 'sigma_invs_pairs_Q.npy')
ip_means_seq = os.path.join(resources_path, 'Full', 'means_sequence_Q.npy')
ip_sigma_invs_seq = os.path.join(resources_path, 'Full', 'sigma_invs_sequence_Q.npy')
ip_mean_cost_estimates = os.path.join(resources_path, 'Full', 'mean_cost_estimates_Q.npy')
ip_sigma_inv_cost_estimates = os.path.join(resources_path, 'Full', 'sigma_inv_cost_estimates_Q.npy')
means_pairs = np.load(ip_means_pairs)
sigma_invs_pairs = np.load(ip_sigma_invs_pairs)
means_sequence = np.load(ip_means_seq)
sigma_invs_sequence = np.load(ip_sigma_invs_seq)
mean_cost_estimates = np.load(ip_mean_cost_estimates)
sigma_inv_cost_estimates = np.load(ip_sigma_inv_cost_estimates)
this_means_pairs = means_pairs[dataset]
this_sigma_invs_pairs = sigma_invs_pairs[dataset]
this_means_sequence = means_sequence[dataset]
this_sigma_invs_sequence = sigma_invs_sequence[dataset]
this_mean_cost_est = mean_cost_estimates[dataset]
this_sigma_inv_cost_est = sigma_inv_cost_estimates[dataset]
this_interval_means_pairs = this_means_pairs[interval]
this_interval_sigma_invs_pairs = this_sigma_invs_pairs[interval]
this_interval_means_sequence = this_means_sequence[interval]
this_interval_sigma_invs_sequence = this_sigma_invs_sequence[interval]
H = get_H_Pairs(nuclei_arr, P, this_interval_means_pairs, this_interval_sigma_invs_pairs)
connections = order_H(P,H)
start_time = time.time()
for j in range(n_tails):
run_time = time.time() - start_time
source = P[0][j]
print('Tail:', j+1, 'of', n_tails, '-', source)
print('Best Cost:', best_cost)
HGM = Exact_HGM(nuclei_arr, H, connections, best_cost, source, start_time, time_limit, num_save, best_seqs, best_costs)
EHGM = Full(nuclei_arr, H, connections, best_cost, source, start_time, time_limit, num_save, this_interval_means_sequence, this_interval_sigma_invs_sequence, this_mean_cost_est, this_sigma_inv_cost_est, best_seqs, best_costs)
EHGM.Begin_Search()
best_seqs = EHGM.best_seqs
best_costs = EHGM.best_costs
best_cost = best_costs[0]
elif model == 'PF':
resources_path = os.path.join(home, 'exacthgm', 'Resources')
if n == 20:
ip_means_pairs = os.path.join(resources_path, 'Pairs', 'means_pairs.npy')
ip_sigma_invs_pairs = os.path.join(resources_path, 'Pairs', 'sigma_invs_pairs.npy')
ip_means_triples = os.path.join(resources_path, 'Pairs', 'means_triples.npy')
ip_sigma_invs_triples = os.path.join(resources_path, 'Pairs', 'sigma_invs_triples.npy')
ip_means_seq = os.path.join(resources_path, 'Full', 'means_sequence.npy')
ip_sigma_invs_seq = os.path.join(resources_path, 'Full', 'sigma_invs_sequence.npy')
elif n == 22:
ip_means_pairs = os.path.join(resources_path, 'Pairs', 'means_pairs_Q.npy')
ip_sigma_invs_pairs = os.path.join(resources_path, 'Pairs', 'sigma_invs_pairs_Q.npy')
ip_means_triples = os.path.join(resources_path, 'Pairs', 'means_triples_Q.npy')
ip_sigma_invs_triples = os.path.join(resources_path, 'Pairs', 'sigma_invs_triples_Q.npy')
ip_means_seq = os.path.join(resources_path, 'Full', 'means_sequence_Q.npy')
ip_sigma_invs_seq = os.path.join(resources_path, 'Full', 'sigma_invs_sequence_Q.npy')
means_pairs = np.load(ip_means_pairs)
sigma_invs_pairs = np.load(ip_sigma_invs_pairs)
means_triples = np.load(ip_means_triples)
sigma_invs_triples = np.load(ip_sigma_invs_triples)
means_sequence = np.load(ip_means_seq)
sigma_invs_sequence = np.load(ip_sigma_invs_seq)
this_means_pairs = means_pairs[dataset]
this_sigma_invs_pairs = sigma_invs_pairs[dataset]
this_means_triples = means_triples[dataset]
this_sigma_invs_triples = sigma_invs_triples[dataset]
this_means_sequence = means_sequence[dataset]
this_sigma_invs_sequence = sigma_invs_sequence[dataset]
this_interval_means_pairs = this_means_pairs[interval]
this_interval_sigma_invs_pairs = this_sigma_invs_pairs[interval]
this_interval_means_triples = this_means_triples[interval]
this_interval_sigma_invs_triples = this_sigma_invs_triples[interval]
this_interval_means_sequence = this_means_sequence[interval]
this_interval_sigma_invs_sequence = this_sigma_invs_sequence[interval]
this_interval_means_sequence_full = this_interval_means_sequence[-1]
this_interval_sigma_invs_sequence_full = this_interval_sigma_invs_sequence[-1]
H = get_H_Pairs(nuclei_arr, P, this_interval_means_pairs, this_interval_sigma_invs_pairs)
connections = order_H(P,H)
start_time = time.time()
for j in range(n_tails):
run_time = time.time() - start_time
source = P[0][j]
print('Tail:', j+1, 'of', n_tails, '-', source)
print('Best Cost:', best_cost)
HGM = Exact_HGM(nuclei_arr, H, connections, best_cost, source, start_time, time_limit, num_save, best_seqs, best_costs)
EHGM = PF(nuclei_arr, H, connections, best_cost, source, start_time, time_limit, num_save, this_interval_means_triples, this_interval_sigma_invs_triples, this_interval_means_sequence_full, this_interval_sigma_invs_sequence_full, best_seqs, best_costs)
EHGM.Begin_Search()
best_seqs = EHGM.best_seqs
best_costs = EHGM.best_costs
best_cost = best_costs[0]
# Store Results:
runtime = EHGM.elapsed_time
print('\n')
print('Runtime:', np.round(runtime,2))
for j in range(num_save):
print(j+1, best_seqs[j].T, best_costs[j], '\n')
out_path = os.path.join(home, 'exacthgm', 'Results', experiment_name, model, str(dataset), str(i))
if not os.path.isdir(out_path):
os.makedirs(out_path)
out_path_seqs = os.path.join(out_path, 'best_sequences.npy')
out_path_costs = os.path.join(out_path, 'best_costs.npy')
out_path_runtime = os.path.join(out_path, 'runtime.npy')
np.save(out_path_seqs, best_seqs)
np.save(out_path_costs, best_costs)
np.save(out_path_runtime, runtime) | search.py | import numpy as np
import pandas as pd
import os
import time
import sys
from build_arrays import *
from features import *
from util import *
from config import *
np.set_printoptions(suppress=True)
if __name__ == '__main__':
# Load chosen worm & model information:
home = config['home']
dataset = config['dataset']
seed_nuclei = config['seed_nuclei']
experiment_name = config['experiment_name']
model = config['model']
start_idx = config['start_idx']
end_idx = config['end_idx']
time_limit = config['time_limit']
num_save = config['num_save']
initial_cost = config['initial_cost']
resources_path = os.path.join(home, 'exacthgm', 'Resources', model)
data_path = os.path.join(home, 'exacthgm', 'Data')
key_path = os.path.join(data_path, 'keys.npy')
array_path = os.path.join(data_path, 'arrays.npy')
keys_all = np.load(key_path, allow_pickle=True)
arrays_all = np.load(array_path, allow_pickle=True)
keys = keys_all[dataset]
arrays = arrays_all[dataset]
max_frames = len(keys)
end_idx = min(end_idx, max_frames)
print('\n')
print('Start Index:', start_idx)
print('End Index:', end_idx,'\n')
for i in range(start_idx, end_idx):
t = keys[i]
nuclei_arr = arrays[i]
n = nuclei_arr.shape[0]
m = int(n/2)
# Define the correct lattice
tl = [(i,i+1) for i in range(0,n,2)][::-1]
# Find the point in embryogenesis:
interval = get_time(t,.05)
print('Index:', i)
print('Key:', np.round(t,2))
print('Interval:', interval)
print('n:', n)
print('Dataset:', dataset)
print('Model:', model)
print('Seeded nuclei:', seed_nuclei)
print('Initial cost:', initial_cost, '\n')
pairs = get_P_MP(nuclei_arr)
P = subset_P_seeds(pairs, seed_nuclei, tl)
P = order_tail_pairs(P, nuclei_arr)
n_tails = len(P[0])
best_cost = initial_cost
best_seqs = np.zeros((num_save, m, 2),'int')
best_costs = initial_cost*np.ones(num_save)
if model == 'QAP':
if n == 20:
op_means_widths = os.path.join(resources_path, 'means_widths.npy')
op_sigma_invs_widths = os.path.join(resources_path, 'sigma_invs_widths.npy')
op_means_pairs = os.path.join(resources_path, 'means_pairs.npy')
op_sigma_invs_pairs = os.path.join(resources_path, 'sigma_invs_pairs.npy')
elif n == 22:
op_means_widths = os.path.join(resources_path, 'means_widths_Q.npy')
op_sigma_invs_widths = os.path.join(resources_path, 'sigma_invs_widths_Q.npy')
op_means_pairs = os.path.join(resources_path, 'means_pairs_Q.npy')
op_sigma_invs_pairs = os.path.join(resources_path, 'sigma_invs_pairs_Q.npy')
means_widths = np.load(op_means_widths)
sigma_invs_widths = np.load(op_sigma_invs_widths)
means_pairs = np.load(op_means_pairs)
sigma_invs_pairs = np.load(op_sigma_invs_pairs)
# This dataset
this_means_widths = means_widths[dataset]
this_sigma_invs_widths = sigma_invs_widths[dataset]
this_means_pairs = means_pairs[dataset]
this_sigma_invs_pairs = sigma_invs_pairs[dataset]
this_interval_means_widths = this_means_widths[interval][0]
this_interval_sigma_invs_widths = this_sigma_invs_widths[interval][0]
this_interval_means_pairs = this_means_pairs[interval]
this_interval_sigma_invs_pairs = this_sigma_invs_pairs[interval]
H = get_H_QAP(nuclei_arr, P, this_interval_means_pairs, this_interval_sigma_invs_pairs)
connections = order_H(P,H)
start_time = time.time()
for j in range(n_tails):
run_time = time.time() - start_time
source = P[0][j]
print('Tail:', j+1, 'of', n_tails, '-', source)
print('Best Cost:', best_cost)
HGM = Exact_HGM(nuclei_arr, H, connections, best_cost, source, start_time, time_limit, num_save, best_seqs, best_costs)
EHGM = QAP(nuclei_arr, H, connections, best_cost, source, start_time, time_limit, num_save, this_interval_means_widths, this_interval_sigma_invs_widths, best_seqs, best_costs)
EHGM.Begin_Search()
best_seqs = EHGM.best_seqs
best_costs = EHGM.best_costs
best_cost = best_costs[0]
elif model == 'Pairs':
if n == 20:
op_means_pairs = os.path.join(resources_path, 'means_pairs.npy')
op_sigma_invs_pairs = os.path.join(resources_path, 'sigma_invs_pairs.npy')
op_means_triples = os.path.join(resources_path, 'means_triples.npy')
op_sigma_invs_triples = os.path.join(resources_path, 'sigma_invs_triples.npy')
elif n == 22:
op_means_pairs = os.path.join(resources_path, 'means_pairs_Q.npy')
op_sigma_invs_pairs = os.path.join(resources_path, 'sigma_invs_pairs_Q.npy')
op_means_triples = os.path.join(resources_path, 'means_triples_Q.npy')
op_sigma_invs_triples = os.path.join(resources_path, 'sigma_invs_triples_Q.npy')
means_pairs = np.load(op_means_pairs)
sigma_invs_pairs = np.load(op_sigma_invs_pairs)
means_triples = np.load(op_means_triples)
sigma_invs_triples = np.load(op_sigma_invs_triples)
# This dataset
this_means_pairs = means_pairs[dataset]
this_sigma_invs_pairs = sigma_invs_pairs[dataset]
this_means_triples = means_triples[dataset]
this_sigma_invs_triples = sigma_invs_triples[dataset]
this_interval_means_pairs = this_means_pairs[interval]
this_interval_sigma_invs_pairs = this_sigma_invs_pairs[interval]
this_interval_means_triples = this_means_triples[interval]
this_interval_sigma_invs_triples = this_sigma_invs_triples[interval]
H = get_H_Pairs(nuclei_arr, P, this_interval_means_pairs, this_interval_sigma_invs_pairs)
connections = order_H(P,H)
start_time = time.time()
for j in range(n_tails):
run_time = time.time() - start_time
source = P[0][j]
print('Tail:', j+1, 'of', n_tails, '-', source)
print('Best Cost:', best_cost)
HGM = Exact_HGM(nuclei_arr, H, connections, best_cost, source, start_time, time_limit, num_save, best_seqs, best_costs)
EHGM = Pairs(nuclei_arr, H, connections, best_cost, source, start_time, time_limit, num_save, this_interval_means_triples, this_interval_sigma_invs_triples, best_seqs, best_costs)
EHGM.Begin_Search()
best_seqs = EHGM.best_seqs
best_costs = EHGM.best_costs
best_cost = best_costs[0]
elif model == 'Full':
resources_path = os.path.join(home, 'exacthgm', 'Resources')
if n == 20:
ip_means_pairs = os.path.join(resources_path, 'Pairs', 'means_pairs.npy')
ip_sigma_invs_pairs = os.path.join(resources_path, 'Pairs', 'sigma_invs_pairs.npy')
ip_means_seq = os.path.join(resources_path, 'Full', 'means_sequence.npy')
ip_sigma_invs_seq = os.path.join(resources_path, 'Full', 'sigma_invs_sequence.npy')
ip_mean_cost_estimates = os.path.join(resources_path, 'Full', 'mean_cost_estimates.npy')
ip_sigma_inv_cost_estimates = os.path.join(resources_path, 'Full', 'sigma_inv_cost_estimates.npy')
elif n == 22:
ip_means_pairs = os.path.join(resources_path, 'Pairs', 'means_pairs_Q.npy')
ip_sigma_invs_pairs = os.path.join(resources_path, 'Pairs', 'sigma_invs_pairs_Q.npy')
ip_means_seq = os.path.join(resources_path, 'Full', 'means_sequence_Q.npy')
ip_sigma_invs_seq = os.path.join(resources_path, 'Full', 'sigma_invs_sequence_Q.npy')
ip_mean_cost_estimates = os.path.join(resources_path, 'Full', 'mean_cost_estimates_Q.npy')
ip_sigma_inv_cost_estimates = os.path.join(resources_path, 'Full', 'sigma_inv_cost_estimates_Q.npy')
means_pairs = np.load(ip_means_pairs)
sigma_invs_pairs = np.load(ip_sigma_invs_pairs)
means_sequence = np.load(ip_means_seq)
sigma_invs_sequence = np.load(ip_sigma_invs_seq)
mean_cost_estimates = np.load(ip_mean_cost_estimates)
sigma_inv_cost_estimates = np.load(ip_sigma_inv_cost_estimates)
this_means_pairs = means_pairs[dataset]
this_sigma_invs_pairs = sigma_invs_pairs[dataset]
this_means_sequence = means_sequence[dataset]
this_sigma_invs_sequence = sigma_invs_sequence[dataset]
this_mean_cost_est = mean_cost_estimates[dataset]
this_sigma_inv_cost_est = sigma_inv_cost_estimates[dataset]
this_interval_means_pairs = this_means_pairs[interval]
this_interval_sigma_invs_pairs = this_sigma_invs_pairs[interval]
this_interval_means_sequence = this_means_sequence[interval]
this_interval_sigma_invs_sequence = this_sigma_invs_sequence[interval]
H = get_H_Pairs(nuclei_arr, P, this_interval_means_pairs, this_interval_sigma_invs_pairs)
connections = order_H(P,H)
start_time = time.time()
for j in range(n_tails):
run_time = time.time() - start_time
source = P[0][j]
print('Tail:', j+1, 'of', n_tails, '-', source)
print('Best Cost:', best_cost)
HGM = Exact_HGM(nuclei_arr, H, connections, best_cost, source, start_time, time_limit, num_save, best_seqs, best_costs)
EHGM = Full(nuclei_arr, H, connections, best_cost, source, start_time, time_limit, num_save, this_interval_means_sequence, this_interval_sigma_invs_sequence, this_mean_cost_est, this_sigma_inv_cost_est, best_seqs, best_costs)
EHGM.Begin_Search()
best_seqs = EHGM.best_seqs
best_costs = EHGM.best_costs
best_cost = best_costs[0]
elif model == 'PF':
resources_path = os.path.join(home, 'exacthgm', 'Resources')
if n == 20:
ip_means_pairs = os.path.join(resources_path, 'Pairs', 'means_pairs.npy')
ip_sigma_invs_pairs = os.path.join(resources_path, 'Pairs', 'sigma_invs_pairs.npy')
ip_means_triples = os.path.join(resources_path, 'Pairs', 'means_triples.npy')
ip_sigma_invs_triples = os.path.join(resources_path, 'Pairs', 'sigma_invs_triples.npy')
ip_means_seq = os.path.join(resources_path, 'Full', 'means_sequence.npy')
ip_sigma_invs_seq = os.path.join(resources_path, 'Full', 'sigma_invs_sequence.npy')
elif n == 22:
ip_means_pairs = os.path.join(resources_path, 'Pairs', 'means_pairs_Q.npy')
ip_sigma_invs_pairs = os.path.join(resources_path, 'Pairs', 'sigma_invs_pairs_Q.npy')
ip_means_triples = os.path.join(resources_path, 'Pairs', 'means_triples_Q.npy')
ip_sigma_invs_triples = os.path.join(resources_path, 'Pairs', 'sigma_invs_triples_Q.npy')
ip_means_seq = os.path.join(resources_path, 'Full', 'means_sequence_Q.npy')
ip_sigma_invs_seq = os.path.join(resources_path, 'Full', 'sigma_invs_sequence_Q.npy')
means_pairs = np.load(ip_means_pairs)
sigma_invs_pairs = np.load(ip_sigma_invs_pairs)
means_triples = np.load(ip_means_triples)
sigma_invs_triples = np.load(ip_sigma_invs_triples)
means_sequence = np.load(ip_means_seq)
sigma_invs_sequence = np.load(ip_sigma_invs_seq)
this_means_pairs = means_pairs[dataset]
this_sigma_invs_pairs = sigma_invs_pairs[dataset]
this_means_triples = means_triples[dataset]
this_sigma_invs_triples = sigma_invs_triples[dataset]
this_means_sequence = means_sequence[dataset]
this_sigma_invs_sequence = sigma_invs_sequence[dataset]
this_interval_means_pairs = this_means_pairs[interval]
this_interval_sigma_invs_pairs = this_sigma_invs_pairs[interval]
this_interval_means_triples = this_means_triples[interval]
this_interval_sigma_invs_triples = this_sigma_invs_triples[interval]
this_interval_means_sequence = this_means_sequence[interval]
this_interval_sigma_invs_sequence = this_sigma_invs_sequence[interval]
this_interval_means_sequence_full = this_interval_means_sequence[-1]
this_interval_sigma_invs_sequence_full = this_interval_sigma_invs_sequence[-1]
H = get_H_Pairs(nuclei_arr, P, this_interval_means_pairs, this_interval_sigma_invs_pairs)
connections = order_H(P,H)
start_time = time.time()
for j in range(n_tails):
run_time = time.time() - start_time
source = P[0][j]
print('Tail:', j+1, 'of', n_tails, '-', source)
print('Best Cost:', best_cost)
HGM = Exact_HGM(nuclei_arr, H, connections, best_cost, source, start_time, time_limit, num_save, best_seqs, best_costs)
EHGM = PF(nuclei_arr, H, connections, best_cost, source, start_time, time_limit, num_save, this_interval_means_triples, this_interval_sigma_invs_triples, this_interval_means_sequence_full, this_interval_sigma_invs_sequence_full, best_seqs, best_costs)
EHGM.Begin_Search()
best_seqs = EHGM.best_seqs
best_costs = EHGM.best_costs
best_cost = best_costs[0]
# Store Results:
runtime = EHGM.elapsed_time
print('\n')
print('Runtime:', np.round(runtime,2))
for j in range(num_save):
print(j+1, best_seqs[j].T, best_costs[j], '\n')
out_path = os.path.join(home, 'exacthgm', 'Results', experiment_name, model, str(dataset), str(i))
if not os.path.isdir(out_path):
os.makedirs(out_path)
out_path_seqs = os.path.join(out_path, 'best_sequences.npy')
out_path_costs = os.path.join(out_path, 'best_costs.npy')
out_path_runtime = os.path.join(out_path, 'runtime.npy')
np.save(out_path_seqs, best_seqs)
np.save(out_path_costs, best_costs)
np.save(out_path_runtime, runtime) | 0.241489 | 0.14069 |
import os
import yaml
import launch
import launch_ros
from ament_index_python import get_package_share_directory
def get_package_file(package, file_path):
"""Get the location of a file installed in an ament package"""
package_path = get_package_share_directory(package)
absolute_file_path = os.path.join(package_path, file_path)
return absolute_file_path
def load_file(file_path):
"""Load the contents of a file into a string"""
try:
with open(file_path, 'r') as file:
return file.read()
except EnvironmentError: # parent of IOError, OSError *and* WindowsError where available
return None
def load_yaml(file_path):
"""Load a yaml file into a dictionary"""
try:
with open(file_path, 'r') as file:
return yaml.safe_load(file)
except EnvironmentError: # parent of IOError, OSError *and* WindowsError where available
return None
def run_xacro(xacro_file):
"""Run xacro and output a file in the same directory with the same name, w/o a .xacro suffix"""
urdf_file, ext = os.path.splitext(xacro_file)
if ext != '.xacro':
raise RuntimeError(f'Input file to xacro must have a .xacro extension, got {xacro_file}')
os.system(f'xacro {xacro_file} -o {urdf_file}')
return urdf_file
def generate_launch_description():
xacro_file = get_package_file('myworkcell_support', 'urdf/workcell.urdf.xacro')
urdf_xml = load_file(run_xacro(xacro_file))
return launch.LaunchDescription([
launch_ros.actions.Node(
name='robot_state_publisher',
package='robot_state_publisher',
executable='robot_state_publisher',
output='screen',
parameters=[
{'robot_description': urdf_xml}
]
),
launch_ros.actions.Node(
name='joint_state_publisher_gui',
package='joint_state_publisher_gui',
executable='joint_state_publisher_gui',
output='screen',
),
launch_ros.actions.Node(
name='rviz',
package='rviz2',
executable='rviz2',
output='screen',
)
]) | exercises/4.0/ros2/src/myworkcell_support/launch/urdf.launch.py | import os
import yaml
import launch
import launch_ros
from ament_index_python import get_package_share_directory
def get_package_file(package, file_path):
"""Get the location of a file installed in an ament package"""
package_path = get_package_share_directory(package)
absolute_file_path = os.path.join(package_path, file_path)
return absolute_file_path
def load_file(file_path):
"""Load the contents of a file into a string"""
try:
with open(file_path, 'r') as file:
return file.read()
except EnvironmentError: # parent of IOError, OSError *and* WindowsError where available
return None
def load_yaml(file_path):
"""Load a yaml file into a dictionary"""
try:
with open(file_path, 'r') as file:
return yaml.safe_load(file)
except EnvironmentError: # parent of IOError, OSError *and* WindowsError where available
return None
def run_xacro(xacro_file):
"""Run xacro and output a file in the same directory with the same name, w/o a .xacro suffix"""
urdf_file, ext = os.path.splitext(xacro_file)
if ext != '.xacro':
raise RuntimeError(f'Input file to xacro must have a .xacro extension, got {xacro_file}')
os.system(f'xacro {xacro_file} -o {urdf_file}')
return urdf_file
def generate_launch_description():
xacro_file = get_package_file('myworkcell_support', 'urdf/workcell.urdf.xacro')
urdf_xml = load_file(run_xacro(xacro_file))
return launch.LaunchDescription([
launch_ros.actions.Node(
name='robot_state_publisher',
package='robot_state_publisher',
executable='robot_state_publisher',
output='screen',
parameters=[
{'robot_description': urdf_xml}
]
),
launch_ros.actions.Node(
name='joint_state_publisher_gui',
package='joint_state_publisher_gui',
executable='joint_state_publisher_gui',
output='screen',
),
launch_ros.actions.Node(
name='rviz',
package='rviz2',
executable='rviz2',
output='screen',
)
]) | 0.426799 | 0.223271 |
from __future__ import unicode_literals
import os
import random
from django.db import models
from django.db.models.signals import pre_save
from django.dispatch import receiver
from django.template.defaultfilters import slugify
from django.urls import reverse
from django.db.models import Q
# Create your models here.
'''
Below two functions are used to store the imaged with the random file name in the media storage.
'''
def get_extension(filename):
base_name = os.path.basename(filename)
name, ext = os.path.splitext(base_name)
return name, ext
def get_newfilename(instance, filename):
new_filename = random.randint(1, 2232323)
name, ext = get_extension(filename)
final_filename = '{new_filename}{ext}'.format(new_filename=new_filename, ext=ext)
return "products/{new_filename}/{final_filename}".format(new_filename=new_filename, final_filename=final_filename)
'''
Custom manager for for relative simple query functions core object query :)
'''
class ProductManager(models.Manager):
def get_by_id(self, pk):
qs = self.get_queryset().filter(pk=pk)
if len(qs) == 1:
return qs.first()
return None
def search(self, query):
return self.get_queryset().filter(Q(title__icontains=query)
| Q(description__icontains=query)
| Q(tag__title__icontains=query)).distinct()
class Product(models.Model): # PRODUCT MODEL
title = models.CharField(max_length=20)
description = models.TextField()
price = models.IntegerField(null=True)
image = models.ImageField(upload_to=get_newfilename, null=True, blank=True)
slug = models.SlugField(blank=True, null=True)
objects = ProductManager()
# Used for reverse URL as products/{slug} is hardcoded.
def get_url(self):
return reverse("products:detail", kwargs={"slug": self.slug})
def __str__(self):
return self.title
'''
As soon as Model load and we are adding the product model object we need to slugify that object so pre-save method is used.
'''
@receiver(pre_save, sender=Product)
def my_callback(sender, instance, *args, **kwargs):
instance.slug = slugify(instance.title) | src/products/models.py | from __future__ import unicode_literals
import os
import random
from django.db import models
from django.db.models.signals import pre_save
from django.dispatch import receiver
from django.template.defaultfilters import slugify
from django.urls import reverse
from django.db.models import Q
# Create your models here.
'''
Below two functions are used to store the imaged with the random file name in the media storage.
'''
def get_extension(filename):
base_name = os.path.basename(filename)
name, ext = os.path.splitext(base_name)
return name, ext
def get_newfilename(instance, filename):
new_filename = random.randint(1, 2232323)
name, ext = get_extension(filename)
final_filename = '{new_filename}{ext}'.format(new_filename=new_filename, ext=ext)
return "products/{new_filename}/{final_filename}".format(new_filename=new_filename, final_filename=final_filename)
'''
Custom manager for for relative simple query functions core object query :)
'''
class ProductManager(models.Manager):
def get_by_id(self, pk):
qs = self.get_queryset().filter(pk=pk)
if len(qs) == 1:
return qs.first()
return None
def search(self, query):
return self.get_queryset().filter(Q(title__icontains=query)
| Q(description__icontains=query)
| Q(tag__title__icontains=query)).distinct()
class Product(models.Model): # PRODUCT MODEL
title = models.CharField(max_length=20)
description = models.TextField()
price = models.IntegerField(null=True)
image = models.ImageField(upload_to=get_newfilename, null=True, blank=True)
slug = models.SlugField(blank=True, null=True)
objects = ProductManager()
# Used for reverse URL as products/{slug} is hardcoded.
def get_url(self):
return reverse("products:detail", kwargs={"slug": self.slug})
def __str__(self):
return self.title
'''
As soon as Model load and we are adding the product model object we need to slugify that object so pre-save method is used.
'''
@receiver(pre_save, sender=Product)
def my_callback(sender, instance, *args, **kwargs):
instance.slug = slugify(instance.title) | 0.502686 | 0.118845 |
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.cbook as cbook
import numpy as np
import math
# State vector:
# 0-3: quaternions (q0, q1, q2, q3)
# 4-6: Velocity - m/sec (North, East, Down)
# 7-9: Position - m (North, East, Down)
# 10-12: Delta Angle bias - rad (X,Y,Z)
# 13-14: Wind Vector - m/sec (North,East)
# 15-17: Earth Magnetic Field Vector - milligauss (North, East, Down)
# 18-20: Body Magnetic Field Vector - milligauss (X,Y,Z)
data = np.genfromtxt('FlowRawOut.txt', delimiter=' ', skip_header=1,
skip_footer=1, names=['time', 'flowRadX', 'flowRadY', 'gyroX', 'gyroY', 'flowMsX', 'flowMsY', 'GpsVn', 'GpsVe', 'distance'])
fig = plt.figure()
ax1 = fig.add_subplot(211)
ax1.set_title("Flow angular rate estimate")
ax1.set_xlabel('time (s)')
ax1.set_ylabel('flow X (rad)')
ax1.plot(data['time'], data['flowRadX'], color='r', label='flow x')
#ax1.plot(data['time'], data['gyroX'], color='b', label='angular rate x')
#ax1.plot(data['time'], data['predFlowX'], color='g', label='pred flow x')
ax2 = fig.add_subplot(212)
ax2.set_xlabel('time (s)')
ax2.set_ylabel('flow Y (rad)')
ax2.plot(data['time'], data['flowRadY'], color='r', label='flow y')
#ax2.plot(data['time'], data['gyroY'], color='b', label='angular rate y')
#ax2.plot(data['time'], data['predFlowY'], color='g', label='pred flow y')
figvel = plt.figure()
av1 = figvel.add_subplot(311)
av1.set_title("Flow Velocity estimate")
av1.set_xlabel('time (s)')
av1.set_ylabel('north velocity (m/s)')
av1.plot(data['time'], data['flowMsX'], color='r', label='Flow Vn')
av1.plot(data['time'], data['GpsVn'], color='g', label='GPS Vn')
av2 = figvel.add_subplot(312)
av2.set_xlabel('time (s)')
av2.set_ylabel('east velocity (m/s)')
av2.plot(data['time'], data['flowMsY'], color='r', label='Flow Ve')
av2.plot(data['time'], data['GpsVe'], color='g', label='GPS Ve')
av3 = figvel.add_subplot(313)
av3.set_xlabel('time (s)')
av3.set_ylabel('ground distance (m)')
av3.plot(data['time'], data['distance'], color='b', label='distance')
plt.show() | code/plot_flow.py |
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.cbook as cbook
import numpy as np
import math
# State vector:
# 0-3: quaternions (q0, q1, q2, q3)
# 4-6: Velocity - m/sec (North, East, Down)
# 7-9: Position - m (North, East, Down)
# 10-12: Delta Angle bias - rad (X,Y,Z)
# 13-14: Wind Vector - m/sec (North,East)
# 15-17: Earth Magnetic Field Vector - milligauss (North, East, Down)
# 18-20: Body Magnetic Field Vector - milligauss (X,Y,Z)
data = np.genfromtxt('FlowRawOut.txt', delimiter=' ', skip_header=1,
skip_footer=1, names=['time', 'flowRadX', 'flowRadY', 'gyroX', 'gyroY', 'flowMsX', 'flowMsY', 'GpsVn', 'GpsVe', 'distance'])
fig = plt.figure()
ax1 = fig.add_subplot(211)
ax1.set_title("Flow angular rate estimate")
ax1.set_xlabel('time (s)')
ax1.set_ylabel('flow X (rad)')
ax1.plot(data['time'], data['flowRadX'], color='r', label='flow x')
#ax1.plot(data['time'], data['gyroX'], color='b', label='angular rate x')
#ax1.plot(data['time'], data['predFlowX'], color='g', label='pred flow x')
ax2 = fig.add_subplot(212)
ax2.set_xlabel('time (s)')
ax2.set_ylabel('flow Y (rad)')
ax2.plot(data['time'], data['flowRadY'], color='r', label='flow y')
#ax2.plot(data['time'], data['gyroY'], color='b', label='angular rate y')
#ax2.plot(data['time'], data['predFlowY'], color='g', label='pred flow y')
figvel = plt.figure()
av1 = figvel.add_subplot(311)
av1.set_title("Flow Velocity estimate")
av1.set_xlabel('time (s)')
av1.set_ylabel('north velocity (m/s)')
av1.plot(data['time'], data['flowMsX'], color='r', label='Flow Vn')
av1.plot(data['time'], data['GpsVn'], color='g', label='GPS Vn')
av2 = figvel.add_subplot(312)
av2.set_xlabel('time (s)')
av2.set_ylabel('east velocity (m/s)')
av2.plot(data['time'], data['flowMsY'], color='r', label='Flow Ve')
av2.plot(data['time'], data['GpsVe'], color='g', label='GPS Ve')
av3 = figvel.add_subplot(313)
av3.set_xlabel('time (s)')
av3.set_ylabel('ground distance (m)')
av3.plot(data['time'], data['distance'], color='b', label='distance')
plt.show() | 0.602412 | 0.680772 |
"""The st config."""
import os
import shutil
import sys
import tempfile
import pytest
from mindinsight.datavisual.data_transform.data_manager import DataManager
from mindinsight.lineagemgr.cache_item_updater import LineageCacheItemUpdater
from ....utils import mindspore
from ....utils.mindspore.dataset.engine.serializer_deserializer import SERIALIZED_PIPELINE
sys.modules['mindspore'] = mindspore
BASE_SUMMARY_DIR = tempfile.NamedTemporaryFile(prefix='test_lineage_summary_dir_base_').name
SUMMARY_DIR = os.path.join(BASE_SUMMARY_DIR, 'run1')
SUMMARY_DIR_2 = os.path.join(BASE_SUMMARY_DIR, 'run2')
SUMMARY_DIR_3 = os.path.join(BASE_SUMMARY_DIR, 'except_run')
LINEAGE_DATA_MANAGER = DataManager(BASE_SUMMARY_DIR)
LINEAGE_DATA_MANAGER.register_brief_cache_item_updater(LineageCacheItemUpdater())
COLLECTION_MODULE = 'TestModelLineage'
API_MODULE = 'TestModelApi'
DATASET_GRAPH = SERIALIZED_PIPELINE
def get_module_name(nodeid):
"""Get the module name from nodeid."""
_, module_name, _ = nodeid.split("::")
return module_name
def pytest_collection_modifyitems(items):
"""Modify the execution order."""
split_items = {
COLLECTION_MODULE: [],
API_MODULE: []
}
for item in items:
module_name = get_module_name(item.nodeid)
module_item = split_items.get(module_name)
if module_item is not None:
module_item.append(item)
ordered_items = split_items.get(COLLECTION_MODULE)
item_scenes = []
for item in ordered_items:
scenes = [
marker for marker in item.own_markers
if marker.name.startswith('scene')
]
if scenes:
scene_mark = scenes[0].args[0]
else:
scene_mark = 0
item_scenes.append((item, scene_mark))
sorted_item_scenes = sorted(item_scenes, key=lambda x: x[1])
ordered_items = [item_scene[0] for item_scene in sorted_item_scenes]
ordered_items.extend(split_items.get(API_MODULE))
items[:] = ordered_items
@pytest.fixture(scope="session")
def create_summary_dir():
"""Create summary directory."""
try:
if os.path.exists(BASE_SUMMARY_DIR):
shutil.rmtree(BASE_SUMMARY_DIR)
permissions = os.R_OK | os.W_OK | os.X_OK
mode = permissions << 6
if not os.path.exists(BASE_SUMMARY_DIR):
os.mkdir(BASE_SUMMARY_DIR, mode=mode)
yield
finally:
if os.path.exists(BASE_SUMMARY_DIR):
shutil.rmtree(BASE_SUMMARY_DIR) | tests/st/func/lineagemgr/conftest.py | """The st config."""
import os
import shutil
import sys
import tempfile
import pytest
from mindinsight.datavisual.data_transform.data_manager import DataManager
from mindinsight.lineagemgr.cache_item_updater import LineageCacheItemUpdater
from ....utils import mindspore
from ....utils.mindspore.dataset.engine.serializer_deserializer import SERIALIZED_PIPELINE
sys.modules['mindspore'] = mindspore
BASE_SUMMARY_DIR = tempfile.NamedTemporaryFile(prefix='test_lineage_summary_dir_base_').name
SUMMARY_DIR = os.path.join(BASE_SUMMARY_DIR, 'run1')
SUMMARY_DIR_2 = os.path.join(BASE_SUMMARY_DIR, 'run2')
SUMMARY_DIR_3 = os.path.join(BASE_SUMMARY_DIR, 'except_run')
LINEAGE_DATA_MANAGER = DataManager(BASE_SUMMARY_DIR)
LINEAGE_DATA_MANAGER.register_brief_cache_item_updater(LineageCacheItemUpdater())
COLLECTION_MODULE = 'TestModelLineage'
API_MODULE = 'TestModelApi'
DATASET_GRAPH = SERIALIZED_PIPELINE
def get_module_name(nodeid):
"""Get the module name from nodeid."""
_, module_name, _ = nodeid.split("::")
return module_name
def pytest_collection_modifyitems(items):
"""Modify the execution order."""
split_items = {
COLLECTION_MODULE: [],
API_MODULE: []
}
for item in items:
module_name = get_module_name(item.nodeid)
module_item = split_items.get(module_name)
if module_item is not None:
module_item.append(item)
ordered_items = split_items.get(COLLECTION_MODULE)
item_scenes = []
for item in ordered_items:
scenes = [
marker for marker in item.own_markers
if marker.name.startswith('scene')
]
if scenes:
scene_mark = scenes[0].args[0]
else:
scene_mark = 0
item_scenes.append((item, scene_mark))
sorted_item_scenes = sorted(item_scenes, key=lambda x: x[1])
ordered_items = [item_scene[0] for item_scene in sorted_item_scenes]
ordered_items.extend(split_items.get(API_MODULE))
items[:] = ordered_items
@pytest.fixture(scope="session")
def create_summary_dir():
"""Create summary directory."""
try:
if os.path.exists(BASE_SUMMARY_DIR):
shutil.rmtree(BASE_SUMMARY_DIR)
permissions = os.R_OK | os.W_OK | os.X_OK
mode = permissions << 6
if not os.path.exists(BASE_SUMMARY_DIR):
os.mkdir(BASE_SUMMARY_DIR, mode=mode)
yield
finally:
if os.path.exists(BASE_SUMMARY_DIR):
shutil.rmtree(BASE_SUMMARY_DIR) | 0.391406 | 0.186929 |
from __future__ import (
absolute_import,
unicode_literals,
)
import contextlib
import sqlite3
from typing import (
Any,
Generator,
Optional,
Tuple,
cast,
)
from conformity import fields
import six
from pymetrics.publishers.sql import SqlPublisher
__all__ = (
'SqlitePublisher',
)
class Sqlite3Connection(sqlite3.Connection):
"""
An extension to the base connection. The base class is a pure C class on whose instances you can't call setattr.
This extension enables the use of setattr on connection objects.
"""
@fields.ClassConfigurationSchema.provider(fields.Dictionary(
{
'database_name': fields.UnicodeString(description='The name of the Sqlite database to use'),
'use_uri': fields.Boolean(
description='Whether the database name should be treated as a URI (Python 3+ only)',
),
},
optional_keys=('database_name', 'use_uri'),
))
class SqlitePublisher(SqlPublisher):
"""
A publisher that emits metrics to a Sqlite database file or in-memory database. Especially useful for use in tests
where you need to actually evaluate your metrics.
"""
database_type = 'Sqlite'
exception_type = sqlite3.Error
MEMORY_DATABASE_NAME = ':memory:'
_memory_connection = None # type: Optional[Sqlite3Connection]
def __init__(self, database_name=MEMORY_DATABASE_NAME, use_uri=False): # type: (six.text_type, bool) -> None
if six.PY2 and use_uri:
raise ValueError('Argument use_uri can only be used in Python 3 and higher')
self.database_name = database_name
self.use_uri = use_uri
self.connection = None # type: Optional[Sqlite3Connection]
@staticmethod
@contextlib.contextmanager
def connection_context(connection): # type: (sqlite3.Connection) -> Generator[sqlite3.Cursor, None, None]
with connection:
cursor = None
try:
cursor = connection.cursor()
yield cursor
finally:
if cursor:
cursor.close()
@contextlib.contextmanager
def database_context(self): # type: () -> Generator[sqlite3.Cursor, None, None]
if not self.connection:
raise ValueError('Call to database_context before database connection established')
with self.connection_context(self.connection) as cursor:
yield cursor
def initialize_if_necessary(self): # type: () -> None
if not self.connection:
self.connection = self.get_connection(self.database_name, self.use_uri)
if not getattr(self.connection, '_pymetrics_initialized', None):
with self.database_context() as cursor:
# noinspection SqlNoDataSourceInspection,SqlResolve
cursor.execute("SELECT name FROM sqlite_master WHERE name='pymetrics_counters';")
needs_schema = cursor.fetchone() is None
if needs_schema:
with self.database_context() as cursor:
# noinspection SqlNoDataSourceInspection
cursor.executescript("""
CREATE TABLE pymetrics_counters (id INTEGER PRIMARY KEY, metric_name TEXT NOT NULL, metric_value INTEGER NOT NULL);
CREATE TABLE pymetrics_gauges (id INTEGER PRIMARY KEY, metric_name TEXT NOT NULL UNIQUE, metric_value INTEGER NOT NULL);
CREATE TABLE pymetrics_timers (id INTEGER PRIMARY KEY, metric_name TEXT NOT NULL, metric_value REAL NOT NULL);
CREATE TABLE pymetrics_histograms (id INTEGER PRIMARY KEY, metric_name TEXT NOT NULL, metric_value INTEGER NOT NULL);
""")
setattr(self.connection, '_pymetrics_initialized', True)
@classmethod
def get_connection(cls, database_name=MEMORY_DATABASE_NAME, use_uri=False):
# type: (six.text_type, bool) -> Sqlite3Connection
if six.PY2 and use_uri:
raise ValueError('Argument use_uri can only be used in Python 3 and higher')
if database_name == cls.MEMORY_DATABASE_NAME and cls._memory_connection:
# We only want a single in-memory connection per Python instance
return cls._memory_connection
kwargs = {}
if use_uri:
kwargs['uri'] = True
connection = cast(Sqlite3Connection, sqlite3.connect(
database_name,
factory=Sqlite3Connection,
timeout=0.1,
isolation_level=None,
check_same_thread=False,
**kwargs
))
connection.row_factory = sqlite3.Row
if database_name == cls.MEMORY_DATABASE_NAME:
cls._memory_connection = connection
return connection
def execute_statement_multiple_times(self, statement, arguments):
    # type: (six.text_type, Generator[Tuple[Any, ...], None, None]) -> None
    """Run ``statement`` once per parameter tuple in ``arguments`` via ``executemany``."""
    with self.database_context() as cursor:
        cursor.executemany(statement, arguments)
@classmethod
def clear_metrics_from_database(cls, connection):  # type: (sqlite3.Connection) -> None
    """Delete all recorded metrics (counters, gauges, timers, histograms).

    Operates on a caller-supplied connection, so a database can be reset
    (e.g. between tests) without constructing a publisher instance.
    """
    with cls.connection_context(connection) as cursor:
        # noinspection SqlNoDataSourceInspection,SqlResolve,SqlWithoutWhere
        cursor.executescript("""
            DELETE FROM pymetrics_counters;
            DELETE FROM pymetrics_gauges;
            DELETE FROM pymetrics_timers;
            DELETE FROM pymetrics_histograms;
        """) | pymetrics/publishers/sqlite.py | from __future__ import (
absolute_import,
unicode_literals,
)
import contextlib
import sqlite3
from typing import (
Any,
Generator,
Optional,
Tuple,
cast,
)
from conformity import fields
import six
from pymetrics.publishers.sql import SqlPublisher
__all__ = (
'SqlitePublisher',
)
class Sqlite3Connection(sqlite3.Connection):
    """Connection subclass that permits arbitrary attribute assignment.

    The base ``sqlite3.Connection`` is a pure C class whose instances reject
    ``setattr``; subclassing it in Python gives each instance a ``__dict__``,
    so bookkeeping flags can be attached directly to a connection object.
    """
@fields.ClassConfigurationSchema.provider(fields.Dictionary(
    {
        'database_name': fields.UnicodeString(description='The name of the Sqlite database to use'),
        'use_uri': fields.Boolean(
            description='Whether the database name should be treated as a URI (Python 3+ only)',
        ),
    },
    optional_keys=('database_name', 'use_uri'),
))
class SqlitePublisher(SqlPublisher):
    """
    A publisher that emits metrics to a Sqlite database file or in-memory database. Especially useful for use in tests
    where you need to actually evaluate your metrics.
    """

    # Consumed by the SqlPublisher base class.
    database_type = 'Sqlite'
    exception_type = sqlite3.Error

    MEMORY_DATABASE_NAME = ':memory:'
    # Process-wide cache of the single shared in-memory connection.
    _memory_connection = None  # type: Optional[Sqlite3Connection]

    def __init__(self, database_name=MEMORY_DATABASE_NAME, use_uri=False):  # type: (six.text_type, bool) -> None
        """Create a publisher; the connection is opened lazily by initialize_if_necessary().

        :param database_name: database file name, or ``:memory:``
        :param use_uri: whether ``database_name`` is a URI (Python 3 only)
        :raises ValueError: if ``use_uri`` is requested under Python 2
        """
        if six.PY2 and use_uri:
            raise ValueError('Argument use_uri can only be used in Python 3 and higher')
        self.database_name = database_name
        self.use_uri = use_uri
        self.connection = None  # type: Optional[Sqlite3Connection]

    @staticmethod
    @contextlib.contextmanager
    def connection_context(connection):  # type: (sqlite3.Connection) -> Generator[sqlite3.Cursor, None, None]
        """Yield a cursor inside a transaction on ``connection``.

        The ``with connection`` block commits on success and rolls back on
        error; the cursor itself is always closed.
        """
        with connection:
            cursor = None
            try:
                cursor = connection.cursor()
                yield cursor
            finally:
                if cursor:
                    cursor.close()

    @contextlib.contextmanager
    def database_context(self):  # type: () -> Generator[sqlite3.Cursor, None, None]
        """Yield a cursor on this publisher's own connection.

        :raises ValueError: if no connection has been established yet
        """
        if not self.connection:
            raise ValueError('Call to database_context before database connection established')
        with self.connection_context(self.connection) as cursor:
            yield cursor

    def initialize_if_necessary(self):  # type: () -> None
        """Lazily open the connection and create the metrics schema once.

        A ``_pymetrics_initialized`` flag on the connection object makes
        repeated calls no-ops.
        """
        if not self.connection:
            self.connection = self.get_connection(self.database_name, self.use_uri)
        if not getattr(self.connection, '_pymetrics_initialized', None):
            with self.database_context() as cursor:
                # noinspection SqlNoDataSourceInspection,SqlResolve
                # Probe one known table; its absence implies the schema is missing.
                cursor.execute("SELECT name FROM sqlite_master WHERE name='pymetrics_counters';")
                needs_schema = cursor.fetchone() is None
            if needs_schema:
                with self.database_context() as cursor:
                    # noinspection SqlNoDataSourceInspection
                    cursor.executescript("""
                        CREATE TABLE pymetrics_counters (id INTEGER PRIMARY KEY, metric_name TEXT NOT NULL, metric_value INTEGER NOT NULL);
                        CREATE TABLE pymetrics_gauges (id INTEGER PRIMARY KEY, metric_name TEXT NOT NULL UNIQUE, metric_value INTEGER NOT NULL);
                        CREATE TABLE pymetrics_timers (id INTEGER PRIMARY KEY, metric_name TEXT NOT NULL, metric_value REAL NOT NULL);
                        CREATE TABLE pymetrics_histograms (id INTEGER PRIMARY KEY, metric_name TEXT NOT NULL, metric_value INTEGER NOT NULL);
                    """)
            setattr(self.connection, '_pymetrics_initialized', True)

    @classmethod
    def get_connection(cls, database_name=MEMORY_DATABASE_NAME, use_uri=False):
        # type: (six.text_type, bool) -> Sqlite3Connection
        """Open (or reuse) a connection; ``:memory:`` connections are shared process-wide.

        :raises ValueError: if ``use_uri`` is requested under Python 2
        """
        if six.PY2 and use_uri:
            raise ValueError('Argument use_uri can only be used in Python 3 and higher')
        if database_name == cls.MEMORY_DATABASE_NAME and cls._memory_connection:
            # We only want a single in-memory connection per Python instance
            return cls._memory_connection
        kwargs = {}
        if use_uri:
            kwargs['uri'] = True
        connection = cast(Sqlite3Connection, sqlite3.connect(
            database_name,
            factory=Sqlite3Connection,  # subclass permits setattr on the connection
            timeout=0.1,
            isolation_level=None,  # autocommit mode; transactions handled explicitly
            check_same_thread=False,
            **kwargs
        ))
        connection.row_factory = sqlite3.Row  # rows addressable by column name
        if database_name == cls.MEMORY_DATABASE_NAME:
            cls._memory_connection = connection
        return connection

    def execute_statement_multiple_times(self, statement, arguments):
        # type: (six.text_type, Generator[Tuple[Any, ...], None, None]) -> None
        """Run ``statement`` once per parameter tuple in ``arguments`` via ``executemany``."""
        with self.database_context() as cursor:
            cursor.executemany(statement, arguments)

    @classmethod
    def clear_metrics_from_database(cls, connection):  # type: (sqlite3.Connection) -> None
        """Delete all recorded metrics from the four metrics tables."""
        with cls.connection_context(connection) as cursor:
            # noinspection SqlNoDataSourceInspection,SqlResolve,SqlWithoutWhere
            cursor.executescript("""
                DELETE FROM pymetrics_counters;
                DELETE FROM pymetrics_gauges;
                DELETE FROM pymetrics_timers;
                DELETE FROM pymetrics_histograms;
            """) | 0.835484 | 0.111434 |
import cdurllib
import urllib.parse
import tempfile
import os
import time
import cdmsobj
import sys
import errno
import shelve
from .error import CDMSError
MethodNotImplemented = "Method not yet implemented"
SchemeNotSupported = "Scheme not supported: "
LockError = "Lock error:"
TimeOutError = "Wait for read completion timed out:"
GlobusNotSupported = "Globus interface not supported"
RequestManagerNotSupported = "Request manager interface not supported (module reqm not found)"
_lock_max_tries = 10 # Number of tries for a lock
_lock_naptime = 1 # Seconds between lock tries
_cache_tempdir = None # Default temporary directory
def lock(filename):
    """
    Acquire a file-based lock with the given name.

    Usage:
        lock(filename)

    If the function returns, the lock was acquired successfully.

    Raises CDMSError if the lock cannot be acquired after _lock_max_tries
    attempts, sleeping _lock_naptime seconds between attempts.

    Notes
    -----
    This function is UNIX-specific.
    It is important to delete the lock via unlock() if the process is
    interrupted, otherwise subsequent locks will fail.
    """
    path = lockpath(filename)
    # Try to acquire a file-based lock
    for tries in range(_lock_max_tries):
        try:
            if cdmsobj._debug:
                print(
                    'Process %d: Trying to acquire lock %s' %
                    (os.getpid(), path))
            # O_CREAT|O_EXCL makes creation atomic: exactly one process can
            # create the lock file.
            fd = os.open(path, os.O_CREAT | os.O_WRONLY | os.O_EXCL, 0o666)
        except OSError as e:
            # BUG FIX: the original read sys.exc_value.errno, which is a
            # Python 2 idiom removed in Python 3 -- any lock contention
            # raised AttributeError instead of retrying.
            # If the open failed because the file already exists, keep
            # trying; otherwise re-raise the error.
            if e.errno != errno.EEXIST:
                raise
        else:
            if cdmsobj._debug:
                print(
                    'Process %d: Acquired lock %s after %d tries' %
                    (os.getpid(), path, tries))
            # The lock is just the file's existence - nothing to write.
            os.close(fd)
            return
        # Sleep until next retry
        if cdmsobj._debug:
            print(
                'Process %d: Failed to acquire lock %s, sleeping' %
                (os.getpid(), path))
        time.sleep(_lock_naptime)
    # Error if the lock could not be acquired
    raise CDMSError(LockError + 'Could not acquire a lock on %s' % path)
def unlock(filename):
    """
    Delete the file-based lock with the given name.

    Usage: unlock(filename)

    If the function returns, the lock was successfully deleted.

    Notes
    -----
    This function is UNIX-specific.
    """
    lockfile = lockpath(filename)
    if cdmsobj._debug:
        print('Process %d: Unlocking %s' % (os.getpid(), lockfile))
    os.unlink(lockfile)
def lockpath(filename):
    """
    Generate the pathname of a lock. Creates the directory containing the
    lock if necessary.

    Usage: lockpath(filename)
    """
    global _cache_tempdir
    if not _cache_tempdir:
        # gettempdir() both computes and caches tempfile.tempdir; the
        # original called mktemp() purely for that side effect (and
        # mktemp() is deprecated as unsafe besides).
        _cache_tempdir = os.path.join(tempfile.gettempdir(), 'cdms')
    if not os.path.isdir(_cache_tempdir):
        if cdmsobj._debug:
            print(
                'Process %d: Creating cache directory %s' %
                (os.getpid(), _cache_tempdir))
        os.mkdir(_cache_tempdir, 0o777)
    return os.path.join(_cache_tempdir, filename)
_useWindow = 0 # If true, use a progress dialog
_pythonTransfer = 0
_globusTransfer = 1
_requestManagerTransfer = 2
_transferMethod = _pythonTransfer # Method of transferring files
def useWindow():
    """
    Switch progress reporting to dialog windows where possible.

    Not meant to be called directly -- go through gui.setProgressParent.
    Counterpart of useTTY.
    """
    global _useWindow
    _useWindow = 1
def useTTY():
    """
    Send informational messages (such as FTP status) to the terminal.

    Counterpart of useWindow.
    """
    global _useWindow
    _useWindow = 0
def useGlobusTransfer():
    """
    Select the Globus storage API (SC-API) for file transfers.

    Counterpart of usePythonTransfer.
    """
    global _transferMethod
    _transferMethod = _globusTransfer
def usePythonTransfer():
    """
    Select the Python libraries (urllib, ftplib) for file transfers.

    Counterpart of useGlobusTransfer.
    """
    global _transferMethod
    _transferMethod = _pythonTransfer
def useRequestManagerTransfer():
    """
    Select the request-manager backend for file transfers.

    Raises CDMSError if the optional ``reqm`` module cannot be imported.
    """
    try:
        import reqm  # noqa
    except ImportError:
        raise CDMSError(RequestManagerNotSupported)
    global _transferMethod
    _transferMethod = _requestManagerTransfer
def copyFile(fromURL, toURL, callback=None,
             lcpath=None, userid=None, useReplica=1):
    """
    Copy file <fromURL> to local file <toURL>.

    For FTP transfers, if cache._useWindow is true, display a progress dialog,
    otherwise just print progress messages.

    Parameters
    ----------
    callback : progress-report hook; defaults to a GUI updater when
        _useWindow is set, else cdurllib.sampleReportHook
    lcpath : logical collection distinguished name (request-manager only)
    userid : string user ID (request-manager only)
    useReplica : true if the request manager should search the replica
        catalog for the actual file to transfer
    """
    if callback is None:
        if _useWindow:
            from . import gui
            dialogParent = gui.getProgressParent()
            dialog = gui.CdProgressDialog(dialogParent, fromURL)
            callback = gui.updateProgressGui
        else:
            callback = cdurllib.sampleReportHook
    (scheme, netloc, path, parameters, query,
     fragment) = urllib.parse.urlparse(fromURL)
    if scheme == 'ftp':
        if _transferMethod == _pythonTransfer:
            urlopener = cdurllib.CDURLopener()
            # In window environment, attach the dialog to the opener. This will
            # be passed back to the callback function.
            if _useWindow:
                urlopener.setUserObject(dialog)
            try:
                fname, headers = urlopener.retrieve(fromURL, toURL, callback)
            except KeyboardInterrupt:
                raise  # Window or keyboard interrupt: re-raise
            except BaseException:
                if _useWindow:
                    dialog.Destroy()
                raise
        elif _transferMethod == _globusTransfer:  # Transfer via Globus SC-API
            try:
                import globus.storage
            except ImportError:
                raise CDMSError(GlobusNotSupported)
            globus.storage.transfer(fromURL, "file:" + toURL)
        else:
            # NOTE(review): for ftp URLs with the request-manager method
            # selected, control falls through to this scheme error --
            # confirm that is intended.
            raise CDMSError(SchemeNotSupported + scheme)
        return
    elif _transferMethod == _requestManagerTransfer:  # Request manager transfer
        import reqm
        import signal

        # Define an alarm handler, to poll the request manager
        def handler(signum, frame):
            pass
        # Obtain server reference from environment variable ESG_REQM_REF if
        # present
        serverRef = os.environ.get('ESG_REQM_REF', '/tmp/esg_rqm.ref')
        server = reqm.RequestManager(iorFile=serverRef)
        result, token = server.requestFile(
            userid, lcpath, path, toURL, useReplica)
        server.execute(token)
        # Poll the request manager for completion, signalled by estim<=0.0
        while True:
            signal.signal(signal.SIGALRM, handler)
            estim = server.estimate(token)
            print('Estimate: ', estim)
            if estim <= 0.0:
                break
            signal.alarm(3)  # Number of seconds between polls
            signal.pause()
        # !!!! Remove this when gsincftp uses the right target name !!!
        # oldpath = os.path.join(os.path.dirname(toURL),path)
        # os.rename(oldpath,toURL)
        # !!!!
        return
    else:
        # Non-ftp scheme with the Python or Globus transfer method selected.
        raise CDMSError(SchemeNotSupported + scheme)
# A simple data cache
class Cache:
    """A simple on-disk file cache shared between processes.

    The cache maps a caller-chosen ``filekey`` to the path of a downloaded
    file.  The index is a ``shelve`` database; every index access is
    bracketed by the file-based lock()/unlock() primitives so that
    concurrent processes do not corrupt it.  A file that is still being
    transferred is marked in the index with the sentinel value
    "__READ_PENDING__".
    """

    # Path of the data cache index (class attribute; instances assign their
    # own value on first construction).
    indexpath = None

    def __init__(self):
        """Open (creating if necessary) the shared index and purge stale state."""
        if self.indexpath is None:
            self.indexpath = lockpath(".index")
        # This is a kluge to handle the very real possibility that
        # a lock was left over from an aborted process. Unfortunately,
        # this might also screw up a transfer in progress ...
        try:
            unlock("index_lock")
        except BaseException:
            pass
        lock("index_lock")
        self.index = shelve.open(self.indexpath)  # Persistent cache index
        try:
            # Make index file world writeable
            os.chmod(self.indexpath, 0o666)
        except BaseException:
            # NOTE(review): shelve may store the index under a different
            # filename (e.g. with a suffix), in which case this chmod fails
            # silently -- confirm whether that matters.
            pass
        self.index.close()
        unlock("index_lock")
        # Clean up pending read notifications in the cache. This will also
        # mess up tranfers in progress...
        self.clean()
        self.direc = os.path.dirname(self.indexpath)  # Cache directory

    def get(self, filekey):
        """
        Get the path associated with <filekey>, or None if not present.

        The returned value may also be the "__READ_PENDING__" sentinel if
        another process is currently transferring the file.
        """
        filekey = str(filekey)
        lock("index_lock")
        try:
            self.index = shelve.open(self.indexpath)
            value = self.index[filekey]
        except KeyError:
            value = None
        except BaseException:
            # Close and unlock before propagating any unexpected error.
            # NOTE(review): if shelve.open itself raised, self.index may be
            # unset or stale here -- confirm.
            self.index.close()
            unlock("index_lock")
            raise
        self.index.close()
        unlock("index_lock")
        return value

    def put(self, filekey, path):
        """Store ``path`` under ``filekey``: cache[filekey] = path."""
        filekey = str(filekey)
        # Create a semaphore
        lock("index_lock")
        try:
            if cdmsobj._debug:
                print(
                    'Process %d: Adding cache file %s,\n key %s' %
                    (os.getpid(), path, filekey))
            self.index = shelve.open(self.indexpath)
            self.index[filekey] = path
        except BaseException:
            self.index.close()
            unlock("index_lock")
            raise
        self.index.close()
        unlock("index_lock")

    def deleteEntry(self, filekey):
        """Delete the cache index entry for ``filekey``; missing keys are ignored."""
        filekey = str(filekey)
        # Create a semaphore
        lock("index_lock")
        self.index = shelve.open(self.indexpath)
        try:
            del self.index[filekey]
        except BaseException:
            pass
        # NOTE(review): unlike get()/put(), the shelve is not closed here,
        # so the deletion may not be flushed to disk -- confirm intended.
        unlock("index_lock")

    def copyFile(self, fromURL, filekey, lcpath=None,
                 userid=None, useReplica=None):
        """
        Copy the file <fromURL> into the cache. Return the result path.

        A "__READ_PENDING__" marker is stored under ``filekey`` for the
        duration of the transfer so other processes can wait on it.

        Parameters
        ----------
        lcpath : logical collection path (request-manager transfers only)
        userid : string user ID (request-manager transfers only)
        useReplica : true iff the request manager should search the replica
            catalog for the actual file to transfer
        """
        # Put a notification into the cache, that this file is being read.
        self.put(filekey, "__READ_PENDING__")
        # Get a temporary file in the cache directory: tempfile.tempdir is
        # swapped out around mktemp() and then restored.
        tempdir = tempfile.tempdir
        tempfile.tempdir = self.direc
        toPath = tempfile.mktemp()
        tempfile.tempdir = tempdir
        # Copy to the temporary file
        try:
            copyFile(
                fromURL,
                toPath,
                lcpath=lcpath,
                userid=userid,
                useReplica=useReplica)
            # Make cache files world writeable
            os.chmod(toPath, 0o666)
        except BaseException:
            # Remove the notification on error, and the temp file, then
            # re-raise
            self.deleteEntry(filekey)
            if os.path.isfile(toPath):
                os.unlink(toPath)
            raise
        # Add to the cache index
        self.put(filekey, toPath)
        return toPath

    def getFile(self, fromURL, filekey, naptime=5, maxtries=60,
                lcpath=None, userid=None, useReplica=None):
        """
        Return the path of a cached copy of <fromURL>, fetching it if needed.

        If the file is in the cache, read it.  If another process is
        transferring it into the cache (index holds "__READ_PENDING__"),
        wait for the transfer to complete.  Otherwise, copy it from the
        remote file.

        Parameters
        ----------
        filekey : cache index key. A good choice is (datasetDN, filename)
            where datasetDN is the distinguished name of the dataset and
            filename is the name of the file within the dataset.
        naptime : number of seconds between retries
        maxtries : maximum number of retries
        lcpath, userid, useReplica : request-manager transfer options

        Raises
        ------
        CDMSError : if a pending transfer does not complete within
            naptime * maxtries seconds.

        Notes
        -----
        The function does not guarantee that the file is still in the cache
        by the time it returns.
        """
        # If the file is being read into the cache, just wait for it
        tempname = self.get(filekey)
        # Note: This is not bulletproof: another process could set the cache at
        # this point
        if tempname is None:
            fpath = self.copyFile(
                fromURL,
                filekey,
                lcpath=lcpath,
                userid=userid,
                useReplica=useReplica)
        elif tempname == "__READ_PENDING__":
            success = 0
            for i in range(maxtries):
                if cdmsobj._debug:
                    print(
                        'Process %d: Waiting for read completion, %s' %
                        (os.getpid(), repr(filekey)))
                time.sleep(naptime)
                tempname = self.get(filekey)
                # The read failed, or the entry was deleted.
                if tempname is None:
                    # NOTE(review): success is not set on this path, so a
                    # copy made on the final loop iteration still raises the
                    # timeout below -- confirm intended.
                    fpath = self.copyFile(
                        fromURL,
                        filekey,
                        lcpath=lcpath,
                        userid=userid,
                        useReplica=useReplica)
                # The read is not yet complete
                elif tempname == "__READ_PENDING__":
                    continue
                # The read is finished.
                else:
                    fpath = tempname
                    success = 1
                    break
            if not success:
                raise CDMSError(TimeOutError + repr(filekey))
        else:
            fpath = tempname
        if cdmsobj._debug:
            print(
                'Process %d: Got file %s from cache %s' %
                (os.getpid(), fromURL, fpath))
        return fpath

    def delete(self):
        """Delete all cached files (except pending reads) and drop the index."""
        if self.indexpath is not None:
            lock("index_lock")
            self.index = shelve.open(self.indexpath)
            for key in list(self.index.keys()):
                path = self.index[key]
                if path == "__READ_PENDING__":
                    continue  # Don't remove read-pending notifications
                try:
                    if cdmsobj._debug:
                        print(
                            'Process %d: Deleting cache file %s' %
                            (os.getpid(), path))
                    os.unlink(path)
                except BaseException:
                    pass
                del self.index[key]
            self.index.close()
            unlock("index_lock")
            self.indexpath = None

    def clean(self):
        """Remove all "__READ_PENDING__" notifications from the index."""
        lock("index_lock")
        self.index = shelve.open(self.indexpath)
        for key in list(self.index.keys()):
            path = self.index[key]
            if path == "__READ_PENDING__":
                del self.index[key]
        self.index.close()
        unlock("index_lock") | Lib/cache.py | import cdurllib
import urllib.parse
import tempfile
import os
import time
import cdmsobj
import sys
import errno
import shelve
from .error import CDMSError
MethodNotImplemented = "Method not yet implemented"
SchemeNotSupported = "Scheme not supported: "
LockError = "Lock error:"
TimeOutError = "Wait for read completion timed out:"
GlobusNotSupported = "Globus interface not supported"
RequestManagerNotSupported = "Request manager interface not supported (module reqm not found)"
_lock_max_tries = 10 # Number of tries for a lock
_lock_naptime = 1 # Seconds between lock tries
_cache_tempdir = None # Default temporary directory
def lock(filename):
    """
    Acquire a file-based lock with the given name.

    Usage:
        lock(filename)

    If the function returns, the lock was acquired successfully.

    Raises CDMSError if the lock cannot be acquired after _lock_max_tries
    attempts, sleeping _lock_naptime seconds between attempts.

    Notes
    -----
    This function is UNIX-specific.
    It is important to delete the lock via unlock() if the process is
    interrupted, otherwise subsequent locks will fail.
    """
    path = lockpath(filename)
    # Try to acquire a file-based lock
    for tries in range(_lock_max_tries):
        try:
            if cdmsobj._debug:
                print(
                    'Process %d: Trying to acquire lock %s' %
                    (os.getpid(), path))
            # O_CREAT|O_EXCL makes creation atomic: exactly one process can
            # create the lock file.
            fd = os.open(path, os.O_CREAT | os.O_WRONLY | os.O_EXCL, 0o666)
        except OSError as e:
            # BUG FIX: the original read sys.exc_value.errno, which is a
            # Python 2 idiom removed in Python 3 -- any lock contention
            # raised AttributeError instead of retrying.
            # If the open failed because the file already exists, keep
            # trying; otherwise re-raise the error.
            if e.errno != errno.EEXIST:
                raise
        else:
            if cdmsobj._debug:
                print(
                    'Process %d: Acquired lock %s after %d tries' %
                    (os.getpid(), path, tries))
            # The lock is just the file's existence - nothing to write.
            os.close(fd)
            return
        # Sleep until next retry
        if cdmsobj._debug:
            print(
                'Process %d: Failed to acquire lock %s, sleeping' %
                (os.getpid(), path))
        time.sleep(_lock_naptime)
    # Error if the lock could not be acquired
    raise CDMSError(LockError + 'Could not acquire a lock on %s' % path)
def unlock(filename):
    """
    Delete the file-based lock with the given name.

    Usage: unlock(filename)

    If the function returns, the lock was successfully deleted.

    Notes
    -----
    This function is UNIX-specific.
    """
    lockfile = lockpath(filename)
    if cdmsobj._debug:
        print('Process %d: Unlocking %s' % (os.getpid(), lockfile))
    os.unlink(lockfile)
def lockpath(filename):
    """
    Generate the pathname of a lock. Creates the directory containing the
    lock if necessary.

    Usage: lockpath(filename)
    """
    global _cache_tempdir
    if not _cache_tempdir:
        # gettempdir() both computes and caches tempfile.tempdir; the
        # original called mktemp() purely for that side effect (and
        # mktemp() is deprecated as unsafe besides).
        _cache_tempdir = os.path.join(tempfile.gettempdir(), 'cdms')
    if not os.path.isdir(_cache_tempdir):
        if cdmsobj._debug:
            print(
                'Process %d: Creating cache directory %s' %
                (os.getpid(), _cache_tempdir))
        os.mkdir(_cache_tempdir, 0o777)
    return os.path.join(_cache_tempdir, filename)
_useWindow = 0 # If true, use a progress dialog
_pythonTransfer = 0
_globusTransfer = 1
_requestManagerTransfer = 2
_transferMethod = _pythonTransfer # Method of transferring files
def useWindow():
"""
Specify that dialog windows should be used if possible.
Do not call this directly, use gui.setProgressParent instead.
See useTTY.
"""
global _useWindow
_useWindow = 1
def useTTY():
"""
Informational messages such as FTP status should be sent to the terminal. See useWindow.
"""
global _useWindow
_useWindow = 0
def useGlobusTransfer():
"""
Specify that file transfers should use the Globus storage API (SC-API). See usePythonTransfer.
"""
global _transferMethod
_transferMethod = _globusTransfer
def usePythonTransfer():
"""
Specify that file transfers should use the Python libraries urllib, ftplib. See useGlobusTransfer.
"""
global _transferMethod
_transferMethod = _pythonTransfer
def useRequestManagerTransfer():
try:
import reqm # noqa
except ImportError:
raise CDMSError(RequestManagerNotSupported)
global _transferMethod
_transferMethod = _requestManagerTransfer
def copyFile(fromURL, toURL, callback=None,
lcpath=None, userid=None, useReplica=1):
"""
Copy file <fromURL> to local file <toURL>.
For FTP transfers, if cache._useWindow is true, display a progress dialog,
otherwise just print progress messages.
For request manager transfers, <lcpath> is the logical collection distinguished name,
Parameters
----------
<userid> : is the string user ID,
<useReplica> : is true if the request manager should search the replica catalog for the
actual file to transfer.
"""
if callback is None:
if _useWindow:
from . import gui
dialogParent = gui.getProgressParent()
dialog = gui.CdProgressDialog(dialogParent, fromURL)
callback = gui.updateProgressGui
else:
callback = cdurllib.sampleReportHook
(scheme, netloc, path, parameters, query,
fragment) = urllib.parse.urlparse(fromURL)
if scheme == 'ftp':
if _transferMethod == _pythonTransfer:
urlopener = cdurllib.CDURLopener()
# In window environment, attach the dialog to the opener. This will
# be passed back to the callback function.
if _useWindow:
urlopener.setUserObject(dialog)
try:
fname, headers = urlopener.retrieve(fromURL, toURL, callback)
except KeyboardInterrupt:
raise # Window or keyboard interrupt: re-raise
except BaseException:
if _useWindow:
dialog.Destroy()
raise
elif _transferMethod == _globusTransfer: # Transfer via Globus SC-API
try:
import globus.storage
except ImportError:
raise CDMSError(GlobusNotSupported)
globus.storage.transfer(fromURL, "file:" + toURL)
else:
raise CDMSError(SchemeNotSupported + scheme)
return
elif _transferMethod == _requestManagerTransfer: # Request manager gransfer
import reqm
import signal
# Define an alarm handler, to poll the request manager
def handler(signum, frame):
pass
# Obtain server reference from environment variable ESG_REQM_REF if
# present
serverRef = os.environ.get('ESG_REQM_REF', '/tmp/esg_rqm.ref')
server = reqm.RequestManager(iorFile=serverRef)
result, token = server.requestFile(
userid, lcpath, path, toURL, useReplica)
server.execute(token)
# Poll the request manager for completion, signalled by estim<=0.0
while True:
signal.signal(signal.SIGALRM, handler)
estim = server.estimate(token)
print('Estimate: ', estim)
if estim <= 0.0:
break
signal.alarm(3) # Number of seconds between polls
signal.pause()
# !!!! Remove this when gsincftp uses the right target name !!!
# oldpath = os.path.join(os.path.dirname(toURL),path)
# os.rename(oldpath,toURL)
# !!!!
return
else:
raise CDMSError(SchemeNotSupported + scheme)
# A simple data cache
class Cache:
indexpath = None # Path of data cache index
def __init__(self):
if self.indexpath is None:
self.indexpath = lockpath(".index")
# This is a kluge to handle the very real possibility that
# a lock was left over from an aborted process. Unfortunately,
# this might also screw up a transfer in progress ...
try:
unlock("index_lock")
except BaseException:
pass
lock("index_lock")
self.index = shelve.open(self.indexpath) # Persistent cache index
try:
# Make index file world writeable
os.chmod(self.indexpath, 0o666)
except BaseException:
pass
self.index.close()
unlock("index_lock")
# Clean up pending read notifications in the cache. This will also
# mess up tranfers in progress...
self.clean()
self.direc = os.path.dirname(self.indexpath) # Cache directory
def get(self, filekey):
"""
Get the path associated with <filekey>, or None if not present.
Parameters
----------
<filekey> : filekey for cache
"""
filekey = str(filekey)
lock("index_lock")
try:
self.index = shelve.open(self.indexpath)
value = self.index[filekey]
except KeyError:
value = None
except BaseException:
self.index.close()
unlock("index_lock")
raise
self.index.close()
unlock("index_lock")
return value
def put(self, filekey, path):
"""
cache[filekey] = path
Parameters
----------
filekey : for cache
"""
filekey = str(filekey)
# Create a semaphore
lock("index_lock")
try:
if cdmsobj._debug:
print(
'Process %d: Adding cache file %s,\n key %s' %
(os.getpid(), path, filekey))
self.index = shelve.open(self.indexpath)
self.index[filekey] = path
except BaseException:
self.index.close()
unlock("index_lock")
raise
self.index.close()
unlock("index_lock")
def deleteEntry(self, filekey):
"""
Delete a cache index entry.
Parameters
----------
<filekey> : filekey for cache
"""
filekey = str(filekey)
# Create a semaphore
lock("index_lock")
self.index = shelve.open(self.indexpath)
try:
del self.index[filekey]
except BaseException:
pass
unlock("index_lock")
def copyFile(self, fromURL, filekey, lcpath=None,
userid=None, useReplica=None):
"""
Copy the file <fromURL> into the cache. Return the result path.
For request manager transfers, lcpath is the logical collection path,
Parameters
----------
<userid> : is the string user ID,
<useReplica> : is true iff the request manager should search the replica
catalog for the actual file to transfer.
"""
# Put a notification into the cache, that this file is being read.
self.put(filekey, "__READ_PENDING__")
# Get a temporary file in the cache
tempdir = tempfile.tempdir
tempfile.tempdir = self.direc
toPath = tempfile.mktemp()
tempfile.tempdir = tempdir
# Copy to the temporary file
try:
copyFile(
fromURL,
toPath,
lcpath=lcpath,
userid=userid,
useReplica=useReplica)
# Make cache files world writeable
os.chmod(toPath, 0o666)
except BaseException:
# Remove the notification on error, and the temp file, then
# re-raise
self.deleteEntry(filekey)
if os.path.isfile(toPath):
os.unlink(toPath)
raise
# Add to the cache index
self.put(filekey, toPath)
return toPath
def getFile(self, fromURL, filekey, naptime=5, maxtries=60,
lcpath=None, userid=None, useReplica=None):
"""
Get the file with <fileURL>.
If the file is in the cache, read it.
If another process is transferring it into the cache, wait for the
transfer to complete.
Parameters
----------
<naptime> : is the number of seconds between retries,
<maxtries> : is the maximum number of retries. Otherwise, copy it from the remote file.
<filekey> : is the cache index key. A good choice is (datasetDN, filename) where
datasetDN : is the distinguished name of the dataset, and filename is the name of the file
within the dataset.
For request manager transfers,
<lcpath> : is the logical collection path,
<userid> : is the user string ID,
<useReplica> : is true iff the request manager should search the replica catalog
for the actual file to transfer.
Returns
-------
the path of a file in the cache.
Notes:
The function does not guarantee that the file is still in the cache
by the time it returns.
"""
# If the file is being read into the cache, just wait for it
tempname = self.get(filekey)
# Note: This is not bulletproof: another process could set the cache at
# this point
if tempname is None:
fpath = self.copyFile(
fromURL,
filekey,
lcpath=lcpath,
userid=userid,
useReplica=useReplica)
elif tempname == "__READ_PENDING__":
success = 0
for i in range(maxtries):
if cdmsobj._debug:
print(
'Process %d: Waiting for read completion, %s' %
(os.getpid(), repr(filekey)))
time.sleep(naptime)
tempname = self.get(filekey)
# The read failed, or the entry was deleted.
if tempname is None:
fpath = self.copyFile(
fromURL,
filekey,
lcpath=lcpath,
userid=userid,
useReplica=useReplica)
# The read is not yet complete
elif tempname == "__READ_PENDING__":
continue
# The read is finished.
else:
fpath = tempname
success = 1
break
if not success:
raise CDMSError(TimeOutError + repr(filekey))
else:
fpath = tempname
if cdmsobj._debug:
print(
'Process %d: Got file %s from cache %s' %
(os.getpid(), fromURL, fpath))
return fpath
def delete(self):
"""
Delete the cache.
"""
if self.indexpath is not None:
lock("index_lock")
self.index = shelve.open(self.indexpath)
for key in list(self.index.keys()):
path = self.index[key]
if path == "__READ_PENDING__":
continue # Don't remove read-pending notifications
try:
if cdmsobj._debug:
print(
'Process %d: Deleting cache file %s' %
(os.getpid(), path))
os.unlink(path)
except BaseException:
pass
del self.index[key]
self.index.close()
unlock("index_lock")
self.indexpath = None
def clean(self):
"""
Clean pending read notifications.
"""
lock("index_lock")
self.index = shelve.open(self.indexpath)
for key in list(self.index.keys()):
path = self.index[key]
if path == "__READ_PENDING__":
del self.index[key]
self.index.close()
unlock("index_lock") | 0.304352 | 0.087603 |
import numpy as np
from numpy import array, unique, zeros, sort, where, argsort, r_, ones
from numpy import sum as npsum
from datetime import datetime
def RawMigrationDb2AggrRiskDrivers(db,t_start,t_end):
    """Process a raw credit-migration database into aggregate risk drivers.

    Parameters
    ----------
    db : struct-like object with fields:
        - ratings: sequence of rating labels (strings)
        - data: table whose rows are issuer id, date string ('%d-%b-%Y'),
          and rating label (indexed as data[row] and data[:, mask] --
          presumably a numpy object array; verify against callers)
    t_start, t_end : time window bounds, compared against the parsed dates

    Returns
    -------
    dates : vector of migration dates inside the window
    N : N[t, i] = number of obligors with rating i+1 at dates[t]
    n : n[t, i, j] = cumulative number of i->j transitions up to dates[t]
    m : m[t, i, j] = number of i->j transitions occurred at dates[t]
    n_tot : n_tot[0, t] = total number of transitions up to dates[t]
    fin_rat : dict; key 1 holds issuer values, key 2 maps issuer index to
        its final rating label

    Notes
    -----
    db.data[1] is overwritten in place with parsed datetime objects.
    """
    ratings_str = db.ratings
    rr_ = len(ratings_str)
    # Side effect: the date row is replaced with datetime objects.
    db.data[1] = list(map(lambda x: datetime.strptime(x, '%d-%b-%Y'), db.data[1]))
    idx_dates = (db.data[1] >= t_start) & (db.data[1] <= t_end)
    data_raw = db.data[:,idx_dates]# dataset inside time-window
    ## Transform "cell" dataset into "double"
    # issuers
    issuers_raw = data_raw[0]
    issuers_d = array(list(map(float,issuers_raw)))
    issuers = unique(issuers_d)
    s_ = len(issuers)
    # dates
    dates_raw = data_raw[1]
    dates_d = dates_raw
    dates = unique(dates_d)
    t_ = len(dates)
    # ratings: encode labels as 1-based integer codes
    ratings_raw = data_raw[2,:]
    ratings_d = zeros((1,len(ratings_raw)),dtype=int)
    for r in range(rr_):
        idx = ratings_str[r]==ratings_raw
        ratings_d[0,idx] = r+1
    data_d = r_[issuers_d[np.newaxis,...], dates_d[np.newaxis,...], ratings_d]# dataset in "double" format
    ## Process data
    # NOTE(review): np.NaN was removed in NumPy 2.0 (use np.nan), and
    # multiplying by NaN yields a float array despite dtype=int -- confirm.
    matrix = np.NaN*ones((s_,t_),dtype=int)
    for s in range(s_):
        idx_issuer = data_d[0]==issuers[s]
        data_temp = data_d[:,idx_issuer]
        dates_temp = data_temp[1]
        dates_temp,idx_t = sort(dates_temp), argsort(dates_temp)
        data_temp = data_temp[:,idx_t]
        if len(dates_temp)==1:
            idx_1 = where(dates==dates_temp)[0][0]
            matrix[s,idx_1:] = data_temp[2]
        else:
            # Fill each issuer's rating path backward from its last
            # migration date.
            idx_2 = where(dates==dates_temp[-1])[0][0]
            matrix[s,idx_2:] = data_temp[2,-1]
            for t in range(1,len(dates_temp)):
                idx_1 = where(dates==dates_temp[-t-1])[0][0]
                matrix[s,idx_1:idx_2] = data_temp[2,-t-1]
                idx_2 = idx_1
    ## Compute aggregate risk drivers
    m = zeros((t_,rr_,rr_))
    n = zeros((t_,rr_,rr_))
    n_tot = zeros((1,t_))
    N = zeros((t_,rr_))
    for t in range(t_):
        for i in range(rr_):
            N[t,i] = npsum(matrix[:,t]==i+1)
            if t>0:
                for j in range(rr_):
                    if i!=j:
                        # number of transitions realized at time t between ratings i and j
                        m[t,i,j] = npsum((matrix[:,t-1]==i+1)*(matrix[:,t]==j+1))
        if t>0:
            # number of transitions, between ratings i and j, realized up to time t
            n[t] = n[t-1]+m[t]
            # total number of transitions up to time t
            n_tot[0,t] = npsum(n[t])
    ## Final ratings
    issuers_raw_ = unique(issuers_raw)
    fin_rat = {1:zeros(s_),2:{}}
    for s in range(s_):
        fin_rat[1][s] = issuers_raw_[s]
        fin_rat[2][s] = ratings_str[int(matrix[s,-1])-1]
    return dates, N, n, m, n_tot, fin_rat | functions_legacy/RawMigrationDb2AggrRiskDrivers.py | import numpy as np
from numpy import array, unique, zeros, sort, where, argsort, r_, ones
from numpy import sum as npsum
from datetime import datetime
def RawMigrationDb2AggrRiskDrivers(db,t_start,t_end):
# This function processes the raw database of credit migrations to extract
# the aggregate risk drivers.
# INPUTS
# db :[struct] raw database
# t_start :[string] time window's starting date
# t_end :[string] time window's ending date
# OPS
# dates :[vector] vector of dates corresponding to migrations
# N :[cell] N{t}[i] is the number of obligors with rating i at time dates[t]
# n :[cell] n{t}(i,j) is the cumulative number of transitions between ratings i and j up to time dates[t]
# m :[cell] m{t}(i,j) is the number of transitions occured at time dates[t] between ratings i and j
# n_tot :[vector] n[t] is the total number of transitions up to time dates[t]
# fin_rat :[cell] contains the issuers (first row) with their corresponding final ratings (second row)
## Code
ratings_str = db.ratings
rr_ = len(ratings_str)
db.data[1] = list(map(lambda x: datetime.strptime(x, '%d-%b-%Y'), db.data[1]))
idx_dates = (db.data[1] >= t_start) & (db.data[1] <= t_end)
data_raw = db.data[:,idx_dates]# dataset inside time-window
## Transform "cell" dataset into "double"
# issuers
issuers_raw = data_raw[0]
issuers_d = array(list(map(float,issuers_raw)))
issuers = unique(issuers_d)
s_ = len(issuers)
# dates
dates_raw = data_raw[1]
dates_d = dates_raw
dates = unique(dates_d)
t_ = len(dates)
# ratings
ratings_raw = data_raw[2,:]
ratings_d = zeros((1,len(ratings_raw)),dtype=int)
for r in range(rr_):
idx = ratings_str[r]==ratings_raw
ratings_d[0,idx] = r+1
data_d = r_[issuers_d[np.newaxis,...], dates_d[np.newaxis,...], ratings_d]# dataset in "double" format
## Process data
matrix = np.NaN*ones((s_,t_),dtype=int)
for s in range(s_):
idx_issuer = data_d[0]==issuers[s]
data_temp = data_d[:,idx_issuer]
dates_temp = data_temp[1]
dates_temp,idx_t = sort(dates_temp), argsort(dates_temp)
data_temp = data_temp[:,idx_t]
if len(dates_temp)==1:
idx_1 = where(dates==dates_temp)[0][0]
matrix[s,idx_1:] = data_temp[2]
else:
idx_2 = where(dates==dates_temp[-1])[0][0]
matrix[s,idx_2:] = data_temp[2,-1]
for t in range(1,len(dates_temp)):
idx_1 = where(dates==dates_temp[-t-1])[0][0]
matrix[s,idx_1:idx_2] = data_temp[2,-t-1]
idx_2 = idx_1
## Compute aggregate risk drivers
m = zeros((t_,rr_,rr_))
n = zeros((t_,rr_,rr_))
n_tot = zeros((1,t_))
N = zeros((t_,rr_))
for t in range(t_):
for i in range(rr_):
N[t,i] = npsum(matrix[:,t]==i+1)
if t>0:
for j in range(rr_):
if i!=j:
# number of transitions realized at time t between ratings i and j
m[t,i,j] = npsum((matrix[:,t-1]==i+1)*(matrix[:,t]==j+1))
if t>0:
# number of transitions, between ratings i and j, realized up to time t
n[t] = n[t-1]+m[t]
# total number of transitions up to time t
n_tot[0,t] = npsum(n[t])
## Final ratings
issuers_raw_ = unique(issuers_raw)
fin_rat = {1:zeros(s_),2:{}}
for s in range(s_):
fin_rat[1][s] = issuers_raw_[s]
fin_rat[2][s] = ratings_str[int(matrix[s,-1])-1]
return dates, N, n, m, n_tot, fin_rat | 0.25303 | 0.572484 |
import mdptoolbox
import mdptoolbox.mdp as mdp
import doctest
import numpy as np
INFINITE_COST = 1e30 # used to denote illegal actions (np.inf does not work)
# The number of states depends on the maximum channel capacity - the maximum number of coins Alice holds initially.
# To keep the number of states at a reasonable level, not all capacities are allowed -
# the only allowed capacites are multiples of capacityMultiplier.
def setGlobalCapacity(newCapacityMultiplier:int, newNumCapacities:int):
global capacityMultiplier, numCapacities, maxCapacity, numStates
capacityMultiplier = newCapacityMultiplier
numCapacities = newNumCapacities
maxCapacity = (numCapacities-1)*capacityMultiplier+1
numStates = maxCapacity*numCapacities
print("MDP Wallet has {} states".format(numStates))
setGlobalCapacity(newCapacityMultiplier = 2, newNumCapacities = 10)
### states:
def toState(capacity:int,balance:int)->int:
"""
INPUT: capacity - total capacity of channel, in units of capacityMultiplier: capacity<numCapacities
balance - balance of Alice in the channel: 0<=balance<=capacity*capacityMultiplier
OUTPUT: a number representing the state of the MDP
>>> setGlobalCapacity(newCapacityMultiplier=1, newNumCapacities=10) # 0,10,...,90
MDP Wallet has 100 states
>>> toState(5,3)
53
>>> setGlobalCapacity(newCapacityMultiplier=10, newNumCapacities=11) # 0,10,...,100
MDP Wallet has 1111 states
>>> toState(5,3)
508
"""
return capacity*maxCapacity + balance
def fromState(state:int)->(int,int):
"""
INPUT: a number representing the state of the MDP
OUTPUT: capacity - total capacity of channel, in units of capacityMultiplier: capacity<numCapacities
balance - balance of Alice in the channel: 0<=balance<=capacity*capacityMultiplier
>>> setGlobalCapacity(newCapacityMultiplier=1, newNumCapacities=10) # 0,10,...,90
MDP Wallet has 100 states
>>> fromState(53)
(5, 3)
>>> setGlobalCapacity(newCapacityMultiplier=10, newNumCapacities=11) # 0,10,...,100
MDP Wallet has 1111 states
>>> fromState(508)
(5, 3)
"""
capacity = state // maxCapacity
balance = state % maxCapacity
return (capacity,balance)
def states():
"""
Generates all possible states as triples: (capacity, balance, stateID)
capacity is given in units of capacityMultiplier: capacity<numCapacities
balance is given in coins: 0<=balance<=capacity*capacityMultiplier
"""
for capacity in range(numCapacities):
for balance in range(capacity*capacityMultiplier+1):
yield (capacity, balance, toState(capacity, balance))
#######
def setTransitionAndReward(fromState,toState,curTransitions,transition,curRewards,reward):
curTransitions[fromState,toState] = transition
curRewards[fromState,toState] = reward
def addTransitionAndReward(fromState,toState,curTransitions,transition,curRewards,reward):
curRewards[fromState,toState] = \
curRewards[fromState,toState] * curTransitions[fromState,toState] + \
reward * (1-curTransitions[fromState,toState])
curTransitions[fromState,toState] = curTransitions[fromState,toState] + transition
def findPolicy(
p = 0.1, # probability that the next send is from Alice to Bob
A = 3, # amount Alice sends to Bob
B = 1, # amount Bob sends to Alicec
txCost = 1, # Cost of a blockchain transaction
txsPerReset = 1, # Number of blockchain transactions required for a channel reset
interest = 0.001,
discount = None
):
if not discount: discount = 1 / (1+interest)
rtCost = txCost*txsPerReset
# STEP A: construct the matrices of transitions and rewards.
# Each list below will have one matrix per action; each matrix is numStates x numStates.
transitions = []
rewards = []
# ACTION 0: ALICE SENDS THROUGH BLOCKCHAIN
curTransitions = np.identity(numStates)
curRewards = np.zeros((numStates,numStates))
for (capacity,balance,state) in states():
capacityCoins = capacity*capacityMultiplier
channelCost = interest*capacityCoins
# Case #1: Alice sends through the blockchain - channel does not change:
setTransitionAndReward(state,state, curTransitions,p, curRewards,-txCost-channelCost)
# Case #2: Bob sends:
if capacityCoins-balance < B: # Bob's balance is too low - will always use blockchain - channel unchanged
addTransitionAndReward(state,state, curTransitions,1-p, curRewards,-channelCost)
else: # Bob will use the channel if he wants to send
addTransitionAndReward(state,toState(capacity,balance+B), curTransitions,1-p, curRewards,-channelCost)
#print(curTransitions.sum(axis=1)) # this should be all ones
transitions.append(curTransitions)
rewards.append(curRewards)
# ACTION 1: ALICE SENDS THROUGH CHANNEL - NO RESET
curTransitions = np.identity(numStates)
curRewards = np.zeros((numStates,numStates))
for (capacity,balance,state) in states():
capacityCoins = capacity*capacityMultiplier
channelCost = interest*capacityCoins
if balance < A: # Alice's balance is too low - state unchanged and Alice goes to hell
setTransitionAndReward(state,state, curTransitions,1.0, curRewards,-INFINITE_COST)
else:
# Case #1: Alice sends through channel:
setTransitionAndReward(state,state, curTransitions,0, curRewards,-channelCost)
setTransitionAndReward(state, toState(capacity,balance-A), curTransitions,p, curRewards,-channelCost)
# Case #2: Bob sends:
if capacityCoins-balance < B: # Bob's balance is too low - will always use blockchain - channel unchanged
addTransitionAndReward(state,state, curTransitions,1-p, curRewards,-channelCost)
else: # Bob will use the channel if he wants to send
addTransitionAndReward(state,toState(capacity,balance+B), curTransitions,1-p, curRewards,-channelCost)
#print(curTransitions.sum(axis=1)) # this should be all ones
transitions.append(curTransitions)
rewards.append(curRewards)
# ACTIONS 2...numCapacities: ALICE SENDS THROUGH CHANNEL AFTER RESETTING IT TO "(action,0)"
# NOTE: RESET OCCURS ONLY IF ALICE SENDS!
for action in range(2,numCapacities):
curTransitions = np.identity(numStates)
curRewards = np.zeros((numStates,numStates))
for (capacity,balance,state) in states():
capacityCoins = capacity*capacityMultiplier
channelCost = interest*capacityCoins
capacityAfterReset = action
balanceAfterReset = capacityAfterReset*capacityMultiplier
if balanceAfterReset < A: # Alice's balance after pouring is too low for sending - state unchanged and Alice goes to hell
setTransitionAndReward(state,state, curTransitions,1.0, curRewards,-INFINITE_COST)
elif capacityAfterReset >= numCapacities: # Channel capacity after reset is too high - state unchanged and Alice goes to hell
setTransitionAndReward(state,state, curTransitions,1.0, curRewards,-INFINITE_COST)
else:
# Case #1: Alice resets and sends through the channel:
setTransitionAndReward(state,state, curTransitions,0, curRewards,-channelCost)
setTransitionAndReward(state, toState(capacityAfterReset, balanceAfterReset-A), curTransitions,p, curRewards,-rtCost-channelCost)
# Case #2: Bob sends:
if capacityCoins-balance < B: # Bob's balance is too low - will always use blockchain - channel unchanged
addTransitionAndReward(state,state, curTransitions,1-p, curRewards,-channelCost)
else: # Bob will use the channel if he wants to send
addTransitionAndReward(state,toState(capacity,balance+B), curTransitions,1-p, curRewards,-channelCost)
#print(curTransitions.sum(axis=1)) # this should be all ones
transitions.append(curTransitions)
rewards.append(curRewards)
print("transitions",len(transitions),"x",transitions[0].shape, "\n")
print("rewards",len(rewards),"x",rewards[0].shape)
# STEP B: solve the MDP:
solver = mdp.ValueIteration(transitions, rewards, discount)
solver.run()
return (solver.policy,solver.V)
def actionToString(action):
if action==0:
return "blockchain"
elif action==1:
return "channel"
else:
return "reset "+str(action*capacityMultiplier)
def policyToHTML(policy,value):
htmlHeading = (
#"<h2>Alice's policy for next send [expected value]</h2>"
"<h2>Alice's policy for next send</h2>\n<p>Discounted cost = {:0.2f}</p>\n".format(value[toState(0,0)])
)
htmlTable = "<table border='1' padding='1'>\n"
htmlHeaderRow = " <tr><th> Alice's balance →<br/>Channel capacity ↓</th>\n"
for balance in range(maxCapacity+1):
htmlHeaderRow += " <td>"+str(balance)+"</td>\n"
htmlHeaderRow += " </tr>\n"
htmlTable += htmlHeaderRow
for capacity in range(numCapacities):
capacityCoins = capacity*capacityMultiplier
htmlRow = " <tr><th>"+str(capacityCoins)+"</th>\n"
for balance in range(capacityCoins+1):
state = toState(capacity,balance)
action = policy[state]
#htmlRow += " <td>{} [{:0.2f}]</td>\n".format(actionToString(action), value[state])
htmlRow += " <td>{}</td>\n".format(actionToString(action))
htmlRow += " </tr>\n"
htmlTable += htmlRow
htmlTable += ' </table>'
return htmlHeading + htmlTable
if __name__=="__main__":
import doctest
doctest.testmod()
print("Doctest OK!")
setGlobalCapacity(
newNumCapacities = 10, # Number of different capacities.
newCapacityMultiplier = 2 # The multiplier of the capacities. E.g, with multiplier 10, the capacities are 0,10,20,...
)
(policy,value) = findPolicy(
p = 0.1, # probability that the next send is from Alice to Bob
A = 4, # amount Alice sends to Bob
B = 1, # amount Bob sends to Alicec
txCost = 2, # Cost of a blockchain transaction
txsPerReset = 1, # num of blockchain transactions required for channel reset
interest = 0.001, # interest rate - cost of locking money in a channel
discount = 0.999 # discount factor for MDP calculations.
)
print(policyToHTML(policy,value)) | old/mdpwallet.py | import mdptoolbox
import mdptoolbox.mdp as mdp
import doctest
import numpy as np
INFINITE_COST = 1e30 # used to denote illegal actions (np.inf does not work)
# The number of states depends on the maximum channel capacity - the maximum number of coins Alice holds initially.
# To keep the number of states at a reasonable level, not all capacities are allowed -
# the only allowed capacites are multiples of capacityMultiplier.
def setGlobalCapacity(newCapacityMultiplier:int, newNumCapacities:int):
global capacityMultiplier, numCapacities, maxCapacity, numStates
capacityMultiplier = newCapacityMultiplier
numCapacities = newNumCapacities
maxCapacity = (numCapacities-1)*capacityMultiplier+1
numStates = maxCapacity*numCapacities
print("MDP Wallet has {} states".format(numStates))
setGlobalCapacity(newCapacityMultiplier = 2, newNumCapacities = 10)
### states:
def toState(capacity:int,balance:int)->int:
"""
INPUT: capacity - total capacity of channel, in units of capacityMultiplier: capacity<numCapacities
balance - balance of Alice in the channel: 0<=balance<=capacity*capacityMultiplier
OUTPUT: a number representing the state of the MDP
>>> setGlobalCapacity(newCapacityMultiplier=1, newNumCapacities=10) # 0,10,...,90
MDP Wallet has 100 states
>>> toState(5,3)
53
>>> setGlobalCapacity(newCapacityMultiplier=10, newNumCapacities=11) # 0,10,...,100
MDP Wallet has 1111 states
>>> toState(5,3)
508
"""
return capacity*maxCapacity + balance
def fromState(state:int)->(int,int):
"""
INPUT: a number representing the state of the MDP
OUTPUT: capacity - total capacity of channel, in units of capacityMultiplier: capacity<numCapacities
balance - balance of Alice in the channel: 0<=balance<=capacity*capacityMultiplier
>>> setGlobalCapacity(newCapacityMultiplier=1, newNumCapacities=10) # 0,10,...,90
MDP Wallet has 100 states
>>> fromState(53)
(5, 3)
>>> setGlobalCapacity(newCapacityMultiplier=10, newNumCapacities=11) # 0,10,...,100
MDP Wallet has 1111 states
>>> fromState(508)
(5, 3)
"""
capacity = state // maxCapacity
balance = state % maxCapacity
return (capacity,balance)
def states():
"""
Generates all possible states as triples: (capacity, balance, stateID)
capacity is given in units of capacityMultiplier: capacity<numCapacities
balance is given in coins: 0<=balance<=capacity*capacityMultiplier
"""
for capacity in range(numCapacities):
for balance in range(capacity*capacityMultiplier+1):
yield (capacity, balance, toState(capacity, balance))
#######
def setTransitionAndReward(fromState,toState,curTransitions,transition,curRewards,reward):
curTransitions[fromState,toState] = transition
curRewards[fromState,toState] = reward
def addTransitionAndReward(fromState,toState,curTransitions,transition,curRewards,reward):
curRewards[fromState,toState] = \
curRewards[fromState,toState] * curTransitions[fromState,toState] + \
reward * (1-curTransitions[fromState,toState])
curTransitions[fromState,toState] = curTransitions[fromState,toState] + transition
def findPolicy(
p = 0.1, # probability that the next send is from Alice to Bob
A = 3, # amount Alice sends to Bob
B = 1, # amount Bob sends to Alicec
txCost = 1, # Cost of a blockchain transaction
txsPerReset = 1, # Number of blockchain transactions required for a channel reset
interest = 0.001,
discount = None
):
if not discount: discount = 1 / (1+interest)
rtCost = txCost*txsPerReset
# STEP A: construct the matrices of transitions and rewards.
# Each list below will have one matrix per action; each matrix is numStates x numStates.
transitions = []
rewards = []
# ACTION 0: ALICE SENDS THROUGH BLOCKCHAIN
curTransitions = np.identity(numStates)
curRewards = np.zeros((numStates,numStates))
for (capacity,balance,state) in states():
capacityCoins = capacity*capacityMultiplier
channelCost = interest*capacityCoins
# Case #1: Alice sends through the blockchain - channel does not change:
setTransitionAndReward(state,state, curTransitions,p, curRewards,-txCost-channelCost)
# Case #2: Bob sends:
if capacityCoins-balance < B: # Bob's balance is too low - will always use blockchain - channel unchanged
addTransitionAndReward(state,state, curTransitions,1-p, curRewards,-channelCost)
else: # Bob will use the channel if he wants to send
addTransitionAndReward(state,toState(capacity,balance+B), curTransitions,1-p, curRewards,-channelCost)
#print(curTransitions.sum(axis=1)) # this should be all ones
transitions.append(curTransitions)
rewards.append(curRewards)
# ACTION 1: ALICE SENDS THROUGH CHANNEL - NO RESET
curTransitions = np.identity(numStates)
curRewards = np.zeros((numStates,numStates))
for (capacity,balance,state) in states():
capacityCoins = capacity*capacityMultiplier
channelCost = interest*capacityCoins
if balance < A: # Alice's balance is too low - state unchanged and Alice goes to hell
setTransitionAndReward(state,state, curTransitions,1.0, curRewards,-INFINITE_COST)
else:
# Case #1: Alice sends through channel:
setTransitionAndReward(state,state, curTransitions,0, curRewards,-channelCost)
setTransitionAndReward(state, toState(capacity,balance-A), curTransitions,p, curRewards,-channelCost)
# Case #2: Bob sends:
if capacityCoins-balance < B: # Bob's balance is too low - will always use blockchain - channel unchanged
addTransitionAndReward(state,state, curTransitions,1-p, curRewards,-channelCost)
else: # Bob will use the channel if he wants to send
addTransitionAndReward(state,toState(capacity,balance+B), curTransitions,1-p, curRewards,-channelCost)
#print(curTransitions.sum(axis=1)) # this should be all ones
transitions.append(curTransitions)
rewards.append(curRewards)
# ACTIONS 2...numCapacities: ALICE SENDS THROUGH CHANNEL AFTER RESETTING IT TO "(action,0)"
# NOTE: RESET OCCURS ONLY IF ALICE SENDS!
for action in range(2,numCapacities):
curTransitions = np.identity(numStates)
curRewards = np.zeros((numStates,numStates))
for (capacity,balance,state) in states():
capacityCoins = capacity*capacityMultiplier
channelCost = interest*capacityCoins
capacityAfterReset = action
balanceAfterReset = capacityAfterReset*capacityMultiplier
if balanceAfterReset < A: # Alice's balance after pouring is too low for sending - state unchanged and Alice goes to hell
setTransitionAndReward(state,state, curTransitions,1.0, curRewards,-INFINITE_COST)
elif capacityAfterReset >= numCapacities: # Channel capacity after reset is too high - state unchanged and Alice goes to hell
setTransitionAndReward(state,state, curTransitions,1.0, curRewards,-INFINITE_COST)
else:
# Case #1: Alice resets and sends through the channel:
setTransitionAndReward(state,state, curTransitions,0, curRewards,-channelCost)
setTransitionAndReward(state, toState(capacityAfterReset, balanceAfterReset-A), curTransitions,p, curRewards,-rtCost-channelCost)
# Case #2: Bob sends:
if capacityCoins-balance < B: # Bob's balance is too low - will always use blockchain - channel unchanged
addTransitionAndReward(state,state, curTransitions,1-p, curRewards,-channelCost)
else: # Bob will use the channel if he wants to send
addTransitionAndReward(state,toState(capacity,balance+B), curTransitions,1-p, curRewards,-channelCost)
#print(curTransitions.sum(axis=1)) # this should be all ones
transitions.append(curTransitions)
rewards.append(curRewards)
print("transitions",len(transitions),"x",transitions[0].shape, "\n")
print("rewards",len(rewards),"x",rewards[0].shape)
# STEP B: solve the MDP:
solver = mdp.ValueIteration(transitions, rewards, discount)
solver.run()
return (solver.policy,solver.V)
def actionToString(action):
if action==0:
return "blockchain"
elif action==1:
return "channel"
else:
return "reset "+str(action*capacityMultiplier)
def policyToHTML(policy,value):
htmlHeading = (
#"<h2>Alice's policy for next send [expected value]</h2>"
"<h2>Alice's policy for next send</h2>\n<p>Discounted cost = {:0.2f}</p>\n".format(value[toState(0,0)])
)
htmlTable = "<table border='1' padding='1'>\n"
htmlHeaderRow = " <tr><th> Alice's balance →<br/>Channel capacity ↓</th>\n"
for balance in range(maxCapacity+1):
htmlHeaderRow += " <td>"+str(balance)+"</td>\n"
htmlHeaderRow += " </tr>\n"
htmlTable += htmlHeaderRow
for capacity in range(numCapacities):
capacityCoins = capacity*capacityMultiplier
htmlRow = " <tr><th>"+str(capacityCoins)+"</th>\n"
for balance in range(capacityCoins+1):
state = toState(capacity,balance)
action = policy[state]
#htmlRow += " <td>{} [{:0.2f}]</td>\n".format(actionToString(action), value[state])
htmlRow += " <td>{}</td>\n".format(actionToString(action))
htmlRow += " </tr>\n"
htmlTable += htmlRow
htmlTable += ' </table>'
return htmlHeading + htmlTable
if __name__=="__main__":
import doctest
doctest.testmod()
print("Doctest OK!")
setGlobalCapacity(
newNumCapacities = 10, # Number of different capacities.
newCapacityMultiplier = 2 # The multiplier of the capacities. E.g, with multiplier 10, the capacities are 0,10,20,...
)
(policy,value) = findPolicy(
p = 0.1, # probability that the next send is from Alice to Bob
A = 4, # amount Alice sends to Bob
B = 1, # amount Bob sends to Alicec
txCost = 2, # Cost of a blockchain transaction
txsPerReset = 1, # num of blockchain transactions required for channel reset
interest = 0.001, # interest rate - cost of locking money in a channel
discount = 0.999 # discount factor for MDP calculations.
)
print(policyToHTML(policy,value)) | 0.538012 | 0.462655 |
import os
from typing import List, Callable, Union
import numpy as np
import tensorflow.compat.v1 as tf
import tensorflow_hub as hub
tf.disable_eager_execution() # tensorflow-hub is based on v1 of tf which doesnot support eager mode
class USE(object):
def __init__(self, cache_path):
super(USE, self).__init__()
os.environ['TFHUB_CACHE_DIR'] = cache_path
module_url = "https://tfhub.dev/google/universal-sentence-encoder-large/3" # note there are version 4 and 5 already, but this is what the paper used
self.embed = hub.Module(module_url)
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
self.sess = tf.Session(config=config)
self.build_graph()
self.sess.run([tf.global_variables_initializer(), tf.tables_initializer()])
def build_graph(self):
self.sts_input1 = tf.placeholder(tf.string, shape=(None))
self.sts_input2 = tf.placeholder(tf.string, shape=(None))
sts_encode1 = tf.nn.l2_normalize(self.embed(self.sts_input1), axis=1)
sts_encode2 = tf.nn.l2_normalize(self.embed(self.sts_input2), axis=1)
self.cosine_similarities = tf.reduce_sum(tf.multiply(sts_encode1, sts_encode2), axis=1)
clip_cosine_similarities = tf.clip_by_value(self.cosine_similarities, -1.0, 1.0)
self.sim_scores = 1.0 - tf.acos(clip_cosine_similarities)
def semantic_sim(self, sents1: List[str], sents2: List[str]) -> np.ndarray:
"""
Either two list with n strings each, in which case it will compute the similarity between each respective pair and return an array
of length n, or sent1 must be a list of length 1, in which case it will compute the similarity between the string in it to each of
the string in sents2
:return: since it does cosine similarity, the results are in [-1, 1] where 1 is identical and -1 very dissimilar. note that values
such as 0.5 are still very high
"""
scores = self.sess.run(
[self.sim_scores],
feed_dict={
self.sts_input1: sents1,
self.sts_input2: sents2,
})
return scores[0]
_use_model = None
def get_semantic_sim_predictor(tfhub_cache_path) -> Callable[[List[str], List[str]], np.ndarray]:
global _use_model
if _use_model is None:
if tfhub_cache_path is None:
if not os.environ.get('TFHUB_CACHE_DIR', ''):
print('Please initialize semantic sim predictor with a valid path to tfhub cache dir')
exit(1)
tfhub_cache_path = os.environ['TFHUB_CACHE_DIR']
print(f'Loading USE model (cache_dir={tfhub_cache_path})... ')
_use_model = USE(tfhub_cache_path)
print('Done loading USE model!')
return _use_model.semantic_sim
if __name__ == '__main__':
sent1 = 'Hello there! my name is Maor and I want to understand how this works but it requires a lot of words for some reason'
sent2 = 'Hello there! my name is Maor and I desire to comprehend how this operates but it needs many words for some reason'
sent3 = 'This sentence has no relation whatsoever to the previous two'
sent4 = 'sfkghkdfhg kjshdf gjkhsdfkgfn aldhnfg sdnf gjlnsdlf gfnskdnfgjkn sd;fn gksdf gdg'
use_model = USE('/media/maor/Data/data/tfhub_cache')
print(use_model.semantic_sim([sent1], [sent1, sent2, sent3, sent4])) | attacks/sem_sim_model.py |
import os
from typing import List, Callable, Union
import numpy as np
import tensorflow.compat.v1 as tf
import tensorflow_hub as hub
tf.disable_eager_execution() # tensorflow-hub is based on v1 of tf which doesnot support eager mode
class USE(object):
def __init__(self, cache_path):
super(USE, self).__init__()
os.environ['TFHUB_CACHE_DIR'] = cache_path
module_url = "https://tfhub.dev/google/universal-sentence-encoder-large/3" # note there are version 4 and 5 already, but this is what the paper used
self.embed = hub.Module(module_url)
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
self.sess = tf.Session(config=config)
self.build_graph()
self.sess.run([tf.global_variables_initializer(), tf.tables_initializer()])
def build_graph(self):
self.sts_input1 = tf.placeholder(tf.string, shape=(None))
self.sts_input2 = tf.placeholder(tf.string, shape=(None))
sts_encode1 = tf.nn.l2_normalize(self.embed(self.sts_input1), axis=1)
sts_encode2 = tf.nn.l2_normalize(self.embed(self.sts_input2), axis=1)
self.cosine_similarities = tf.reduce_sum(tf.multiply(sts_encode1, sts_encode2), axis=1)
clip_cosine_similarities = tf.clip_by_value(self.cosine_similarities, -1.0, 1.0)
self.sim_scores = 1.0 - tf.acos(clip_cosine_similarities)
def semantic_sim(self, sents1: List[str], sents2: List[str]) -> np.ndarray:
"""
Either two list with n strings each, in which case it will compute the similarity between each respective pair and return an array
of length n, or sent1 must be a list of length 1, in which case it will compute the similarity between the string in it to each of
the string in sents2
:return: since it does cosine similarity, the results are in [-1, 1] where 1 is identical and -1 very dissimilar. note that values
such as 0.5 are still very high
"""
scores = self.sess.run(
[self.sim_scores],
feed_dict={
self.sts_input1: sents1,
self.sts_input2: sents2,
})
return scores[0]
_use_model = None
def get_semantic_sim_predictor(tfhub_cache_path) -> Callable[[List[str], List[str]], np.ndarray]:
global _use_model
if _use_model is None:
if tfhub_cache_path is None:
if not os.environ.get('TFHUB_CACHE_DIR', ''):
print('Please initialize semantic sim predictor with a valid path to tfhub cache dir')
exit(1)
tfhub_cache_path = os.environ['TFHUB_CACHE_DIR']
print(f'Loading USE model (cache_dir={tfhub_cache_path})... ')
_use_model = USE(tfhub_cache_path)
print('Done loading USE model!')
return _use_model.semantic_sim
if __name__ == '__main__':
sent1 = 'Hello there! my name is Maor and I want to understand how this works but it requires a lot of words for some reason'
sent2 = 'Hello there! my name is Maor and I desire to comprehend how this operates but it needs many words for some reason'
sent3 = 'This sentence has no relation whatsoever to the previous two'
sent4 = 'sfkghkdfhg kjshdf gjkhsdfkgfn aldhnfg sdnf gjlnsdlf gfnskdnfgjkn sd;fn gksdf gdg'
use_model = USE('/media/maor/Data/data/tfhub_cache')
print(use_model.semantic_sim([sent1], [sent1, sent2, sent3, sent4])) | 0.807954 | 0.350116 |
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema: the conventional FreeRADIUS SQL tables.

    Creates models bound (via ``db_table``) to the standard FreeRADIUS
    table names — ``nas``, ``radacct``, ``radcheck``, ``radgroupcheck``,
    ``radgroupreply``, ``radpostauth``, ``radreply``, ``radusergroup`` —
    so Django can share a database with a RADIUS server.
    """

    initial = True

    dependencies = [
        # Radcheck.created_by is a FK to the (swappable) user model.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        # NOTE(review): this is the only table with managed=False (Django
        # will neither create nor alter it) — presumably it is owned by
        # the RADIUS server itself; confirm the asymmetry is intentional.
        migrations.CreateModel(
            name='Radgroupcheck',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('groupname', models.TextField()),
                ('attribute', models.TextField()),
                ('op', models.CharField(max_length=2)),
                ('value', models.TextField()),
            ],
            options={
                'db_table': 'radgroupcheck',
                'managed': False,
            },
        ),
        # Network Access Server registry (RADIUS clients).
        migrations.CreateModel(
            name='Nas',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('nasname', models.TextField()),
                ('shortname', models.TextField()),
                ('type', models.TextField()),
                ('ports', models.IntegerField(blank=True, null=True)),
                ('secret', models.TextField()),
                ('server', models.TextField(blank=True, null=True)),
                ('community', models.TextField(blank=True, null=True)),
                ('description', models.TextField(blank=True, null=True)),
            ],
            options={
                'db_table': 'nas',
            },
        ),
        # Accounting records: one row per session, keyed by a unique
        # session identifier; start/update/stop timestamps and counters.
        migrations.CreateModel(
            name='Radacct',
            fields=[
                ('radacctid', models.BigAutoField(primary_key=True, serialize=False)),
                ('acctsessionid', models.TextField()),
                ('acctuniqueid', models.TextField(unique=True)),
                ('username', models.TextField(blank=True, null=True)),
                ('realm', models.TextField(blank=True, null=True)),
                ('nasipaddress', models.GenericIPAddressField()),
                ('nasportid', models.TextField(blank=True, null=True)),
                ('nasporttype', models.TextField(blank=True, null=True)),
                ('acctstarttime', models.DateTimeField(blank=True, null=True)),
                ('acctupdatetime', models.DateTimeField(blank=True, null=True)),
                ('acctstoptime', models.DateTimeField(blank=True, null=True)),
                ('acctinterval', models.BigIntegerField(blank=True, null=True)),
                ('acctsessiontime', models.BigIntegerField(blank=True, null=True)),
                ('acctauthentic', models.TextField(blank=True, null=True)),
                ('connectinfo_start', models.TextField(blank=True, null=True)),
                ('connectinfo_stop', models.TextField(blank=True, null=True)),
                ('acctinputoctets', models.BigIntegerField(blank=True, null=True)),
                ('acctoutputoctets', models.BigIntegerField(blank=True, null=True)),
                ('calledstationid', models.TextField(blank=True, null=True)),
                ('callingstationid', models.TextField(blank=True, null=True)),
                ('acctterminatecause', models.TextField(blank=True, null=True)),
                ('servicetype', models.TextField(blank=True, null=True)),
                ('framedprotocol', models.TextField(blank=True, null=True)),
                ('framedipaddress', models.GenericIPAddressField(blank=True, null=True)),
            ],
            options={
                'db_table': 'radacct',
            },
        ),
        migrations.CreateModel(
            name='Radgroupreply',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('groupname', models.TextField()),
                ('attribute', models.TextField()),
                ('op', models.CharField(max_length=2)),
                ('value', models.TextField()),
            ],
            options={
                'db_table': 'radgroupreply',
            },
        ),
        # Post-authentication log. 'pass' is a SQL column name that clashes
        # with the Python keyword, hence the db_column remapping.
        migrations.CreateModel(
            name='Radpostauth',
            fields=[
                ('id', models.BigAutoField(primary_key=True, serialize=False)),
                ('username', models.TextField()),
                ('pass_field', models.TextField(blank=True, db_column='pass', null=True)),
                ('reply', models.TextField(blank=True, null=True)),
                ('calledstationid', models.TextField(blank=True, null=True)),
                ('callingstationid', models.TextField(blank=True, null=True)),
                ('authdate', models.DateTimeField()),
            ],
            options={
                'db_table': 'radpostauth',
            },
        ),
        migrations.CreateModel(
            name='Radreply',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('username', models.TextField()),
                ('attribute', models.TextField()),
                ('op', models.CharField(max_length=2)),
                ('value', models.TextField()),
            ],
            options={
                'db_table': 'radreply',
            },
        ),
        migrations.CreateModel(
            name='Radusergroup',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('username', models.TextField()),
                ('groupname', models.TextField()),
                ('priority', models.IntegerField()),
            ],
            options={
                'db_table': 'radusergroup',
            },
        ),
        # Per-user check attributes, extended beyond the stock FreeRADIUS
        # schema with audit fields (created/modified/created_by/mac/description)
        # and a constrained attribute choice (hashed vs cleartext password).
        migrations.CreateModel(
            name='Radcheck',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created', models.DateTimeField(auto_now_add=True, null=True)),
                ('modified', models.DateTimeField(auto_now=True)),
                ('username', models.CharField(max_length=64)),
                ('attribute', models.CharField(choices=[('NT-Password', 'hashed'), ('Cleartext-Password', 'cleartext')], max_length=64)),
                ('op', models.CharField(max_length=2)),
                ('value', models.CharField(max_length=253)),
                ('mac', models.CharField(blank=True, max_length=50, null=True)),
                ('description', models.TextField(blank=True, null=True)),
                # Keep the row but null the author if the user is deleted.
                ('created_by', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'db_table': 'radcheck',
            },
        ),
    ]
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial migration: creates the FreeRADIUS tables used by this app.

    Most tables (nas, radacct, radgroup*, radreply, radusergroup, radpostauth)
    mirror the standard FreeRADIUS SQL schema and use its lowercase table
    names via ``db_table``.  ``Radcheck`` additionally carries audit fields
    (created/modified/created_by) and a foreign key to the configured user
    model.  ``Radgroupcheck`` is ``managed=False`` — Django will not create
    or drop that table itself.
    """

    initial = True

    dependencies = [
        # the user model may be swapped, so depend on it symbolically
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Radgroupcheck',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('groupname', models.TextField()),
                ('attribute', models.TextField()),
                ('op', models.CharField(max_length=2)),
                ('value', models.TextField()),
            ],
            options={
                'db_table': 'radgroupcheck',
                'managed': False,
            },
        ),
        migrations.CreateModel(
            name='Nas',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('nasname', models.TextField()),
                ('shortname', models.TextField()),
                ('type', models.TextField()),
                ('ports', models.IntegerField(blank=True, null=True)),
                ('secret', models.TextField()),
                ('server', models.TextField(blank=True, null=True)),
                ('community', models.TextField(blank=True, null=True)),
                ('description', models.TextField(blank=True, null=True)),
            ],
            options={
                'db_table': 'nas',
            },
        ),
        migrations.CreateModel(
            name='Radacct',
            fields=[
                ('radacctid', models.BigAutoField(primary_key=True, serialize=False)),
                ('acctsessionid', models.TextField()),
                ('acctuniqueid', models.TextField(unique=True)),
                ('username', models.TextField(blank=True, null=True)),
                ('realm', models.TextField(blank=True, null=True)),
                ('nasipaddress', models.GenericIPAddressField()),
                ('nasportid', models.TextField(blank=True, null=True)),
                ('nasporttype', models.TextField(blank=True, null=True)),
                ('acctstarttime', models.DateTimeField(blank=True, null=True)),
                ('acctupdatetime', models.DateTimeField(blank=True, null=True)),
                ('acctstoptime', models.DateTimeField(blank=True, null=True)),
                ('acctinterval', models.BigIntegerField(blank=True, null=True)),
                ('acctsessiontime', models.BigIntegerField(blank=True, null=True)),
                ('acctauthentic', models.TextField(blank=True, null=True)),
                ('connectinfo_start', models.TextField(blank=True, null=True)),
                ('connectinfo_stop', models.TextField(blank=True, null=True)),
                ('acctinputoctets', models.BigIntegerField(blank=True, null=True)),
                ('acctoutputoctets', models.BigIntegerField(blank=True, null=True)),
                ('calledstationid', models.TextField(blank=True, null=True)),
                ('callingstationid', models.TextField(blank=True, null=True)),
                ('acctterminatecause', models.TextField(blank=True, null=True)),
                ('servicetype', models.TextField(blank=True, null=True)),
                ('framedprotocol', models.TextField(blank=True, null=True)),
                ('framedipaddress', models.GenericIPAddressField(blank=True, null=True)),
            ],
            options={
                'db_table': 'radacct',
            },
        ),
        migrations.CreateModel(
            name='Radgroupreply',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('groupname', models.TextField()),
                ('attribute', models.TextField()),
                ('op', models.CharField(max_length=2)),
                ('value', models.TextField()),
            ],
            options={
                'db_table': 'radgroupreply',
            },
        ),
        migrations.CreateModel(
            name='Radpostauth',
            fields=[
                ('id', models.BigAutoField(primary_key=True, serialize=False)),
                ('username', models.TextField()),
                # 'pass' is a SQL keyword-ish legacy column; mapped to pass_field
                ('pass_field', models.TextField(blank=True, db_column='pass', null=True)),
                ('reply', models.TextField(blank=True, null=True)),
                ('calledstationid', models.TextField(blank=True, null=True)),
                ('callingstationid', models.TextField(blank=True, null=True)),
                ('authdate', models.DateTimeField()),
            ],
            options={
                'db_table': 'radpostauth',
            },
        ),
        migrations.CreateModel(
            name='Radreply',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('username', models.TextField()),
                ('attribute', models.TextField()),
                ('op', models.CharField(max_length=2)),
                ('value', models.TextField()),
            ],
            options={
                'db_table': 'radreply',
            },
        ),
        migrations.CreateModel(
            name='Radusergroup',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('username', models.TextField()),
                ('groupname', models.TextField()),
                ('priority', models.IntegerField()),
            ],
            options={
                'db_table': 'radusergroup',
            },
        ),
        migrations.CreateModel(
            name='Radcheck',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created', models.DateTimeField(auto_now_add=True, null=True)),
                ('modified', models.DateTimeField(auto_now=True)),
                ('username', models.CharField(max_length=64)),
                ('attribute', models.CharField(choices=[('NT-Password', 'hashed'), ('Cleartext-Password', 'cleartext')], max_length=64)),
                ('op', models.CharField(max_length=2)),
                ('value', models.CharField(max_length=253)),
                ('mac', models.CharField(blank=True, max_length=50, null=True)),
                ('description', models.TextField(blank=True, null=True)),
                # keep the row if its creator is deleted
                ('created_by', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'db_table': 'radcheck',
            },
        ),
    ]
import sys
import textwrap
import argparse
import networkx as nx
from config import *
import re
import os
import math
import random
import numpy as np
def parse_node_name(node_name, max_router, max_host):
    """Parse a topology token like '3r' (router) or '5e' (end host).

    Returns a tuple ``(ned_name, max_router, max_host)`` where ``ned_name``
    is the NED-style index ("router[3]" / "host[5]") and the running maxima
    are updated with the parsed index, or ``-1`` when the token is
    malformed (callers test ``== -1``).
    """
    try:
        index = int(node_name[:-1])
    except (TypeError, ValueError):
        # non-numeric prefix (also covers the empty string) -> sentinel
        return -1
    if node_name[-1] == 'r':
        return ("router[" + str(index) + "]", max(max_router, index), max_host)
    if node_name[-1] == 'e':
        return ("host[" + str(index) + "]", max_router, max(max_host, index))
    # unknown node-type suffix
    return -1
# take the topology file in a specific format and write it to a ned file
def write_ned_file(topo_filename, output_filename, network_name, routing_alg):
    """Translate an intermediate topology file into an OMNeT++ .ned file.

    topo_filename: text file whose first line lists the landmarks and whose
        remaining lines are edges of the form
        [node1] [node2] [1->2 delay] [2->1 delay] [balance @ 1] [balance @ 2]
    output_filename: path of the .ned file to write.
    network_name: base name of the generated network (suffixed with the
        routing algorithm).
    routing_alg: routing scheme name; selects the host/router module types.
    """
    linklist = []
    max_router = -1
    max_host = -1
    # parse every edge line, tracking the largest router/host index seen
    with open(topo_filename) as topo_file:
        for line_num, line in enumerate(topo_file, start=1):
            if line_num == 1:
                # first line lists the landmarks, not an edge
                continue
            if line == "\n":
                continue
            tokens = line.split()
            n1 = parse_node_name(tokens[0], max_router, max_host)
            if n1 == -1:
                print("Bad line1 " + line)
                continue
            max_router, max_host = n1[1], n1[2]
            n2 = parse_node_name(tokens[1], max_router, max_host)
            if n2 == -1:
                print("Bad line 2" + line)
                continue
            max_router, max_host = n2[1], n2[2]
            delay_12 = float(tokens[2])  # delay going from n1 to n2
            delay_21 = float(tokens[3])  # delay going from n2 to n1
            linklist.append((n1[0], n2[0], delay_12, delay_21))
    # indices are 0-based, so module-vector sizes are max index + 1
    max_router = max_router + 1
    max_host = max_host + 1
    # map the routing algorithm onto the host/router NED module types
    if routing_alg == 'shortestPath':
        host_node_type = 'hostNodeBase'
        router_node_type = 'routerNodeBase'
    else:
        if routing_alg in ('DCTCPBal', 'DCTCPQ', 'TCP', 'TCPCubic'):
            host_node_type = 'hostNodeDCTCP'
        elif routing_alg == 'DCTCPRate':
            host_node_type = 'hostNodePropFairPriceScheme'
        else:
            host_node_type = 'hostNode' + routing_alg[0].upper() + routing_alg[1:]
        if routing_alg == 'landmarkRouting':
            router_node_type = 'routerNodeWaterfilling'
        elif routing_alg in ('DCTCPRate', 'DCTCPQ', 'TCP', 'TCPCubic'):
            router_node_type = 'routerNodeDCTCP'
        else:
            router_node_type = 'routerNode' + routing_alg[0].upper() + routing_alg[1:]
    with open(output_filename, "w") as outfile:
        outfile.write("import " + router_node_type + ";\n")
        outfile.write("import " + host_node_type + ";\n\n")
        outfile.write("network " + network_name + "_" + routing_alg + "\n")
        outfile.write("{\n")
        # NOTE: a single default link delay/rate is emitted for all links;
        # per-link values would need extra columns in the topology file.
        outfile.write('\tparameters:\n\t\tdouble linkDelay @unit("s") = default(100us);\n')
        outfile.write('\t\tdouble linkDataRate @unit("Gbps") = default(1Gbps);\n')
        outfile.write('\tsubmodules:\n')
        outfile.write('\t\thost['+str(max_host)+']: ' + host_node_type + ' {} \n')
        outfile.write('\t\trouter['+str(max_router)+']: ' + router_node_type + ' {} \n')
        outfile.write('\tconnections: \n')
        # one bidirectional channel pair per parsed link
        for src, dst, out_delay, in_delay in linklist:
            outfile.write('\t\t' + src + '.out++ --> {delay = ' + str(out_delay) +'ms; }')
            outfile.write(' --> ' + dst + '.in++; \n')
            outfile.write('\t\t' + src + '.in++ <-- {delay = ' + str(in_delay) +'ms; }')
            outfile.write(' <-- ' + dst + '.out++; \n')
        outfile.write('}\n')
# generate either a small world or scale free graph
def generate_graph(size, graph_type):
    """Generate a random `size`-node topology of the requested family.

    graph_type is one of 'random', 'small_world', 'small_world_sparse',
    'scale_free', 'scale_free_sparse' or 'tree'.  Self loops and parallel
    edges are stripped before the (simple, undirected) graph is returned.
    """
    if graph_type == 'random':
        G = nx.dense_gnm_random_graph(size, size * 5, seed=SEED)
    elif graph_type == 'small_world':
        G = nx.watts_strogatz_graph(size, 8, 0.25, seed=SEED)
    elif graph_type == 'small_world_sparse':
        # neighbor count must be an int: size/8 is a float on Python 3
        G = nx.watts_strogatz_graph(size, size // 8, 0.25, seed=SEED)
    elif graph_type == 'scale_free':
        # regular expts; celer expts used m=5 on a 10 node graph
        G = nx.barabasi_albert_graph(size, 8, seed=SEED)
    elif graph_type == 'scale_free_sparse':
        G = nx.barabasi_albert_graph(size, size // 8, seed=SEED)
    elif graph_type == 'tree':
        G = nx.random_tree(size, seed=SEED)
    # remove self loops and parallel edges; Graph.selfloop_edges() was
    # removed in networkx 2.4 — nx.selfloop_edges(G) is the supported API
    G.remove_edges_from(nx.selfloop_edges(G))
    G = nx.Graph(G)
    print('Generated a ', graph_type, ' graph')
    print('number of nodes: ', G.number_of_nodes())
    print('Number of Edges: ', G.number_of_edges())
    print('Number of connected components: ', nx.number_connected_components(G))
    return G
# print the output in the desired format for write_ned_file to read
# generate extra end host nodes if need be
# make the first line list of landmarks for this topology
def print_topology_in_format(G, balance_per_channel, delay_per_channel, output_filename, separate_end_hosts,\
        randomize_init_bal=False, random_channel_capacity=False, lnd_capacity=False, is_lnd=False, rebalancing_enabled=False):
    """Write graph G to output_filename in the intermediate topology format.

    The first line lists the landmark routers; each following line describes
    one channel: node1 node2 delay12 delay21 balance1 balance2 plus the fee
    attributes stored on the edge.  The chosen capacity of every edge is
    also written back onto G under the 'capacity' edge attribute.

    NOTE(review): depends on module-level names from ``config``
    (NUM_LANDMARKS, LND_FILE_PATH, LARGE_BALANCE, REASONABLE_BALANCE, ...)
    and on the module-level ``args`` — confirm these exist before reusing
    this as a library function.
    """
    f1 = open(output_filename, "w+")
    end_host_delay = delay_per_channel
    # offset is computed but never used below
    offset = G.number_of_nodes()
    if (separate_end_hosts == False):
        offset = 0
    # rank nodes by degree (highest first) to pick landmark candidates
    nodes_sorted_by_degree = sorted(G.degree, key=lambda x: x[1], reverse=True)
    # generate landmarks based on degree: walk the ranking one degree class
    # at a time and take evenly spaced members of each class until we have
    # NUM_LANDMARKS of them
    i = 0
    landmarks, current_list = [], []
    max_degree = -1
    while len(landmarks) < NUM_LANDMARKS and i < len(nodes_sorted_by_degree):
        num_remaining = NUM_LANDMARKS - len(landmarks)
        if nodes_sorted_by_degree[i][1] == max_degree:
            # still inside the same degree class
            current_list.append(nodes_sorted_by_degree[i][0])
        else:
            # degree class finished: sample it evenly, then start a new one
            spaced_indices = np.round(np.linspace(0, len(current_list)-1, \
                    min(num_remaining, len(current_list)))).astype(int)
            if max_degree != -1:
                landmarks.extend([current_list[x] for x in spaced_indices])
            current_list = [nodes_sorted_by_degree[i][0]]
            max_degree = nodes_sorted_by_degree[i][1]
        i += 1
    # flush the last (possibly unfinished) degree class
    if len(landmarks) < NUM_LANDMARKS:
        spaced_indices = np.round(np.linspace(0, len(current_list)-1, \
                min(num_remaining, len(current_list)))).astype(int)
        landmarks.extend([current_list[x] for x in spaced_indices])
    # make the first line the landmarks and make them all router nodes
    for l in landmarks[:NUM_LANDMARKS]:
        f1.write(str(l) + "r ")
    f1.write("\n")
    # total_budget / weights / sum_weights are computed but never used below
    total_budget = balance_per_channel * len(G.edges())
    weights = {e: min(G.degree(e[0]), G.degree(e[1])) for e in G.edges()}
    sum_weights = sum(weights.values())
    capacity_dict = dict()
    # get lnd capacity data (only consumed when lnd_capacity is requested)
    lnd_capacities_graph = nx.read_edgelist(LND_FILE_PATH + 'lnd_july15_2019_reducedsize' + '.edgelist')
    lnd_capacities = list(nx.get_edge_attributes(lnd_capacities_graph, 'capacity').values())
    # measured RTT samples, used as per-edge delays for non-uniform lnd runs
    real_rtts = np.loadtxt(LND_FILE_PATH + "ping_times_data")
    # write rest of topology: one line per channel
    for e in G.edges():
        f1.write(str(e[0]) + "r " + str(e[1]) + "r ")
        if not random_channel_capacity and is_lnd and "uniform" not in output_filename:
            # NOTE(review): this rebinds the delay_per_channel parameter, so
            # the last sampled delay also leaks into the end-host lines below
            delay_per_channel = np.random.choice(real_rtts) / 2.0
            f1.write(str(delay_per_channel) + " " + str(delay_per_channel) + " ")
        else:
            f1.write(str(delay_per_channel) + " " + str(delay_per_channel) + " ")
        # pick this channel's capacity according to the configured mode
        if random_channel_capacity:
            # normal around balance_per_channel, resampled until >= 2
            balance_for_this_channel = -1
            while balance_for_this_channel < 2:
                balance_for_this_channel = round(np.random.normal(balance_per_channel, \
                        0.75 * balance_per_channel))
        elif lnd_capacity:
            # sample real lnd capacities, rescaled to the requested mean
            balance_for_this_channel = -1
            while balance_for_this_channel < 40:
                balance_for_this_channel = round(np.random.choice(lnd_capacities) * \
                        (balance_per_channel / np.mean(lnd_capacities)))
        elif is_lnd and "uniform" not in output_filename:
            if "lessScale" in output_filename:
                balance_for_this_channel = float(G[e[0]][e[1]]['capacity'] *10 * balance_per_channel)
            else:
                # base case: fixed scaling of the recorded lnd capacity
                balance_for_this_channel = 16*0.00011111*(float(G[e[0]][e[1]]['capacity']))
        else:
            balance_for_this_channel = balance_per_channel
        capacity_dict[e] = balance_for_this_channel
        # split the capacity between the two channel ends
        if randomize_init_bal:
            one_end_bal = random.randint(1, balance_for_this_channel)
            other_end_bal = balance_for_this_channel - one_end_bal
            f1.write(str(one_end_bal) + " " + str(other_end_bal) + " ")
        else:
            f1.write(str(round(balance_for_this_channel/2)) + " " + \
                    str(round(balance_for_this_channel/2)) + " ")
        # *************************Writing Fees to network**************************
        f1.write(str(G[e[0]][e[1]]['bf1']) + " " + str(G[e[0]][e[1]]['bf2']) + " ");
        f1.write(str(G[e[0]][e[1]]['fr1']) + " " + str(G[e[0]][e[1]]['fr2']) + "\n");
    # generate extra end host nodes: one host attached to every router
    if separate_end_hosts :
        for n in G.nodes():
            f1.write(str(n) + "e " + str(n) + "r ")
            f1.write(str(end_host_delay) + " " + str(end_host_delay) + " ")
            if rebalancing_enabled:
                f1.write(str(REASONABLE_BALANCE) + " " + str(REASONABLE_ROUTER_BALANCE) + " ")
            else:
                f1.write(str(LARGE_BALANCE/2) + " " + str(LARGE_BALANCE/2) + " ")
            # NOTE(review): `e` here is left over from the edge loop above, so
            # every end-host line repeats the fee attributes of the *last*
            # edge written — confirm this is intended
            f1.write(str(G[e[0]][e[1]]['bf1']) + " " + str(G[e[0]][e[1]]['bf2']) + " ");
            f1.write(str(G[e[0]][e[1]]['fr1']) + " " + str(G[e[0]][e[1]]['fr2']) + "\n");
        # extra host-router links for the hand-built parallel graph topology
        if args.graph_type == "parallel_graph":
            for (e,r) in zip([1,3], [0, 2]):
                f1.write(str(e) + "e " + str(r) + "r ")
                f1.write(str(end_host_delay) + " " + str(end_host_delay) + " ")
                f1.write(str(LARGE_BALANCE/2) + " " + str(LARGE_BALANCE/2) + " ")
    f1.close()
    # record the chosen capacities on the graph for downstream use
    nx.set_edge_attributes(G, capacity_dict, 'capacity')
# ---- command-line interface ----
parser = argparse.ArgumentParser(description="Create arbitrary topologies to run the omnet simulator on")
parser.add_argument('--num-nodes', type=int, dest='num_nodes', help='number of nodes in the graph', default=20)
parser.add_argument('--delay-per-channel', type=int, dest='delay_per_channel', \
        help='delay between nodes (ms)', default=30)
parser.add_argument('graph_type', choices=['small_world', 'scale_free', 'hotnets_topo', 'simple_line', 'toy_dctcp', \
        'simple_deadlock', 'simple_topologies', 'parallel_graph', 'dag_example', 'lnd_dec4_2018','lnd_dec4_2018lessScale', \
        'lnd_dec4_2018_randomCap', 'lnd_dec4_2018_modified', 'lnd_uniform', 'tree', 'random', \
        'lnd_july15_2019'], \
        help='type of graph (Small world or scale free or custom topology list)', default='small_world')
parser.add_argument('--balance-per-channel', type=int, dest='balance_per_channel', default=100)
parser.add_argument('--topo-filename', dest='topo_filename', type=str, \
        help='name of intermediate output file', default="topo.txt")
parser.add_argument('--network-name', type=str, dest='network_name', \
        help='name of the output ned filename', default='simpleNet')
parser.add_argument('--separate-end-hosts', action='store_true', \
        help='do you need separate end hosts that only send transactions')
parser.add_argument('--randomize-start-bal', type=str, dest='randomize_start_bal', \
        help='Do not start from pergect balance, but rather randomize it', default='False')
parser.add_argument('--random-channel-capacity', type=str, dest='random_channel_capacity', \
        help='Give channels a random balance between bal/2 and bal', default='False')
parser.add_argument('--lnd-channel-capacity', type=str, dest='lnd_capacity', \
        help='Give channels a random balance sampled from lnd', default='False')
parser.add_argument('--rebalancing-enabled', type=str, dest="rebalancing_enabled",\
        help="should the end host router channel be reasonably sized", default="false")
routing_alg_list = ['lndBaseline']
args = parser.parse_args()
# seed both RNGs so topology generation is reproducible
np.random.seed(SEED)
random.seed(SEED)
# ---- pick or generate the graph ----
if args.num_nodes <= 5 and args.graph_type == 'simple_topologies':
    # tiny hand-built graphs defined in config
    if args.num_nodes == 2:
        G = two_node_graph
    elif args.num_nodes == 3:
        G = three_node_graph
    elif args.num_nodes == 4:
        G = four_node_graph
    elif 'line' in args.network_name:
        G = five_line_graph
    else:
        G = five_node_graph
elif args.graph_type in ['small_world', 'scale_free', 'tree', 'random']:
    if "sparse" in args.topo_filename:
        args.graph_type = args.graph_type + "_sparse"
    G = generate_graph(args.num_nodes, args.graph_type)
elif args.graph_type == 'toy_dctcp':
    G = toy_dctcp_graph
elif args.graph_type == 'dag_example':
    print("generating dag example")
    G = dag_example_graph
elif args.graph_type == 'parallel_graph':
    G = parallel_graph
elif args.graph_type == 'hotnets_topo':
    G = hotnets_topo_graph
elif args.graph_type == 'simple_deadlock':
    G = simple_deadlock_graph
    args.separate_end_hosts = False
elif args.graph_type.startswith('lnd_'):
    G = nx.read_edgelist(LND_FILE_PATH + 'lnd_july15_2019_reducedsize' + '.edgelist')
else:
    G = simple_line_graph
    args.separate_end_hosts = False
# string flags arrive as 'true'/'false'; convert to real booleans
args.randomize_start_bal = args.randomize_start_bal == 'true'
args.random_channel_capacity = args.random_channel_capacity == 'true'
args.lnd_capacity = args.lnd_capacity == 'true'
# ---- emit the intermediate topology file and one .ned per routing alg ----
print_topology_in_format(G, args.balance_per_channel, args.delay_per_channel, args.topo_filename, \
        args.separate_end_hosts, args.randomize_start_bal, args.random_channel_capacity,\
        args.lnd_capacity, args.graph_type.startswith('lnd_'), args.rebalancing_enabled == "true")
network_base = os.path.basename(args.network_name)
for routing_alg in routing_alg_list:
    write_ned_file(args.topo_filename, args.network_name + '_' + routing_alg + '.ned', \
            network_base, routing_alg)
import textwrap
import argparse
import networkx as nx
from config import *
import re
import os
import math
import random
import numpy as np
def parse_node_name(node_name, max_router, max_host):
    """Parse a topology token like '3r' (router) or '5e' (end host).

    Returns a tuple ``(ned_name, max_router, max_host)`` where ``ned_name``
    is the NED-style index ("router[3]" / "host[5]") and the running maxima
    are updated with the parsed index, or ``-1`` when the token is
    malformed (callers test ``== -1``).
    """
    try:
        index = int(node_name[:-1])
    except (TypeError, ValueError):
        # non-numeric prefix (also covers the empty string) -> sentinel
        return -1
    if node_name[-1] == 'r':
        return ("router[" + str(index) + "]", max(max_router, index), max_host)
    if node_name[-1] == 'e':
        return ("host[" + str(index) + "]", max_router, max(max_host, index))
    # unknown node-type suffix
    return -1
# take the topology file in a specific format and write it to a ned file
def write_ned_file(topo_filename, output_filename, network_name, routing_alg):
    """Translate an intermediate topology file into an OMNeT++ .ned file.

    topo_filename: text file whose first line lists the landmarks and whose
        remaining lines are edges of the form
        [node1] [node2] [1->2 delay] [2->1 delay] [balance @ 1] [balance @ 2]
    output_filename: path of the .ned file to write.
    network_name: base name of the generated network (suffixed with the
        routing algorithm).
    routing_alg: routing scheme name; selects the host/router module types.
    """
    linklist = []
    max_router = -1
    max_host = -1
    # parse every edge line, tracking the largest router/host index seen
    with open(topo_filename) as topo_file:
        for line_num, line in enumerate(topo_file, start=1):
            if line_num == 1:
                # first line lists the landmarks, not an edge
                continue
            if line == "\n":
                continue
            tokens = line.split()
            n1 = parse_node_name(tokens[0], max_router, max_host)
            if n1 == -1:
                print("Bad line1 " + line)
                continue
            max_router, max_host = n1[1], n1[2]
            n2 = parse_node_name(tokens[1], max_router, max_host)
            if n2 == -1:
                print("Bad line 2" + line)
                continue
            max_router, max_host = n2[1], n2[2]
            delay_12 = float(tokens[2])  # delay going from n1 to n2
            delay_21 = float(tokens[3])  # delay going from n2 to n1
            linklist.append((n1[0], n2[0], delay_12, delay_21))
    # indices are 0-based, so module-vector sizes are max index + 1
    max_router = max_router + 1
    max_host = max_host + 1
    # map the routing algorithm onto the host/router NED module types
    if routing_alg == 'shortestPath':
        host_node_type = 'hostNodeBase'
        router_node_type = 'routerNodeBase'
    else:
        if routing_alg in ('DCTCPBal', 'DCTCPQ', 'TCP', 'TCPCubic'):
            host_node_type = 'hostNodeDCTCP'
        elif routing_alg == 'DCTCPRate':
            host_node_type = 'hostNodePropFairPriceScheme'
        else:
            host_node_type = 'hostNode' + routing_alg[0].upper() + routing_alg[1:]
        if routing_alg == 'landmarkRouting':
            router_node_type = 'routerNodeWaterfilling'
        elif routing_alg in ('DCTCPRate', 'DCTCPQ', 'TCP', 'TCPCubic'):
            router_node_type = 'routerNodeDCTCP'
        else:
            router_node_type = 'routerNode' + routing_alg[0].upper() + routing_alg[1:]
    with open(output_filename, "w") as outfile:
        outfile.write("import " + router_node_type + ";\n")
        outfile.write("import " + host_node_type + ";\n\n")
        outfile.write("network " + network_name + "_" + routing_alg + "\n")
        outfile.write("{\n")
        # NOTE: a single default link delay/rate is emitted for all links;
        # per-link values would need extra columns in the topology file.
        outfile.write('\tparameters:\n\t\tdouble linkDelay @unit("s") = default(100us);\n')
        outfile.write('\t\tdouble linkDataRate @unit("Gbps") = default(1Gbps);\n')
        outfile.write('\tsubmodules:\n')
        outfile.write('\t\thost['+str(max_host)+']: ' + host_node_type + ' {} \n')
        outfile.write('\t\trouter['+str(max_router)+']: ' + router_node_type + ' {} \n')
        outfile.write('\tconnections: \n')
        # one bidirectional channel pair per parsed link
        for src, dst, out_delay, in_delay in linklist:
            outfile.write('\t\t' + src + '.out++ --> {delay = ' + str(out_delay) +'ms; }')
            outfile.write(' --> ' + dst + '.in++; \n')
            outfile.write('\t\t' + src + '.in++ <-- {delay = ' + str(in_delay) +'ms; }')
            outfile.write(' <-- ' + dst + '.out++; \n')
        outfile.write('}\n')
# generate either a small world or scale free graph
def generate_graph(size, graph_type):
    """Generate a random `size`-node topology of the requested family.

    graph_type is one of 'random', 'small_world', 'small_world_sparse',
    'scale_free', 'scale_free_sparse' or 'tree'.  Self loops and parallel
    edges are stripped before the (simple, undirected) graph is returned.
    """
    if graph_type == 'random':
        G = nx.dense_gnm_random_graph(size, size * 5, seed=SEED)
    elif graph_type == 'small_world':
        G = nx.watts_strogatz_graph(size, 8, 0.25, seed=SEED)
    elif graph_type == 'small_world_sparse':
        # neighbor count must be an int: size/8 is a float on Python 3
        G = nx.watts_strogatz_graph(size, size // 8, 0.25, seed=SEED)
    elif graph_type == 'scale_free':
        # regular expts; celer expts used m=5 on a 10 node graph
        G = nx.barabasi_albert_graph(size, 8, seed=SEED)
    elif graph_type == 'scale_free_sparse':
        G = nx.barabasi_albert_graph(size, size // 8, seed=SEED)
    elif graph_type == 'tree':
        G = nx.random_tree(size, seed=SEED)
    # remove self loops and parallel edges; Graph.selfloop_edges() was
    # removed in networkx 2.4 — nx.selfloop_edges(G) is the supported API
    G.remove_edges_from(nx.selfloop_edges(G))
    G = nx.Graph(G)
    print('Generated a ', graph_type, ' graph')
    print('number of nodes: ', G.number_of_nodes())
    print('Number of Edges: ', G.number_of_edges())
    print('Number of connected components: ', nx.number_connected_components(G))
    return G
# print the output in the desired format for write_ned_file to read
# generate extra end host nodes if need be
# make the first line list of landmarks for this topology
def print_topology_in_format(G, balance_per_channel, delay_per_channel, output_filename, separate_end_hosts,\
        randomize_init_bal=False, random_channel_capacity=False, lnd_capacity=False, is_lnd=False, rebalancing_enabled=False):
    """Write graph G to output_filename in the intermediate topology format.

    The first line lists the landmark routers; each following line describes
    one channel: node1 node2 delay12 delay21 balance1 balance2 plus the fee
    attributes stored on the edge.  The chosen capacity of every edge is
    also written back onto G under the 'capacity' edge attribute.

    NOTE(review): depends on module-level names from ``config``
    (NUM_LANDMARKS, LND_FILE_PATH, LARGE_BALANCE, REASONABLE_BALANCE, ...)
    and on the module-level ``args`` — confirm these exist before reusing
    this as a library function.
    """
    f1 = open(output_filename, "w+")
    end_host_delay = delay_per_channel
    # offset is computed but never used below
    offset = G.number_of_nodes()
    if (separate_end_hosts == False):
        offset = 0
    # rank nodes by degree (highest first) to pick landmark candidates
    nodes_sorted_by_degree = sorted(G.degree, key=lambda x: x[1], reverse=True)
    # generate landmarks based on degree: walk the ranking one degree class
    # at a time and take evenly spaced members of each class until we have
    # NUM_LANDMARKS of them
    i = 0
    landmarks, current_list = [], []
    max_degree = -1
    while len(landmarks) < NUM_LANDMARKS and i < len(nodes_sorted_by_degree):
        num_remaining = NUM_LANDMARKS - len(landmarks)
        if nodes_sorted_by_degree[i][1] == max_degree:
            # still inside the same degree class
            current_list.append(nodes_sorted_by_degree[i][0])
        else:
            # degree class finished: sample it evenly, then start a new one
            spaced_indices = np.round(np.linspace(0, len(current_list)-1, \
                    min(num_remaining, len(current_list)))).astype(int)
            if max_degree != -1:
                landmarks.extend([current_list[x] for x in spaced_indices])
            current_list = [nodes_sorted_by_degree[i][0]]
            max_degree = nodes_sorted_by_degree[i][1]
        i += 1
    # flush the last (possibly unfinished) degree class
    if len(landmarks) < NUM_LANDMARKS:
        spaced_indices = np.round(np.linspace(0, len(current_list)-1, \
                min(num_remaining, len(current_list)))).astype(int)
        landmarks.extend([current_list[x] for x in spaced_indices])
    # make the first line the landmarks and make them all router nodes
    for l in landmarks[:NUM_LANDMARKS]:
        f1.write(str(l) + "r ")
    f1.write("\n")
    # total_budget / weights / sum_weights are computed but never used below
    total_budget = balance_per_channel * len(G.edges())
    weights = {e: min(G.degree(e[0]), G.degree(e[1])) for e in G.edges()}
    sum_weights = sum(weights.values())
    capacity_dict = dict()
    # get lnd capacity data (only consumed when lnd_capacity is requested)
    lnd_capacities_graph = nx.read_edgelist(LND_FILE_PATH + 'lnd_july15_2019_reducedsize' + '.edgelist')
    lnd_capacities = list(nx.get_edge_attributes(lnd_capacities_graph, 'capacity').values())
    # measured RTT samples, used as per-edge delays for non-uniform lnd runs
    real_rtts = np.loadtxt(LND_FILE_PATH + "ping_times_data")
    # write rest of topology: one line per channel
    for e in G.edges():
        f1.write(str(e[0]) + "r " + str(e[1]) + "r ")
        if not random_channel_capacity and is_lnd and "uniform" not in output_filename:
            # NOTE(review): this rebinds the delay_per_channel parameter, so
            # the last sampled delay also leaks into the end-host lines below
            delay_per_channel = np.random.choice(real_rtts) / 2.0
            f1.write(str(delay_per_channel) + " " + str(delay_per_channel) + " ")
        else:
            f1.write(str(delay_per_channel) + " " + str(delay_per_channel) + " ")
        # pick this channel's capacity according to the configured mode
        if random_channel_capacity:
            # normal around balance_per_channel, resampled until >= 2
            balance_for_this_channel = -1
            while balance_for_this_channel < 2:
                balance_for_this_channel = round(np.random.normal(balance_per_channel, \
                        0.75 * balance_per_channel))
        elif lnd_capacity:
            # sample real lnd capacities, rescaled to the requested mean
            balance_for_this_channel = -1
            while balance_for_this_channel < 40:
                balance_for_this_channel = round(np.random.choice(lnd_capacities) * \
                        (balance_per_channel / np.mean(lnd_capacities)))
        elif is_lnd and "uniform" not in output_filename:
            if "lessScale" in output_filename:
                balance_for_this_channel = float(G[e[0]][e[1]]['capacity'] *10 * balance_per_channel)
            else:
                # base case: fixed scaling of the recorded lnd capacity
                balance_for_this_channel = 16*0.00011111*(float(G[e[0]][e[1]]['capacity']))
        else:
            balance_for_this_channel = balance_per_channel
        capacity_dict[e] = balance_for_this_channel
        # split the capacity between the two channel ends
        if randomize_init_bal:
            one_end_bal = random.randint(1, balance_for_this_channel)
            other_end_bal = balance_for_this_channel - one_end_bal
            f1.write(str(one_end_bal) + " " + str(other_end_bal) + " ")
        else:
            f1.write(str(round(balance_for_this_channel/2)) + " " + \
                    str(round(balance_for_this_channel/2)) + " ")
        # *************************Writing Fees to network**************************
        f1.write(str(G[e[0]][e[1]]['bf1']) + " " + str(G[e[0]][e[1]]['bf2']) + " ");
        f1.write(str(G[e[0]][e[1]]['fr1']) + " " + str(G[e[0]][e[1]]['fr2']) + "\n");
    # generate extra end host nodes: one host attached to every router
    if separate_end_hosts :
        for n in G.nodes():
            f1.write(str(n) + "e " + str(n) + "r ")
            f1.write(str(end_host_delay) + " " + str(end_host_delay) + " ")
            if rebalancing_enabled:
                f1.write(str(REASONABLE_BALANCE) + " " + str(REASONABLE_ROUTER_BALANCE) + " ")
            else:
                f1.write(str(LARGE_BALANCE/2) + " " + str(LARGE_BALANCE/2) + " ")
            # NOTE(review): `e` here is left over from the edge loop above, so
            # every end-host line repeats the fee attributes of the *last*
            # edge written — confirm this is intended
            f1.write(str(G[e[0]][e[1]]['bf1']) + " " + str(G[e[0]][e[1]]['bf2']) + " ");
            f1.write(str(G[e[0]][e[1]]['fr1']) + " " + str(G[e[0]][e[1]]['fr2']) + "\n");
        # extra host-router links for the hand-built parallel graph topology
        if args.graph_type == "parallel_graph":
            for (e,r) in zip([1,3], [0, 2]):
                f1.write(str(e) + "e " + str(r) + "r ")
                f1.write(str(end_host_delay) + " " + str(end_host_delay) + " ")
                f1.write(str(LARGE_BALANCE/2) + " " + str(LARGE_BALANCE/2) + " ")
    f1.close()
    # record the chosen capacities on the graph for downstream use
    nx.set_edge_attributes(G, capacity_dict, 'capacity')
# ---- command-line interface ----
parser = argparse.ArgumentParser(description="Create arbitrary topologies to run the omnet simulator on")
parser.add_argument('--num-nodes', type=int, dest='num_nodes', help='number of nodes in the graph', default=20)
parser.add_argument('--delay-per-channel', type=int, dest='delay_per_channel', \
        help='delay between nodes (ms)', default=30)
parser.add_argument('graph_type', choices=['small_world', 'scale_free', 'hotnets_topo', 'simple_line', 'toy_dctcp', \
        'simple_deadlock', 'simple_topologies', 'parallel_graph', 'dag_example', 'lnd_dec4_2018','lnd_dec4_2018lessScale', \
        'lnd_dec4_2018_randomCap', 'lnd_dec4_2018_modified', 'lnd_uniform', 'tree', 'random', \
        'lnd_july15_2019'], \
        help='type of graph (Small world or scale free or custom topology list)', default='small_world')
parser.add_argument('--balance-per-channel', type=int, dest='balance_per_channel', default=100)
parser.add_argument('--topo-filename', dest='topo_filename', type=str, \
        help='name of intermediate output file', default="topo.txt")
parser.add_argument('--network-name', type=str, dest='network_name', \
        help='name of the output ned filename', default='simpleNet')
parser.add_argument('--separate-end-hosts', action='store_true', \
        help='do you need separate end hosts that only send transactions')
parser.add_argument('--randomize-start-bal', type=str, dest='randomize_start_bal', \
        help='Do not start from pergect balance, but rather randomize it', default='False')
parser.add_argument('--random-channel-capacity', type=str, dest='random_channel_capacity', \
        help='Give channels a random balance between bal/2 and bal', default='False')
parser.add_argument('--lnd-channel-capacity', type=str, dest='lnd_capacity', \
        help='Give channels a random balance sampled from lnd', default='False')
parser.add_argument('--rebalancing-enabled', type=str, dest="rebalancing_enabled",\
        help="should the end host router channel be reasonably sized", default="false")
routing_alg_list = ['lndBaseline']
args = parser.parse_args()
# seed both RNGs so topology generation is reproducible
np.random.seed(SEED)
random.seed(SEED)
# ---- pick or generate the graph ----
if args.num_nodes <= 5 and args.graph_type == 'simple_topologies':
    # tiny hand-built graphs defined in config
    if args.num_nodes == 2:
        G = two_node_graph
    elif args.num_nodes == 3:
        G = three_node_graph
    elif args.num_nodes == 4:
        G = four_node_graph
    elif 'line' in args.network_name:
        G = five_line_graph
    else:
        G = five_node_graph
elif args.graph_type in ['small_world', 'scale_free', 'tree', 'random']:
    if "sparse" in args.topo_filename:
        args.graph_type = args.graph_type + "_sparse"
    G = generate_graph(args.num_nodes, args.graph_type)
elif args.graph_type == 'toy_dctcp':
    G = toy_dctcp_graph
elif args.graph_type == 'dag_example':
    print("generating dag example")
    G = dag_example_graph
elif args.graph_type == 'parallel_graph':
    G = parallel_graph
elif args.graph_type == 'hotnets_topo':
    G = hotnets_topo_graph
elif args.graph_type == 'simple_deadlock':
    G = simple_deadlock_graph
    args.separate_end_hosts = False
elif args.graph_type.startswith('lnd_'):
    G = nx.read_edgelist(LND_FILE_PATH + 'lnd_july15_2019_reducedsize' + '.edgelist')
else:
    G = simple_line_graph
    args.separate_end_hosts = False
# string flags arrive as 'true'/'false'; convert to real booleans
args.randomize_start_bal = args.randomize_start_bal == 'true'
args.random_channel_capacity = args.random_channel_capacity == 'true'
args.lnd_capacity = args.lnd_capacity == 'true'
# ---- emit the intermediate topology file and one .ned per routing alg ----
print_topology_in_format(G, args.balance_per_channel, args.delay_per_channel, args.topo_filename, \
        args.separate_end_hosts, args.randomize_start_bal, args.random_channel_capacity,\
        args.lnd_capacity, args.graph_type.startswith('lnd_'), args.rebalancing_enabled == "true")
network_base = os.path.basename(args.network_name)
for routing_alg in routing_alg_list:
    write_ned_file(args.topo_filename, args.network_name + '_' + routing_alg + '.ned', \
            network_base, routing_alg)
import cv2
# Webcam face-tracking HUD: detect faces per frame, draw a guidance overlay
# telling the operator which way to move so the face lines up with the screen
# center, and record the raw camera feed to OUTPUT.avi.  Press 'q' to quit.
face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
cap = cv2.VideoCapture(0)
fourcc = cv2.VideoWriter_fourcc(*'XVID')
out = cv2.VideoWriter('OUTPUT.avi', fourcc, 20.0, (640, 480))
font = cv2.FONT_HERSHEY_SIMPLEX
while cap.isOpened():
    ret, img = cap.read()
    if not ret:
        # FIX: original ignored the success flag; a dropped frame returns
        # (False, None) and cv2.cvtColor would then crash on None.
        break
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray, 1.1, 5)
    out.write(img)  # record the frame before any overlay is drawn on it
    screen_center = (640 // 2, 480 // 2)
    cv2.circle(img, screen_center, 5, (0, 255, 0), 1)
    for (x, y, w, h) in faces:
        cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)
        face_center = ((x + x + w) // 2, (y + y + h) // 2)  # box midpoint
        cv2.circle(img, face_center, 5, (0, 0, 255), 1)
        x1, y1 = face_center
        x2, y2 = screen_center
        # Crude depth proxy from the box corner product.  FIX: original wrote
        # (x+w * y+h)//100, which evaluates as x + w*y + h due to operator
        # precedence; the intended grouping is (x+w)*(y+h).
        z = ((x + w) * (y + h)) // 100
        # Pixel distance from face center to screen center.
        dist = (((x2 - x1) * (x2 - x1)) + ((y2 - y1) * (y2 - y1))) ** 0.5
        text = 'Vector: ' + str(dist)
        cv2.putText(img, text, (10, 25), font, 0.5, (255, 255, 0), 1)
        coordinates = 'X coordinates: ' + str(x1) + ' Y coordinates: ' + str(y1)
        zcoord = 'Z coordinate: ' + str(z)
        cv2.putText(img, coordinates, (10, 50), font, 0.5, (255, 255, 0), 1)
        cv2.putText(img, zcoord, (10, 75), font, 0.5, (255, 255, 0), 1)
        # Horizontal guidance with a 20 px dead zone around x = 320.
        if x1 <= 310:
            cv2.putText(img, 'MOVE RIGHT:', (10, 90), font, 0.5, (0, 0, 255), 1)
        elif x1 >= 330:
            cv2.putText(img, 'MOVE LEFT: ', (10, 90), font, 0.5, (0, 0, 255), 1)
        else:
            # FIX: typo 'ALLIEND' -> 'ALIGNED' in the on-screen label.
            cv2.putText(img, 'X AXIS ALIGNED', (10, 90), font, 0.5, (0, 0, 255), 1)
        # Vertical guidance with a dead zone between y = 230 and 260.
        if y1 <= 230:
            cv2.putText(img, 'MOVE DOWN:', (10, 105), font, 0.5, (0, 0, 255), 1)
        elif y1 >= 260:
            cv2.putText(img, 'MOVE UP: ', (10, 105), font, 0.5, (0, 0, 255), 1)
        else:
            cv2.putText(img, 'Y AXIS ALIGNED', (10, 105), font, 0.5, (0, 0, 255), 1)
    cv2.imshow('frame', img)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cap.release()
out.release()  # FIX: finalize the AVI container; without this OUTPUT.avi can be truncated
cv2.destroyAllWindows() | dronecontrol.py | import cv2
face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
cap = cv2.VideoCapture(0)
fourcc = cv2.VideoWriter_fourcc(*'XVID')
out = cv2.VideoWriter('OUTPUT.avi', fourcc, 20.0, (640, 480))
font = cv2.FONT_HERSHEY_SIMPLEX
while cap.isOpened():
_, img = cap.read()
#print(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
#print(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
faces = face_cascade.detectMultiScale(gray, 1.1, 5)
out.write(img)
screencenter = (640//2, 480//2)
cv2.circle(img, screencenter, 5, (0, 255, 0), 1)
for (x, y, w, h) in faces:
cv2.rectangle(img, (x, y), (x+w, y+h), (255, 0, 0), 2)
rectagleCenterPont = ((x + x + w) // 2, (y + y + h) // 2)
cv2.circle(img, rectagleCenterPont, 5, (0, 0, 255), 1)
x1 = (x + x + w) // 2
y1 = (y + y + h) // 2
x2 = 640//2
y2 = 480//2
z = (x+w * y+h)//100
dist = pow((((x2-x1)*(x2-x1)) + ((y2-y1)*(y2-y1))), (1/2))
text = 'Vector: ' + str(dist)
cv2.putText(img, text, (10, 25), font, 0.5, (255, 255, 0), 1)
coordinates = 'X coordinates: ' + str(x1) + ' Y coordinates: ' + str(y1)
zcoord = 'Z coordinate: ' + str(z)
cv2.putText(img, coordinates, (10, 50), font, 0.5, (255, 255, 0), 1)
cv2.putText(img, zcoord, (10, 75), font, 0.5, (255, 255, 0), 1)
if(x1<=310):
cmd1 = 'MOVE RIGHT:'
cv2.putText(img, cmd1, (10, 90), font, 0.5, (0, 0, 255), 1)
elif(x1>=330):
cmd2 = 'MOVE LEFT: '
cv2.putText(img, cmd2, (10, 90), font, 0.5, (0, 0, 255), 1)
else:
cmd3 = 'X AXIS ALLIEND'
cv2.putText(img, cmd3, (10, 90), font, 0.5, (0, 0, 255), 1)
if (y1 <= 230):
cmd4 = 'MOVE DOWN:'
cv2.putText(img, cmd4, (10, 105), font, 0.5, (0, 0, 255), 1)
elif (y1 >= 260):
cmd5 = 'MOVE UP: '
cv2.putText(img, cmd5, (10, 105), font, 0.5, (0, 0, 255), 1)
else:
cmd6 = 'Y AXIS ALLIEND'
cv2.putText(img, cmd6, (10, 105), font, 0.5, (0, 0, 255), 1)
cv2.imshow('frame', img)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
cap.release()
cv2.destroyAllWindows() | 0.046627 | 0.186299 |
from app.catalog.domain.category import Category
from app.catalog.domain.product import Product
from app.catalog.infra.repository.sql_product_repository import SqlProductRepository
def test_save(db_session):
    """Persisting a single product stores it in the session's database."""
    # Given
    snack = Product(name='꼬북칩', price=1000, detail='바삭하고 맛이 있지요')
    repository = SqlProductRepository(db_session)
    # When
    repository.save(snack)
    # Then
    stored = db_session.query(Product).first()
    assert stored.name == '꼬북칩'
def test_save_with_categories(db_session):
    """Saving a product also persists its associated categories."""
    # Given
    product = Product(name='꼬북칩', price=1000, detail='바삭하고 맛이 있지요')
    product.categories.append(Category(name='제과'))
    product.categories.append(Category(name='어린이'))
    # When
    SqlProductRepository(db_session).save(product)
    # Then
    result_product = db_session.query(Product).first()
    assert result_product.name == '꼬북칩'
    assert len(result_product.categories) == 2
    # Set comprehension instead of set([list-comp]) (flake8-comprehensions C403).
    assert {category.name for category in result_product.categories} == {'제과', '어린이'}
def test_remove_by_id(db_session):
    """Removing by primary key deletes the stored row."""
    # Given
    product = Product(name='꼬북칩', price=1000, detail='바삭하고 맛이 있지요')
    db_session.add(product)
    db_session.commit()
    assert db_session.query(Product).count() == 1
    # When
    repository = SqlProductRepository(db_session)
    repository.remove_by_id(product.id)
    # Then
    remaining = db_session.query(Product).count()
    assert remaining == 0
def test_find_all(db_session):
    """find_all returns every persisted product."""
    # Given
    products = [Product(name=f'꼬북칩 {i}', price=1000, detail='바삭하고 맛이 있지요')
                for i in range(1, 6)]
    db_session.add_all(products)
    db_session.commit()
    # When
    result = SqlProductRepository(db_session).find_all()
    # Then
    assert len(result) == 5
def test_find_by_id(db_session):
    """find_by_id returns the product whose primary key matches."""
    # Given
    db_session.add_all([
        Product(name=f'꼬북칩 {i}', price=1000, detail='바삭하고 맛이 있지요')
        for i in range(1, 6)
    ])
    db_session.commit()
    # When
    found = SqlProductRepository(db_session).find_by_id(2)
    # Then
    assert found.id == 2
    assert found.name == '꼬북칩 2'
def test_find_by_category(pre_data_db_session):
    """find_by_category pages through the products linked to a category."""
    # Given
    repository = SqlProductRepository(pre_data_db_session)
    electronics = (pre_data_db_session.query(Category)
                   .filter(Category.name == '전자제품').first())
    stationery = (pre_data_db_session.query(Category)
                  .filter(Category.name == '필기구').first())
    # When
    electronics_products = repository.find_by_category(electronics, 0, 10)
    stationery_products = repository.find_by_category(stationery, 0, 10)
    # Then
    assert len(electronics_products) == 2
    assert len(stationery_products) == 2
def test_counts_by_category(db_session):
    """counts_by_category counts the products linked to each category."""
    # Given
    repository = SqlProductRepository(db_session)
    category_1 = Category(name='제과')
    category_2 = Category(name='아동')
    db_session.add_all([category_1, category_2])
    db_session.commit()
    for i in range(1, 6):
        db_session.add(
            Product(name=f'꼬북칩 {i}', price=1000, detail='바삭하고 맛이 있지요',
                    categories=[category_1, category_2]))
    for i in range(1, 21):
        db_session.add(
            Product(name=f'장난감 {i}', price=2000, detail='재미있지요',
                    categories=[category_2]))
    # FIX: commit the added products before counting.  The sibling tests all
    # commit their fixtures; the original relied on SQLAlchemy autoflush,
    # making the assertions depend on session configuration.
    db_session.commit()
    # When / Then
    assert repository.counts_by_category(category_1) == 5
    assert repository.counts_by_category(category_2) == 25
assert repository.counts_by_category(category_2) == 25 | app/tests/catalog/infra/repository/test_sql_product_repository.py | from app.catalog.domain.category import Category
from app.catalog.domain.product import Product
from app.catalog.infra.repository.sql_product_repository import SqlProductRepository
def test_save(db_session):
# Given
product = Product(name='꼬북칩', price=1000, detail='바삭하고 맛이 있지요')
# When
SqlProductRepository(db_session).save(product)
# Then
assert db_session.query(Product).first().name == '꼬북칩'
def test_save_with_categories(db_session):
# Given
product = Product(name='꼬북칩', price=1000, detail='바삭하고 맛이 있지요')
product.categories.append(Category(name='제과'))
product.categories.append(Category(name='어린이'))
# When
SqlProductRepository(db_session).save(product)
# Then
result_product = db_session.query(Product).first()
assert result_product.name == '꼬북칩'
assert len(result_product.categories) == 2
assert set([category.name for category in result_product.categories]) == \
set(['제과', '어린이'])
def test_remove_by_id(db_session):
# Given
product = Product(name='꼬북칩', price=1000, detail='바삭하고 맛이 있지요')
db_session.add(product)
db_session.commit()
assert db_session.query(Product).count() == 1
# When
SqlProductRepository(db_session).remove_by_id(product.id)
# Then
assert db_session.query(Product).count() == 0
def test_find_all(db_session):
# Given
for i in range(1, 6):
db_session.add(
Product(name=f'꼬북칩 {i}', price=1000, detail='바삭하고 맛이 있지요'))
db_session.commit()
# When
result = SqlProductRepository(db_session).find_all()
# Then
assert len(result) == 5
def test_find_by_id(db_session):
# Given
for i in range(1, 6):
db_session.add(
Product(name=f'꼬북칩 {i}', price=1000, detail='바삭하고 맛이 있지요'))
db_session.commit()
# When
result = SqlProductRepository(db_session).find_by_id(2)
# Then
assert result.id == 2
assert result.name == '꼬북칩 2'
def test_find_by_category(pre_data_db_session):
repository = SqlProductRepository(pre_data_db_session)
category_1 = pre_data_db_session.query(
Category).filter(Category.name == '전자제품').first()
category_2 = pre_data_db_session.query(
Category).filter(Category.name == '필기구').first()
# When
category_1_products = repository.find_by_category(category_1, 0, 10)
category_2_products = repository.find_by_category(category_2, 0, 10)
# Then
assert len(category_1_products) == 2
assert len(category_2_products) == 2
def test_counts_by_category(db_session):
repository = SqlProductRepository(db_session)
category_1 = Category(name='제과')
category_2 = Category(name='아동')
db_session.add_all([category_1, category_2])
db_session.commit()
for i in range(1, 6):
db_session.add(
Product(name=f'꼬북칩 {i}', price=1000, detail='바삭하고 맛이 있지요', categories=[category_1, category_2]))
for i in range(1, 21):
db_session.add(
Product(name=f'장난감 {i}', price=2000, detail='재미있지요', categories=[category_2]))
# When
assert repository.counts_by_category(category_1) == 5
assert repository.counts_by_category(category_2) == 25 | 0.567577 | 0.506713 |
import os
import numpy as np
import h5py
from PIL import Image
from numpy.core.shape_base import stack
from utils.data_io import numpy2image, image2numpy
IMG_EXTENSIONS = ['.jpg', '.jpeg', '.png', '.ppm', '.bmp', '.pgm']
def read_images_from_folder(data_path, img_size=224, img_nc=3):
    """Load every image in *data_path*, resize to a square, and stack them.

    Args:
        data_path: directory containing image files (see IMG_EXTENSIONS).
        img_size: side length each image is resized to.
        img_nc: channel count; 1 loads grayscale ('L'), anything else RGB.

    Returns:
        np.ndarray stacking one entry per image along axis 0; the per-image
        layout is whatever image2numpy produces (presumably HWC -- TODO
        confirm against utils.data_io).
    """
    img_list = sorted(f for f in os.listdir(data_path)
                      if any(f.lower().endswith(ext) for ext in IMG_EXTENSIONS))
    images = []
    print('Reading images from: {}'.format(data_path))
    # Iterate filenames directly instead of range(len(...)).
    for name in img_list:
        # Context manager closes the underlying file; the original left one
        # handle open per image until garbage collection.
        with Image.open(os.path.join(data_path, name)) as image:
            image = image.convert('L' if img_nc == 1 else 'RGB')
            image = image.resize((img_size, img_size))
            images.append(image2numpy(image))
    images = np.stack(images, axis=0)
    print('Images loaded, shape: {}'.format(images.shape))
    return images
class AlignedDataSet(object):
    """In-memory dataset of paired source/target images.

    Images are paired by sorted-filename order, so both folders must contain
    the same number of images.  Decoded arrays can be cached in an HDF5 file
    so later runs skip re-decoding.
    """
    def __init__(self, src_img_path, tgt_img_path, cache=None, load_size=128, crop_size=128, img_nc=3, num_images=None, random_crop=False,
                 random_flip=False, shuffle=False):
        # Args:
        #   src_img_path / tgt_img_path: folders holding the paired images.
        #   cache: optional HDF5 path; read if it already exists, else written.
        #   load_size: size images are resized to when loaded from disk.
        #   crop_size: output size when random_crop is enabled.
        #   img_nc: channel count forwarded to read_images_from_folder.
        #   num_images: optional cap on the number of pairs used.
        #   random_crop / random_flip: per-item augmentations in __getitem__.
        #   shuffle: permute the pair order once, here at construction time.
        self.src_img_path = src_img_path
        self.tgt_img_path = tgt_img_path
        self.load_size = load_size
        self.crop_size = crop_size
        # Effective output size: crop size when cropping, else load size.
        self.img_size = self.crop_size if random_crop else self.load_size
        self.img_nc = img_nc
        self.random_flip = random_flip
        self.random_crop = random_crop
        if cache != None and os.path.exists(cache):
            # Cache hit: load the pre-decoded arrays instead of reading images.
            with h5py.File(cache, 'r') as h5:
                self.src_images = np.asarray(h5['src_images'], dtype=np.float32)
                self.tgt_images = np.asarray(h5['tgt_images'], dtype=np.float32)
        else:
            self.src_images = read_images_from_folder(src_img_path, load_size, img_nc)
            self.tgt_images = read_images_from_folder(tgt_img_path, load_size, img_nc)
            if cache != None:
                # Populate the cache for subsequent runs.
                with h5py.File(cache, 'w') as h5:
                    h5.create_dataset('src_images', data=self.src_images)
                    h5.create_dataset('tgt_images', data=self.tgt_images)
        assert len(self.src_images) == len(self.tgt_images), 'number of source and target images must be equal.'
        if num_images:
            self.num_images = min(num_images, len(self.src_images))
        else:
            self.num_images = len(self.src_images)
        if shuffle:
            # One-time shuffle; indexing with a length-num_images permutation
            # also truncates both arrays to the first num_images pairs.
            self.indices = np.random.permutation(self.num_images)
            self.src_images = self.src_images[self.indices]
            self.tgt_images = self.tgt_images[self.indices]
    def __getitem__(self, index):
        # Return a (src_batch, tgt_batch) tuple with flip/crop augmentation.
        # NOTE(review): zip(src_img, tgt_img) iterates the first axis of the
        # indexed result, so this assumes `index` is a slice/array selecting a
        # batch of images; a plain int index would make the loop walk image
        # rows instead -- TODO confirm against the caller.
        src_img = self.src_images[index]
        tgt_img = self.tgt_images[index]
        out_src_img = []
        out_tgt_img = []
        for (src, tgt) in zip(src_img, tgt_img):
            if self.random_flip and np.random.rand() < 0.5:
                # flip array
                src = np.fliplr(src)
                tgt = np.fliplr(tgt)
            if self.random_crop and self.crop_size < self.load_size:
                # Same crop offset for src and tgt keeps the pair aligned.
                crop_x = np.random.randint(0, self.load_size - self.crop_size + 1)
                crop_y = np.random.randint(0, self.load_size - self.crop_size + 1)
                src = src[crop_x:crop_x+self.crop_size, crop_y:crop_y+self.crop_size]
                tgt = tgt[crop_x:crop_x+self.crop_size, crop_y:crop_y+self.crop_size]
            out_src_img.append(src)
            out_tgt_img.append(tgt)
        return (np.stack(out_src_img, axis=0), np.stack(out_tgt_img, axis=0))
def __len__(self):
return self.num_images | dataloader/aligned_dataset.py | import os
import numpy as np
import h5py
from PIL import Image
from numpy.core.shape_base import stack
from utils.data_io import numpy2image, image2numpy
IMG_EXTENSIONS = ['.jpg', '.jpeg', '.png', '.ppm', '.bmp', '.pgm']
def read_images_from_folder(data_path, img_size=224, img_nc=3):
img_list = sorted([f for f in os.listdir(data_path) if any(f.lower().endswith(ext) for ext in IMG_EXTENSIONS)])
images = []
print('Reading images from: {}'.format(data_path))
for i in range(len(img_list)):
image = Image.open(os.path.join(data_path, img_list[i]))
if img_nc == 1:
image = image.convert('L')
else:
image = image.convert('RGB')
image = image.resize((img_size, img_size))
image = image2numpy(image)
images.append(image)
images = np.stack(images, axis=0)
print('Images loaded, shape: {}'.format(images.shape))
return images
class AlignedDataSet(object):
def __init__(self, src_img_path, tgt_img_path, cache=None, load_size=128, crop_size=128, img_nc=3, num_images=None, random_crop=False,
random_flip=False, shuffle=False):
self.src_img_path = src_img_path
self.tgt_img_path = tgt_img_path
self.load_size = load_size
self.crop_size = crop_size
self.img_size = self.crop_size if random_crop else self.load_size
self.img_nc = img_nc
self.random_flip = random_flip
self.random_crop = random_crop
if cache != None and os.path.exists(cache):
with h5py.File(cache, 'r') as h5:
self.src_images = np.asarray(h5['src_images'], dtype=np.float32)
self.tgt_images = np.asarray(h5['tgt_images'], dtype=np.float32)
else:
self.src_images = read_images_from_folder(src_img_path, load_size, img_nc)
self.tgt_images = read_images_from_folder(tgt_img_path, load_size, img_nc)
if cache != None:
with h5py.File(cache, 'w') as h5:
h5.create_dataset('src_images', data=self.src_images)
h5.create_dataset('tgt_images', data=self.tgt_images)
assert len(self.src_images) == len(self.tgt_images), 'number of source and target images must be equal.'
if num_images:
self.num_images = min(num_images, len(self.src_images))
else:
self.num_images = len(self.src_images)
if shuffle:
self.indices = np.random.permutation(self.num_images)
self.src_images = self.src_images[self.indices]
self.tgt_images = self.tgt_images[self.indices]
def __getitem__(self, index):
src_img = self.src_images[index]
tgt_img = self.tgt_images[index]
out_src_img = []
out_tgt_img = []
for (src, tgt) in zip(src_img, tgt_img):
if self.random_flip and np.random.rand() < 0.5:
# flip array
src = np.fliplr(src)
tgt = np.fliplr(tgt)
if self.random_crop and self.crop_size < self.load_size:
crop_x = np.random.randint(0, self.load_size - self.crop_size + 1)
crop_y = np.random.randint(0, self.load_size - self.crop_size + 1)
src = src[crop_x:crop_x+self.crop_size, crop_y:crop_y+self.crop_size]
tgt = tgt[crop_x:crop_x+self.crop_size, crop_y:crop_y+self.crop_size]
out_src_img.append(src)
out_tgt_img.append(tgt)
return (np.stack(out_src_img, axis=0), np.stack(out_tgt_img, axis=0))
def __len__(self):
return self.num_images | 0.214774 | 0.347842 |
from ... pyaz_utils import _call_az
from . import file, subtask
def create(job_id, account_endpoint=None, account_key=None, account_name=None, affinity_id=None, application_package_references=None, command_line=None, environment_settings=None, json_file=None, max_task_retry_count=None, max_wall_clock_time=None, resource_files=None, retention_time=None, task_id=None):
    '''
    Create Batch tasks.
    Required Parameters:
    - job_id -- The ID of the job containing the task.
    Optional Parameters:
    - account_endpoint -- Batch service endpoint. Alternatively, set by environment variable: AZURE_BATCH_ENDPOINT
    - account_key -- The Batch account key. Alternatively, set by environment variable: AZURE_BATCH_ACCESS_KEY
    - account_name -- The Batch account name. Alternatively, set by environment variable: AZURE_BATCH_ACCOUNT
    - affinity_id -- Required. You can pass the affinityId of a Node to indicate that this Task needs to run on that Compute Node. Note that this is just a soft affinity. If the target Compute Node is busy or unavailable at the time the Task is scheduled, then the Task will be scheduled elsewhere.
    - application_package_references -- The space-separated list of IDs specifying the application packages to be installed. Space-separated application IDs with optional version in 'id[#version]' format.
    - command_line -- The command line of the task. The command line does not run under a shell, and therefore cannot take advantage of shell features such as environment variable expansion. If you want to take advantage of such features, you should invoke the shell in the command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux.
    - environment_settings -- A list of environment variable settings for the task. Space-separated values in 'key=value' format.
    - json_file -- The file containing the task(s) to create in JSON(formatted to match REST API request body). When submitting multiple tasks, accepts either an array of tasks or a TaskAddCollectionParameter. If this parameter is specified, all other parameters are ignored.
    - max_task_retry_count -- The maximum number of times the Task may be retried. The Batch service retries a Task if its exit code is nonzero. Note that this value specifically controls the number of retries for the Task executable due to a nonzero exit code. The Batch service will try the Task once, and may then retry up to this limit. For example, if the maximum retry count is 3, Batch tries the Task up to 4 times (one initial try and 3 retries). If the maximum retry count is 0, the Batch service does not retry the Task after the first attempt. If the maximum retry count is -1, the Batch service retries the Task without limit.
    - max_wall_clock_time -- If this is not specified, there is no time limit on how long the Task may run.
    - resource_files -- A list of files that the Batch service will download to the compute node before running the command line. Space-separated resource references in filename=httpurl format, with httpurl being any HTTP url with public access or a SAS url with read access.
    - retention_time -- The default is 7 days, i.e. the Task directory will be retained for 7 days unless the Compute Node is removed or the Job is deleted.
    - task_id -- The ID of the task.
    '''
    # locals() is exactly the parameter set at this point; do NOT introduce
    # local variables in this body or they would be forwarded to the CLI.
    return _call_az("az batch task create", locals())
def list(job_id, account_endpoint=None, account_key=None, account_name=None, expand=None, filter=None, select=None):
    '''
    List all of the tasks of the specified job.
    Required Parameters:
    - job_id -- The ID of the Job.
    Optional Parameters:
    - account_endpoint -- Batch service endpoint. Alternatively, set by environment variable: AZURE_BATCH_ENDPOINT
    - account_key -- Batch account key. Alternatively, set by environment variable: AZURE_BATCH_ACCESS_KEY
    - account_name -- Batch account name. Alternatively, set by environment variable: AZURE_BATCH_ACCOUNT
    - expand -- An OData $expand clause.
    - filter -- An OData $filter clause. For more information on constructing this filter, see https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-tasks.
    - select -- An OData $select clause.
    '''
    # NOTE: the name intentionally shadows builtins `list`/`filter` to mirror
    # the az subcommand; callers access it as batch.task.list(...).
    # locals() forwards every parameter -- do not add local variables here.
    return _call_az("az batch task list", locals())
def delete(job_id, task_id, account_endpoint=None, account_key=None, account_name=None, if_match=None, if_modified_since=None, if_none_match=None, if_unmodified_since=None, yes=None):
    '''
    Delete a Batch task from the specified job.
    Required Parameters:
    - job_id -- The ID of the Job from which to delete the Task.
    - task_id -- The ID of the Task to delete.
    Optional Parameters:
    - account_endpoint -- Batch service endpoint. Alternatively, set by environment variable: AZURE_BATCH_ENDPOINT
    - account_key -- Batch account key. Alternatively, set by environment variable: AZURE_BATCH_ACCESS_KEY
    - account_name -- Batch account name. Alternatively, set by environment variable: AZURE_BATCH_ACCOUNT
    - if_match -- An ETag value associated with the version of the resource known to the client. The operation will be performed only if the resource's current ETag on the service exactly matches the value specified by the client.
    - if_modified_since -- A timestamp indicating the last modified time of the resource known to the client. The operation will be performed only if the resource on the service has been modified since the specified time.
    - if_none_match -- An ETag value associated with the version of the resource known to the client. The operation will be performed only if the resource's current ETag on the service does not match the value specified by the client.
    - if_unmodified_since -- A timestamp indicating the last modified time of the resource known to the client. The operation will be performed only if the resource on the service has not been modified since the specified time.
    - yes -- Do not prompt for confirmation.
    '''
    # locals() forwards every parameter to the CLI -- no extra locals allowed.
    return _call_az("az batch task delete", locals())
def show(job_id, task_id, account_endpoint=None, account_key=None, account_name=None, expand=None, if_match=None, if_modified_since=None, if_none_match=None, if_unmodified_since=None, select=None):
    '''
    Show information about a specified Batch task.
    Required Parameters:
    - job_id -- The ID of the Job that contains the Task.
    - task_id -- The ID of the Task to get information about.
    Optional Parameters:
    - account_endpoint -- Batch service endpoint. Alternatively, set by environment variable: AZURE_BATCH_ENDPOINT
    - account_key -- Batch account key. Alternatively, set by environment variable: AZURE_BATCH_ACCESS_KEY
    - account_name -- Batch account name. Alternatively, set by environment variable: AZURE_BATCH_ACCOUNT
    - expand -- An OData $expand clause.
    - if_match -- An ETag value associated with the version of the resource known to the client. The operation will be performed only if the resource's current ETag on the service exactly matches the value specified by the client.
    - if_modified_since -- A timestamp indicating the last modified time of the resource known to the client. The operation will be performed only if the resource on the service has been modified since the specified time.
    - if_none_match -- An ETag value associated with the version of the resource known to the client. The operation will be performed only if the resource's current ETag on the service does not match the value specified by the client.
    - if_unmodified_since -- A timestamp indicating the last modified time of the resource known to the client. The operation will be performed only if the resource on the service has not been modified since the specified time.
    - select -- An OData $select clause.
    '''
    # locals() forwards every parameter to the CLI -- no extra locals allowed.
    return _call_az("az batch task show", locals())
def reset(job_id, task_id, account_endpoint=None, account_key=None, account_name=None, if_match=None, if_modified_since=None, if_none_match=None, if_unmodified_since=None, json_file=None, max_task_retry_count=None, max_wall_clock_time=None, retention_time=None):
    '''
    Reset the properties of a Batch task.
    Required Parameters:
    - job_id -- The ID of the Job containing the Task.
    - task_id -- The ID of the Task to update.
    Optional Parameters:
    - account_endpoint -- Batch service endpoint. Alternatively, set by environment variable: AZURE_BATCH_ENDPOINT
    - account_key -- Batch account key. Alternatively, set by environment variable: AZURE_BATCH_ACCESS_KEY
    - account_name -- Batch account name. Alternatively, set by environment variable: AZURE_BATCH_ACCOUNT
    - if_match -- An ETag value associated with the version of the resource known to the client. The operation will be performed only if the resource's current ETag on the service exactly matches the value specified by the client.
    - if_modified_since -- A timestamp indicating the last modified time of the resource known to the client. The operation will be performed only if the resource on the service has been modified since the specified time.
    - if_none_match -- An ETag value associated with the version of the resource known to the client. The operation will be performed only if the resource's current ETag on the service does not match the value specified by the client.
    - if_unmodified_since -- A timestamp indicating the last modified time of the resource known to the client. The operation will be performed only if the resource on the service has not been modified since the specified time.
    - json_file -- A file containing the constraints specification in JSON (formatted to match the respective REST API body). If this parameter is specified, all 'Constraints Arguments' are ignored.
    - max_task_retry_count -- The maximum number of times the Task may be retried. The Batch service retries a Task if its exit code is nonzero. Note that this value specifically controls the number of retries for the Task executable due to a nonzero exit code. The Batch service will try the Task once, and may then retry up to this limit. For example, if the maximum retry count is 3, Batch tries the Task up to 4 times (one initial try and 3 retries). If the maximum retry count is 0, the Batch service does not retry the Task after the first attempt. If the maximum retry count is -1, the Batch service retries the Task without limit.
    - max_wall_clock_time -- If this is not specified, there is no time limit on how long the Task may run. Expected format is an ISO-8601 duration.
    - retention_time -- The default is 7 days, i.e. the Task directory will be retained for 7 days unless the Compute Node is removed or the Job is deleted. Expected format is an ISO-8601 duration.
    '''
    # locals() forwards every parameter to the CLI -- no extra locals allowed.
    return _call_az("az batch task reset", locals())
def reactivate(job_id, task_id, account_endpoint=None, account_key=None, account_name=None, if_match=None, if_modified_since=None, if_none_match=None, if_unmodified_since=None):
    '''
    Reactivate a Batch task, allowing it to run again even if its retry count has been exhausted.
    Required Parameters:
    - job_id -- The ID of the Job containing the Task.
    - task_id -- The ID of the Task to reactivate.
    Optional Parameters:
    - account_endpoint -- Batch service endpoint. Alternatively, set by environment variable: AZURE_BATCH_ENDPOINT
    - account_key -- Batch account key. Alternatively, set by environment variable: AZURE_BATCH_ACCESS_KEY
    - account_name -- Batch account name. Alternatively, set by environment variable: AZURE_BATCH_ACCOUNT
    - if_match -- An ETag value associated with the version of the resource known to the client. The operation will be performed only if the resource's current ETag on the service exactly matches the value specified by the client.
    - if_modified_since -- A timestamp indicating the last modified time of the resource known to the client. The operation will be performed only if the resource on the service has been modified since the specified time.
    - if_none_match -- An ETag value associated with the version of the resource known to the client. The operation will be performed only if the resource's current ETag on the service does not match the value specified by the client.
    - if_unmodified_since -- A timestamp indicating the last modified time of the resource known to the client. The operation will be performed only if the resource on the service has not been modified since the specified time.
    '''
    # locals() forwards every parameter to the CLI -- no extra locals allowed.
    return _call_az("az batch task reactivate", locals())
def stop(job_id, task_id, account_endpoint=None, account_key=None, account_name=None, if_match=None, if_modified_since=None, if_none_match=None, if_unmodified_since=None):
    '''
    Terminate the specified Batch task.
    Required Parameters:
    - job_id -- The ID of the Job containing the Task.
    - task_id -- The ID of the Task to terminate.
    Optional Parameters:
    - account_endpoint -- Batch service endpoint. Alternatively, set by environment variable: AZURE_BATCH_ENDPOINT
    - account_key -- Batch account key. Alternatively, set by environment variable: AZURE_BATCH_ACCESS_KEY
    - account_name -- Batch account name. Alternatively, set by environment variable: AZURE_BATCH_ACCOUNT
    - if_match -- An ETag value associated with the version of the resource known to the client. The operation will be performed only if the resource's current ETag on the service exactly matches the value specified by the client.
    - if_modified_since -- A timestamp indicating the last modified time of the resource known to the client. The operation will be performed only if the resource on the service has been modified since the specified time.
    - if_none_match -- An ETag value associated with the version of the resource known to the client. The operation will be performed only if the resource's current ETag on the service does not match the value specified by the client.
    - if_unmodified_since -- A timestamp indicating the last modified time of the resource known to the client. The operation will be performed only if the resource on the service has not been modified since the specified time.
    '''
    # locals() forwards every parameter to the CLI -- no extra locals allowed.
    return _call_az("az batch task stop", locals())
from . import file, subtask
def create(job_id, account_endpoint=None, account_key=None, account_name=None, affinity_id=None, application_package_references=None, command_line=None, environment_settings=None, json_file=None, max_task_retry_count=None, max_wall_clock_time=None, resource_files=None, retention_time=None, task_id=None):
'''
Create Batch tasks.
Required Parameters:
- job_id -- The ID of the job containing the task.
Optional Parameters:
- account_endpoint -- Batch service endpoint. Alternatively, set by environment variable: AZURE_BATCH_ENDPOINT
- account_key -- The Batch account key. Alternatively, set by environment variable: AZURE_BATCH_ACCESS_KEY
- account_name -- The Batch account name. Alternatively, set by environment variable: AZURE_BATCH_ACCOUNT
- affinity_id -- Required. You can pass the affinityId of a Node to indicate that this Task needs to run on that Compute Node. Note that this is just a soft affinity. If the target Compute Node is busy or unavailable at the time the Task is scheduled, then the Task will be scheduled elsewhere.
- application_package_references -- The space-separated list of IDs specifying the application packages to be installed. Space-separated application IDs with optional version in 'id[#version]' format.
- command_line -- The command line of the task. The command line does not run under a shell, and therefore cannot take advantage of shell features such as environment variable expansion. If you want to take advantage of such features, you should invoke the shell in the command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux.
- environment_settings -- A list of environment variable settings for the task. Space-separated values in 'key=value' format.
- json_file -- The file containing the task(s) to create in JSON(formatted to match REST API request body). When submitting multiple tasks, accepts either an array of tasks or a TaskAddCollectionParamater. If this parameter is specified, all other parameters are ignored.
- max_task_retry_count -- The maximum number of times the Task may be retried. The Batch service retries a Task if its exit code is nonzero. Note that this value specifically controls the number of retries for the Task executable due to a nonzero exit code. The Batch service will try the Task once, and may then retry up to this limit. For example, if the maximum retry count is 3, Batch tries the Task up to 4 times (one initial try and 3 retries). If the maximum retry count is 0, the Batch service does not retry the Task after the first attempt. If the maximum retry count is -1, the Batch service retries the Task without limit.
- max_wall_clock_time -- If this is not specified, there is no time limit on how long the Task may run.
- resource_files -- A list of files that the Batch service will download to the compute node before running the command line. Space-separated resource references in filename=httpurl format, with httpurl being any HTTP url with public access or a SAS url with read access.
- retention_time -- The default is 7 days, i.e. the Task directory will be retained for 7 days unless the Compute Node is removed or the Job is deleted.
- task_id -- The ID of the task.
'''
return _call_az("az batch task create", locals())
def list(job_id, account_endpoint=None, account_key=None, account_name=None, expand=None, filter=None, select=None):
    '''
    Thin wrapper for the `az batch task list` CLI command.

    (The name intentionally shadows the builtin `list`/`filter`: parameter and
    function names mirror the CLI verbs and flags.)

    Required Parameters:
    - job_id -- The ID of the Job.

    Optional Parameters:
    - account_endpoint -- Batch service endpoint. Alternatively, set by environment variable: AZURE_BATCH_ENDPOINT
    - account_key -- Batch account key. Alternatively, set by environment variable: AZURE_BATCH_ACCESS_KEY
    - account_name -- Batch account name. Alternatively, set by environment variable: AZURE_BATCH_ACCOUNT
    - expand -- An OData $expand clause.
    - filter -- An OData $filter clause. For more information on constructing this filter, see https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-tasks.
    - select -- An OData $select clause.
    '''
    # locals() here is exactly this function's parameters (nothing else has
    # been bound yet); _call_az presumably maps them onto CLI flags, so do
    # not introduce local variables before this call.
    return _call_az("az batch task list", locals())
def delete(job_id, task_id, account_endpoint=None, account_key=None, account_name=None, if_match=None, if_modified_since=None, if_none_match=None, if_unmodified_since=None, yes=None):
    '''
    Thin wrapper for the `az batch task delete` CLI command.

    Required Parameters:
    - job_id -- The ID of the Job from which to delete the Task.
    - task_id -- The ID of the Task to delete.

    Optional Parameters:
    - account_endpoint -- Batch service endpoint. Alternatively, set by environment variable: AZURE_BATCH_ENDPOINT
    - account_key -- Batch account key. Alternatively, set by environment variable: AZURE_BATCH_ACCESS_KEY
    - account_name -- Batch account name. Alternatively, set by environment variable: AZURE_BATCH_ACCOUNT
    - if_match -- An ETag value associated with the version of the resource known to the client. The operation will be performed only if the resource's current ETag on the service exactly matches the value specified by the client.
    - if_modified_since -- A timestamp indicating the last modified time of the resource known to the client. The operation will be performed only if the resource on the service has been modified since the specified time.
    - if_none_match -- An ETag value associated with the version of the resource known to the client. The operation will be performed only if the resource's current ETag on the service does not match the value specified by the client.
    - if_unmodified_since -- A timestamp indicating the last modified time of the resource known to the client. The operation will be performed only if the resource on the service has not been modified since the specified time.
    - yes -- Do not prompt for confirmation.
    '''
    # locals() here is exactly this function's parameters (nothing else has
    # been bound yet); _call_az presumably maps them onto CLI flags, so do
    # not introduce local variables before this call.
    return _call_az("az batch task delete", locals())
def show(job_id, task_id, account_endpoint=None, account_key=None, account_name=None, expand=None, if_match=None, if_modified_since=None, if_none_match=None, if_unmodified_since=None, select=None):
    '''
    Thin wrapper for the `az batch task show` CLI command.

    Required Parameters:
    - job_id -- The ID of the Job that contains the Task.
    - task_id -- The ID of the Task to get information about.

    Optional Parameters:
    - account_endpoint -- Batch service endpoint. Alternatively, set by environment variable: AZURE_BATCH_ENDPOINT
    - account_key -- Batch account key. Alternatively, set by environment variable: AZURE_BATCH_ACCESS_KEY
    - account_name -- Batch account name. Alternatively, set by environment variable: AZURE_BATCH_ACCOUNT
    - expand -- An OData $expand clause.
    - if_match -- An ETag value associated with the version of the resource known to the client. The operation will be performed only if the resource's current ETag on the service exactly matches the value specified by the client.
    - if_modified_since -- A timestamp indicating the last modified time of the resource known to the client. The operation will be performed only if the resource on the service has been modified since the specified time.
    - if_none_match -- An ETag value associated with the version of the resource known to the client. The operation will be performed only if the resource's current ETag on the service does not match the value specified by the client.
    - if_unmodified_since -- A timestamp indicating the last modified time of the resource known to the client. The operation will be performed only if the resource on the service has not been modified since the specified time.
    - select -- An OData $select clause.
    '''
    # locals() here is exactly this function's parameters (nothing else has
    # been bound yet); _call_az presumably maps them onto CLI flags, so do
    # not introduce local variables before this call.
    return _call_az("az batch task show", locals())
def reset(job_id, task_id, account_endpoint=None, account_key=None, account_name=None, if_match=None, if_modified_since=None, if_none_match=None, if_unmodified_since=None, json_file=None, max_task_retry_count=None, max_wall_clock_time=None, retention_time=None):
    '''
    Thin wrapper for the `az batch task reset` CLI command.

    Reset the properties of a Batch task.

    Required Parameters:
    - job_id -- The ID of the Job containing the Task.
    - task_id -- The ID of the Task to update.

    Optional Parameters:
    - account_endpoint -- Batch service endpoint. Alternatively, set by environment variable: AZURE_BATCH_ENDPOINT
    - account_key -- Batch account key. Alternatively, set by environment variable: AZURE_BATCH_ACCESS_KEY
    - account_name -- Batch account name. Alternatively, set by environment variable: AZURE_BATCH_ACCOUNT
    - if_match -- An ETag value associated with the version of the resource known to the client. The operation will be performed only if the resource's current ETag on the service exactly matches the value specified by the client.
    - if_modified_since -- A timestamp indicating the last modified time of the resource known to the client. The operation will be performed only if the resource on the service has been modified since the specified time.
    - if_none_match -- An ETag value associated with the version of the resource known to the client. The operation will be performed only if the resource's current ETag on the service does not match the value specified by the client.
    - if_unmodified_since -- A timestamp indicating the last modified time of the resource known to the client. The operation will be performed only if the resource on the service has not been modified since the specified time.
    - json_file -- A file containing the constraints specification in JSON (formatted to match the respective REST API body). If this parameter is specified, all 'Constraints Arguments' are ignored.
    - max_task_retry_count -- The maximum number of times the Task may be retried. The Batch service retries a Task if its exit code is nonzero. Note that this value specifically controls the number of retries for the Task executable due to a nonzero exit code. The Batch service will try the Task once, and may then retry up to this limit. For example, if the maximum retry count is 3, Batch tries the Task up to 4 times (one initial try and 3 retries). If the maximum retry count is 0, the Batch service does not retry the Task after the first attempt. If the maximum retry count is -1, the Batch service retries the Task without limit.
    - max_wall_clock_time -- If this is not specified, there is no time limit on how long the Task may run. Expected format is an ISO-8601 duration.
    - retention_time -- The default is 7 days, i.e. the Task directory will be retained for 7 days unless the Compute Node is removed or the Job is deleted. Expected format is an ISO-8601 duration.
    '''
    # locals() here is exactly this function's parameters (nothing else has
    # been bound yet); _call_az presumably maps them onto CLI flags, so do
    # not introduce local variables before this call.
    return _call_az("az batch task reset", locals())
def reactivate(job_id, task_id, account_endpoint=None, account_key=None, account_name=None, if_match=None, if_modified_since=None, if_none_match=None, if_unmodified_since=None):
    '''
    Thin wrapper for the `az batch task reactivate` CLI command.

    Required Parameters:
    - job_id -- The ID of the Job containing the Task.
    - task_id -- The ID of the Task to reactivate.

    Optional Parameters:
    - account_endpoint -- Batch service endpoint. Alternatively, set by environment variable: AZURE_BATCH_ENDPOINT
    - account_key -- Batch account key. Alternatively, set by environment variable: AZURE_BATCH_ACCESS_KEY
    - account_name -- Batch account name. Alternatively, set by environment variable: AZURE_BATCH_ACCOUNT
    - if_match -- An ETag value associated with the version of the resource known to the client. The operation will be performed only if the resource's current ETag on the service exactly matches the value specified by the client.
    - if_modified_since -- A timestamp indicating the last modified time of the resource known to the client. The operation will be performed only if the resource on the service has been modified since the specified time.
    - if_none_match -- An ETag value associated with the version of the resource known to the client. The operation will be performed only if the resource's current ETag on the service does not match the value specified by the client.
    - if_unmodified_since -- A timestamp indicating the last modified time of the resource known to the client. The operation will be performed only if the resource on the service has not been modified since the specified time.
    '''
    # locals() here is exactly this function's parameters (nothing else has
    # been bound yet); _call_az presumably maps them onto CLI flags, so do
    # not introduce local variables before this call.
    return _call_az("az batch task reactivate", locals())
def stop(job_id, task_id, account_endpoint=None, account_key=None, account_name=None, if_match=None, if_modified_since=None, if_none_match=None, if_unmodified_since=None):
    '''
    Thin wrapper for the `az batch task stop` CLI command (terminates a Task).

    Required Parameters:
    - job_id -- The ID of the Job containing the Task.
    - task_id -- The ID of the Task to terminate.

    Optional Parameters:
    - account_endpoint -- Batch service endpoint. Alternatively, set by environment variable: AZURE_BATCH_ENDPOINT
    - account_key -- Batch account key. Alternatively, set by environment variable: AZURE_BATCH_ACCESS_KEY
    - account_name -- Batch account name. Alternatively, set by environment variable: AZURE_BATCH_ACCOUNT
    - if_match -- An ETag value associated with the version of the resource known to the client. The operation will be performed only if the resource's current ETag on the service exactly matches the value specified by the client.
    - if_modified_since -- A timestamp indicating the last modified time of the resource known to the client. The operation will be performed only if the resource on the service has been modified since the specified time.
    - if_none_match -- An ETag value associated with the version of the resource known to the client. The operation will be performed only if the resource's current ETag on the service does not match the value specified by the client.
    - if_unmodified_since -- A timestamp indicating the last modified time of the resource known to the client. The operation will be performed only if the resource on the service has not been modified since the specified time.
    '''
    # locals() here is exactly this function's parameters (nothing else has
    # been bound yet); _call_az presumably maps them onto CLI flags, so do
    # not introduce local variables before this call.
    # Fix: stray " | 0.795857 | 0.262354" text fused onto this line (data-dump
    # corruption) was removed; it applied `|` to the command's return value.
    return _call_az("az batch task stop", locals())
import numpy as np
from typing import Tuple
from IMLearn.metalearners.adaboost import AdaBoost
from IMLearn.learners.classifiers import DecisionStump
from utils import *
import plotly.graph_objects as go
from plotly.subplots import make_subplots
def generate_data(n: int, noise_ratio: float) -> Tuple[np.ndarray, np.ndarray]:
    """
    Generate a dataset in R^2 of specified size

    Samples are drawn uniformly from [-1, 1]^2 and labeled -1 inside the
    circle of radius 0.5 centered at the origin, +1 outside. A fraction
    `noise_ratio` of the labels is then inverted to simulate label noise.

    Parameters
    ----------
    n: int
        Number of samples to generate

    noise_ratio: float
        Ratio of labels to invert

    Returns
    -------
    X: np.ndarray of shape (n_samples,2)
        Design matrix of samples

    y: np.ndarray of shape (n_samples,)
        Labels of samples
    """
    X, y = np.random.rand(n, 2) * 2 - 1, np.ones(n)
    y[np.sum(X ** 2, axis=1) < 0.5 ** 2] = -1
    # Fix: sample the indices to invert WITHOUT replacement. With the default
    # replace=True, duplicated indices flip a label twice (i.e. not at all),
    # so the realized noise level was below the requested noise_ratio.
    y[np.random.choice(n, int(noise_ratio * n), replace=False)] *= -1
    return X, y
def fit_and_evaluate_adaboost(noise, n_learners=250, train_size=5000, test_size=500):
    """
    Fit an AdaBoost ensemble of decision stumps and visualize its behaviour.

    Produces four plotly figures: (1) train/test error as a function of the
    ensemble size, (2) decision surfaces for 5/50/100/250 learners, (3) the
    decision surface of the ensemble size with the lowest test error, and
    (4) the full ensemble's surface with training points sized by their
    final AdaBoost weights ``model.D_``.

    Parameters
    ----------
    noise : float
        Ratio of labels to invert in the generated train/test data.
    n_learners : int, default 250
        Number of boosting iterations (weak learners) to fit.
    train_size : int, default 5000
        Number of training samples to generate.
    test_size : int, default 500
        Number of test samples to generate.
    """
    (train_X, train_y), (test_X, test_y) = generate_data(train_size, noise), generate_data(test_size, noise)

    # Question 1: Train and test errors of AdaBoost
    # Fix: fit() is called exactly once; the original code called
    # model.fit(...) a second time immediately after this line, silently
    # doubling the training cost with no effect on the result.
    model = AdaBoost(DecisionStump, n_learners).fit(train_X, train_y)
    num_learners = np.arange(start=1, stop=n_learners + 1)
    train_errors = []
    test_errors = []
    for t in num_learners:
        train_errors.append(model.partial_loss(train_X, train_y, t))
        test_errors.append(model.partial_loss(test_X, test_y, t))
    fig = go.Figure([
        go.Scatter(x=num_learners, y=train_errors,
                   mode='markers + lines', name=r'$Train samples$'),
        go.Scatter(x=num_learners, y=test_errors,
                   mode='markers + lines', name=r'$Test samples$')])
    fig.update_layout(
        title=f"Train and test errors of AdaBoost classifier"
              f"<br>using decision tree with {noise} noise.",
        xaxis=dict(title="Number of learners used"),
        yaxis=dict(title="loss"))
    fig.show()

    # Question 2: Plotting decision surfaces
    T = [5, 50, 100, 250]
    lims = np.array([np.r_[train_X, test_X].min(axis=0),
                     np.r_[train_X, test_X].max(axis=0)]).T + np.array([-.1, .1])
    fig = make_subplots(rows=2, cols=2, subplot_titles=[f"{i} learners" for i in T],
                        horizontal_spacing=.01, vertical_spacing=.03)
    for i, t in enumerate(T):
        # NOTE: the lambda captures the loop variable `t`; this is safe only
        # because decision_surface evaluates the callable within this
        # iteration -- presumably true for utils.decision_surface (confirm).
        fig.add_traces([decision_surface(
            lambda X: model.partial_predict(X, t), lims[0], lims[1], showscale=False),
            go.Scatter(x=test_X[:, 0], y=test_X[:, 1],
                       mode="markers", showlegend=False,
                       marker=dict(color=(test_y == 1).astype(int),
                                   symbol=class_symbols[test_y.astype(int)],
                                   colorscale=[custom[0], custom[-1]],
                                   line=dict(color="black", width=0.5)))],
            rows=(i // 2) + 1, cols=(i % 2) + 1)
    fig.update_layout(
        title=f"Decision Boundaries Of AdaBoost classifier using decision tree"
              f"<br>with {noise} noise, according to the number of learners.",
        width=800, height=800, margin=dict(t=100)
    ).update_xaxes(visible=False).update_yaxes(visible=False)
    fig.show()

    # Question 3: Decision surface of best performing ensemble
    best_size = np.argmin(test_errors) + 1
    from IMLearn.metrics import accuracy  # local import, used only for the figure title
    fig = go.Figure(data=[decision_surface(
        lambda X: model.partial_predict(X, best_size), lims[0], lims[1], showscale=False),
        go.Scatter(x=test_X[:, 0], y=test_X[:, 1],
                   mode="markers", showlegend=False,
                   marker=dict(color=(test_y == 1).astype(int),
                               symbol=class_symbols[test_y.astype(int)],
                               colorscale=[custom[0], custom[-1]],
                               line=dict(color="black", width=0.5)))])
    fig.update_layout(
        title=f"Decision surface of best performing ensemble of"
              f"<br>AdaBoost classifier using decision tree with {noise} noise."
              f"<br>Ensemble size={best_size}, Accuracy="
              f"{accuracy(test_y, model.partial_predict(test_X, best_size))}",
        width=800, height=800, margin=dict(t=100)
    ).update_xaxes(visible=False).update_yaxes(visible=False)
    fig.show()

    # Question 4: Decision surface with weighted samples
    fig = go.Figure(data=[decision_surface(
        model.predict, lims[0], lims[1], showscale=False),
        go.Scatter(x=train_X[:, 0], y=train_X[:, 1],
                   mode="markers", showlegend=False,
                   marker=dict(color=(train_y == 1).astype(int),
                               symbol=class_symbols[train_y.astype(int)],
                               colorscale=[custom[0], custom[-1]],
                               line=dict(color="black", width=1),
                               # marker size proportional to the final sample weight
                               size=(model.D_ / np.max(model.D_)) * 5))])
    fig.update_layout(
        title=f"Decision surface with weighted samples of AdaBoost"
              f"<br>classifier using decision tree with {noise} noise.",
        width=800, height=800, margin=dict(t=100)
    ).update_xaxes(visible=False).update_yaxes(visible=False)
    fig.show()
if __name__ == '__main__':
    # Run the full experiment once on clean data and once with 40% label noise.
    # Fix: stray " | exercises/adaboost_scenario.py | import numpy as np" text
    # fused onto the last line (data-dump corruption) was removed; it made the
    # line a syntax error.
    np.random.seed(0)
    fit_and_evaluate_adaboost(0)
    fit_and_evaluate_adaboost(0.4)
from typing import Tuple
from IMLearn.metalearners.adaboost import AdaBoost
from IMLearn.learners.classifiers import DecisionStump
from utils import *
import plotly.graph_objects as go
from plotly.subplots import make_subplots
def generate_data(n: int, noise_ratio: float) -> Tuple[np.ndarray, np.ndarray]:
    """
    Generate a dataset in R^2 of specified size

    Samples are drawn uniformly from [-1, 1]^2 and labeled -1 inside the
    circle of radius 0.5 centered at the origin, +1 outside. A fraction
    `noise_ratio` of the labels is then inverted to simulate label noise.

    Parameters
    ----------
    n: int
        Number of samples to generate

    noise_ratio: float
        Ratio of labels to invert

    Returns
    -------
    X: np.ndarray of shape (n_samples,2)
        Design matrix of samples

    y: np.ndarray of shape (n_samples,)
        Labels of samples
    """
    X, y = np.random.rand(n, 2) * 2 - 1, np.ones(n)
    y[np.sum(X ** 2, axis=1) < 0.5 ** 2] = -1
    # Fix: sample the indices to invert WITHOUT replacement. With the default
    # replace=True, duplicated indices flip a label twice (i.e. not at all),
    # so the realized noise level was below the requested noise_ratio.
    y[np.random.choice(n, int(noise_ratio * n), replace=False)] *= -1
    return X, y
def fit_and_evaluate_adaboost(noise, n_learners=250, train_size=5000, test_size=500):
    """
    Fit an AdaBoost ensemble of decision stumps and visualize its behaviour.

    Produces four plotly figures: (1) train/test error as a function of the
    ensemble size, (2) decision surfaces for 5/50/100/250 learners, (3) the
    decision surface of the ensemble size with the lowest test error, and
    (4) the full ensemble's surface with training points sized by their
    final AdaBoost weights ``model.D_``.

    Parameters
    ----------
    noise : float
        Ratio of labels to invert in the generated train/test data.
    n_learners : int, default 250
        Number of boosting iterations (weak learners) to fit.
    train_size : int, default 5000
        Number of training samples to generate.
    test_size : int, default 500
        Number of test samples to generate.
    """
    (train_X, train_y), (test_X, test_y) = generate_data(train_size, noise), generate_data(test_size, noise)

    # Question 1: Train and test errors of AdaBoost
    # Fix: fit() is called exactly once; the original code called
    # model.fit(...) a second time immediately after this line, silently
    # doubling the training cost with no effect on the result.
    model = AdaBoost(DecisionStump, n_learners).fit(train_X, train_y)
    num_learners = np.arange(start=1, stop=n_learners + 1)
    train_errors = []
    test_errors = []
    for t in num_learners:
        train_errors.append(model.partial_loss(train_X, train_y, t))
        test_errors.append(model.partial_loss(test_X, test_y, t))
    fig = go.Figure([
        go.Scatter(x=num_learners, y=train_errors,
                   mode='markers + lines', name=r'$Train samples$'),
        go.Scatter(x=num_learners, y=test_errors,
                   mode='markers + lines', name=r'$Test samples$')])
    fig.update_layout(
        title=f"Train and test errors of AdaBoost classifier"
              f"<br>using decision tree with {noise} noise.",
        xaxis=dict(title="Number of learners used"),
        yaxis=dict(title="loss"))
    fig.show()

    # Question 2: Plotting decision surfaces
    T = [5, 50, 100, 250]
    lims = np.array([np.r_[train_X, test_X].min(axis=0),
                     np.r_[train_X, test_X].max(axis=0)]).T + np.array([-.1, .1])
    fig = make_subplots(rows=2, cols=2, subplot_titles=[f"{i} learners" for i in T],
                        horizontal_spacing=.01, vertical_spacing=.03)
    for i, t in enumerate(T):
        # NOTE: the lambda captures the loop variable `t`; this is safe only
        # because decision_surface evaluates the callable within this
        # iteration -- presumably true for utils.decision_surface (confirm).
        fig.add_traces([decision_surface(
            lambda X: model.partial_predict(X, t), lims[0], lims[1], showscale=False),
            go.Scatter(x=test_X[:, 0], y=test_X[:, 1],
                       mode="markers", showlegend=False,
                       marker=dict(color=(test_y == 1).astype(int),
                                   symbol=class_symbols[test_y.astype(int)],
                                   colorscale=[custom[0], custom[-1]],
                                   line=dict(color="black", width=0.5)))],
            rows=(i // 2) + 1, cols=(i % 2) + 1)
    fig.update_layout(
        title=f"Decision Boundaries Of AdaBoost classifier using decision tree"
              f"<br>with {noise} noise, according to the number of learners.",
        width=800, height=800, margin=dict(t=100)
    ).update_xaxes(visible=False).update_yaxes(visible=False)
    fig.show()

    # Question 3: Decision surface of best performing ensemble
    best_size = np.argmin(test_errors) + 1
    from IMLearn.metrics import accuracy  # local import, used only for the figure title
    fig = go.Figure(data=[decision_surface(
        lambda X: model.partial_predict(X, best_size), lims[0], lims[1], showscale=False),
        go.Scatter(x=test_X[:, 0], y=test_X[:, 1],
                   mode="markers", showlegend=False,
                   marker=dict(color=(test_y == 1).astype(int),
                               symbol=class_symbols[test_y.astype(int)],
                               colorscale=[custom[0], custom[-1]],
                               line=dict(color="black", width=0.5)))])
    fig.update_layout(
        title=f"Decision surface of best performing ensemble of"
              f"<br>AdaBoost classifier using decision tree with {noise} noise."
              f"<br>Ensemble size={best_size}, Accuracy="
              f"{accuracy(test_y, model.partial_predict(test_X, best_size))}",
        width=800, height=800, margin=dict(t=100)
    ).update_xaxes(visible=False).update_yaxes(visible=False)
    fig.show()

    # Question 4: Decision surface with weighted samples
    fig = go.Figure(data=[decision_surface(
        model.predict, lims[0], lims[1], showscale=False),
        go.Scatter(x=train_X[:, 0], y=train_X[:, 1],
                   mode="markers", showlegend=False,
                   marker=dict(color=(train_y == 1).astype(int),
                               symbol=class_symbols[train_y.astype(int)],
                               colorscale=[custom[0], custom[-1]],
                               line=dict(color="black", width=1),
                               # marker size proportional to the final sample weight
                               size=(model.D_ / np.max(model.D_)) * 5))])
    fig.update_layout(
        title=f"Decision surface with weighted samples of AdaBoost"
              f"<br>classifier using decision tree with {noise} noise.",
        width=800, height=800, margin=dict(t=100)
    ).update_xaxes(visible=False).update_yaxes(visible=False)
    fig.show()
if __name__ == '__main__':
    # Run the full experiment once on clean data and once with 40% label noise.
    # Fix: stray " | 0.911316 | 0.681789" text fused onto the last line
    # (data-dump corruption) was removed; it applied `|` to the call's result.
    np.random.seed(0)
    fit_and_evaluate_adaboost(0)
    fit_and_evaluate_adaboost(0.4)
from __future__ import print_function
import numpy as np
import pickle
import matplotlib.pyplot as plt
from scipy.signal import butter, lfilter
import datetime
# TODO: maybe would be cleaner to put this as functions rather than script
class BandPass(object):
    """Butterworth band-pass filter with coefficients fixed at construction."""

    def __init__(self, lowcut=0.05, highcut=0.25, fs=10.0, order=3):
        """Design the filter; lowcut, highcut and fs are in Hz."""
        nyquist = 0.5 * fs
        normalized_band = [lowcut / nyquist, highcut / nyquist]
        self.b, self.a = butter(order, normalized_band, btype='band')

    def filter_data(self, data):
        """Apply the filter to `data` and return the filtered signal."""
        return lfilter(self.b, self.a, data)
# Hard-coded input pickle (produced by an earlier processing step) and the
# prefix used for the CSV written at the end of the script.
path_IMU_data = "/home/jrlab/Desktop/Data/Data_For_Aleksey_Harbor_2019/Out/3/_IMU_TNEDXYZ"
path_output = "/home/jrlab/Desktop/Data/Data_For_Aleksey_Harbor_2019/Out/3/_IMU_TNEDXYZ.csv"
# load the saved data
with open(path_IMU_data, "rb") as crrt_file:
    dict_data_loaded_IMU = pickle.load(crrt_file)
# Instrument IDs (dict keys) to process. Each entry exposes T (timestamps)
# plus D/N/E/X/Y/Z acceleration channels and Yaw/Pitch/Roll -- inferred from
# the attribute accesses below; TODO confirm against the pickle producer.
list_IMUs_for_plot = ["3"]
# Report how much data is available per instrument (10 Hz sampling assumed).
for crrt_IMU in list_IMUs_for_plot:
    size_data = np.size(dict_data_loaded_IMU[crrt_IMU].D)
    print("IMU {}".format(crrt_IMU))
    print("Number of points: {}".format(size_data))
    print("Corresponding duration (hr): {}".format(size_data / 10.0 / 3600))
    print("Corresponding numbe of 15 minutes files read: {}".format(size_data / 10 / 3600 * 4.0))
dict_filtered_resampled_data = {}  # NOTE(review): never written to below
FS = 10  # sampling / resampling frequency, Hz
band_pass_filter = BandPass(lowcut=0.03, highcut=0.25, order=2)
# Common regular time base: seconds since str_time_min, sampled at FS.
str_time_min = "2019-04-02 09:30:00.000"
time_min = datetime.datetime.strptime(str_time_min, "%Y-%m-%d %H:%M:%S.%f")
time_max = datetime.datetime.strptime("2019-04-02 14:00:00.000", "%Y-%m-%d %H:%M:%S.%f")
time_base_start = 0
time_base_duration = (time_max - time_min).total_seconds() - time_base_start
time_base = np.arange(start=time_base_start, stop=time_base_duration, step=1.0 / FS)
# Per-instrument pipeline: band-pass filter every channel, resample onto the
# regular time base, plot for visual QC, then dump everything to one CSV.
for crrt_IMU in list_IMUs_for_plot:
    print("Look at instrument {}".format(crrt_IMU))
    # Band-pass each acceleration channel (NED and body XYZ frames) and the
    # attitude angles, keeping only the 0.03-0.25 Hz band.
    acc_D_filtered = band_pass_filter.filter_data(dict_data_loaded_IMU[crrt_IMU].D)
    acc_N_filtered = band_pass_filter.filter_data(dict_data_loaded_IMU[crrt_IMU].N)
    acc_E_filtered = band_pass_filter.filter_data(dict_data_loaded_IMU[crrt_IMU].E)
    acc_X_filtered = band_pass_filter.filter_data(dict_data_loaded_IMU[crrt_IMU].X)
    acc_Y_filtered = band_pass_filter.filter_data(dict_data_loaded_IMU[crrt_IMU].Y)
    acc_Z_filtered = band_pass_filter.filter_data(dict_data_loaded_IMU[crrt_IMU].Z)
    Yaw_filtered = band_pass_filter.filter_data(dict_data_loaded_IMU[crrt_IMU].Yaw)
    Pitch_filtered = band_pass_filter.filter_data(dict_data_loaded_IMU[crrt_IMU].Pitch)
    Roll_filtered = band_pass_filter.filter_data(dict_data_loaded_IMU[crrt_IMU].Roll)
    # Timestamps expressed as seconds relative to the common origin time_min.
    time_sec_since_time_min = []
    for crrt_timestamp in dict_data_loaded_IMU[crrt_IMU].T:
        time_sec_since_time_min.append((crrt_timestamp - time_min).total_seconds())
    time_sec_since_time_min = np.array(time_sec_since_time_min)
    # Gap statistics: the nominal sampling period is 0.1 s, so any interval
    # exceeding 0.16 s indicates dropped samples.
    delta_time = time_sec_since_time_min[1:] - time_sec_since_time_min[0:-1]
    delta_time_anomaly = delta_time - 0.1
    missing_points = np.where(delta_time_anomaly > 0.06)
    # NOTE(review): this sums the gap excess in SECONDS, not a count of
    # samples, so the "percentage" below mixes seconds with a sample count;
    # a division by the 0.1 s period looks missing -- TODO confirm.
    number_missing_points = np.sum(delta_time_anomaly[missing_points])
    total_number_points = time_sec_since_time_min.size
    percentage_missing_points = number_missing_points * 100.0 / total_number_points
    print("percentage missing points: {}".format(percentage_missing_points))
    # Linearly interpolate every filtered channel onto the regular time base.
    acc_D_filtered_resampled = np.interp(time_base, time_sec_since_time_min, acc_D_filtered)
    acc_N_filtered_resampled = np.interp(time_base, time_sec_since_time_min, acc_N_filtered)
    acc_E_filtered_resampled = np.interp(time_base, time_sec_since_time_min, acc_E_filtered)
    acc_X_filtered_resampled = np.interp(time_base, time_sec_since_time_min, acc_X_filtered)
    acc_Y_filtered_resampled = np.interp(time_base, time_sec_since_time_min, acc_Y_filtered)
    acc_Z_filtered_resampled = np.interp(time_base, time_sec_since_time_min, acc_Z_filtered)
    Yaw_filtered_resampled = np.interp(time_base, time_sec_since_time_min, Yaw_filtered)
    Pitch_filtered_resampled = np.interp(time_base, time_sec_since_time_min, Pitch_filtered)
    Roll_filtered_resampled = np.interp(time_base, time_sec_since_time_min, Roll_filtered)
    # QC figure 1: NED accelerations, raw-time vs resampled (blocks until closed).
    plt.figure()
    plt.plot(time_sec_since_time_min, acc_D_filtered, label="filtered D")
    plt.plot(time_base, acc_D_filtered_resampled, label="filtered resampled D")
    plt.plot(time_base, acc_N_filtered_resampled, label="filtered resampled N")
    plt.plot(time_base, acc_E_filtered_resampled, label="filtered resampled E")
    # plt.xlim([3600 * 10 - 120, 3600 * 10 + 120])
    ampl_acc_plot = 0.01  # NOTE(review): only used by the commented-out ylim
    # plt.ylim([-ampl_acc_plot, ampl_acc_plot])
    plt.legend()
    plt.tight_layout()
    plt.show()
    # QC figure 2: body-frame accelerations against the down component.
    plt.figure()
    plt.plot(time_sec_since_time_min, acc_D_filtered, label="filtered D")
    plt.plot(time_base, acc_X_filtered_resampled, label="filtered resampled X")
    plt.plot(time_base, acc_Y_filtered_resampled, label="filtered resampled Y")
    plt.plot(time_base, acc_Z_filtered_resampled, label="filtered resampled Z")
    # plt.xlim([3600 * 10 - 120, 3600 * 10 + 120])
    ampl_acc_plot = 0.01
    # plt.ylim([-ampl_acc_plot, ampl_acc_plot])
    plt.legend()
    plt.tight_layout()
    plt.show()
    # QC figure 3: attitude angles against the down acceleration.
    plt.figure()
    plt.plot(time_sec_since_time_min, acc_D_filtered, label="filtered D")
    plt.plot(time_base, Yaw_filtered_resampled, label="filtered resampled Yaw")
    plt.plot(time_base, Pitch_filtered_resampled, label="filtered resampled Pitch")
    plt.plot(time_base, Roll_filtered_resampled, label="filtered resampled Roll")
    # plt.xlim([3600 * 10 - 120, 3600 * 10 + 120])
    ampl_acc_plot = 2
    # plt.ylim([-ampl_acc_plot, ampl_acc_plot])
    plt.legend()
    plt.tight_layout()
    plt.show()
    # TODO: add quality check figure yaw pitch roll
    # Assemble the output table; column order must match `header` below.
    data_IMU_filtered_resampled = np.zeros((time_base.size, 10))
    data_IMU_filtered_resampled[:, 0] = time_base
    data_IMU_filtered_resampled[:, 1] = acc_X_filtered_resampled
    data_IMU_filtered_resampled[:, 2] = acc_Y_filtered_resampled
    data_IMU_filtered_resampled[:, 3] = acc_Z_filtered_resampled
    data_IMU_filtered_resampled[:, 4] = acc_N_filtered_resampled
    data_IMU_filtered_resampled[:, 5] = acc_E_filtered_resampled
    data_IMU_filtered_resampled[:, 6] = acc_D_filtered_resampled
    data_IMU_filtered_resampled[:, 7] = Yaw_filtered_resampled
    data_IMU_filtered_resampled[:, 8] = Pitch_filtered_resampled
    data_IMU_filtered_resampled[:, 9] = Roll_filtered_resampled
    # TODO: add yaw pitch roll
    # NOTE(review): path_output already ends in ".csv", so this produces a
    # name like "..._IMU_TNEDXYZ.csvCSV_DATA_3.csv" -- confirm that is intended.
    crrt_file_save = path_output + "CSV_DATA_" + str(crrt_IMU) + ".csv"
    header = "Seconds_since_{} ACC_X ACC_Y ACC_Z ACC_N ACC_E ACC_D YAW PITCH ROLL".format(str_time_min)
    np.savetxt(crrt_file_save, data_IMU_filtered_resampled, header=header)
end = True | processing_scripts/filter_resample_csvWrite_acceleration.py | from __future__ import print_function
import numpy as np
import pickle
import matplotlib.pyplot as plt
from scipy.signal import butter, lfilter
import datetime
# TODO: maybe would be cleaner to put this as functions rather than script
class BandPass(object):
    """Butterworth band-pass filter with coefficients fixed at construction."""

    def __init__(self, lowcut=0.05, highcut=0.25, fs=10.0, order=3):
        """Design the filter; lowcut, highcut and fs are in Hz."""
        nyquist = 0.5 * fs
        normalized_band = [lowcut / nyquist, highcut / nyquist]
        self.b, self.a = butter(order, normalized_band, btype='band')

    def filter_data(self, data):
        """Apply the filter to `data` and return the filtered signal."""
        return lfilter(self.b, self.a, data)
# Hard-coded input pickle (produced by an earlier processing step) and the
# prefix used for the CSV written at the end of the script.
path_IMU_data = "/home/jrlab/Desktop/Data/Data_For_Aleksey_Harbor_2019/Out/3/_IMU_TNEDXYZ"
path_output = "/home/jrlab/Desktop/Data/Data_For_Aleksey_Harbor_2019/Out/3/_IMU_TNEDXYZ.csv"
# load the saved data
with open(path_IMU_data, "rb") as crrt_file:
    dict_data_loaded_IMU = pickle.load(crrt_file)
# Instrument IDs (dict keys) to process. Each entry exposes T (timestamps)
# plus D/N/E/X/Y/Z acceleration channels and Yaw/Pitch/Roll -- inferred from
# the attribute accesses below; TODO confirm against the pickle producer.
list_IMUs_for_plot = ["3"]
# Report how much data is available per instrument (10 Hz sampling assumed).
for crrt_IMU in list_IMUs_for_plot:
    size_data = np.size(dict_data_loaded_IMU[crrt_IMU].D)
    print("IMU {}".format(crrt_IMU))
    print("Number of points: {}".format(size_data))
    print("Corresponding duration (hr): {}".format(size_data / 10.0 / 3600))
    print("Corresponding numbe of 15 minutes files read: {}".format(size_data / 10 / 3600 * 4.0))
dict_filtered_resampled_data = {}  # NOTE(review): never written to below
FS = 10  # sampling / resampling frequency, Hz
band_pass_filter = BandPass(lowcut=0.03, highcut=0.25, order=2)
# Common regular time base: seconds since str_time_min, sampled at FS.
str_time_min = "2019-04-02 09:30:00.000"
time_min = datetime.datetime.strptime(str_time_min, "%Y-%m-%d %H:%M:%S.%f")
time_max = datetime.datetime.strptime("2019-04-02 14:00:00.000", "%Y-%m-%d %H:%M:%S.%f")
time_base_start = 0
time_base_duration = (time_max - time_min).total_seconds() - time_base_start
time_base = np.arange(start=time_base_start, stop=time_base_duration, step=1.0 / FS)
# Per-instrument pipeline: band-pass filter every channel, resample onto the
# regular time base, plot for visual QC, then dump everything to one CSV.
for crrt_IMU in list_IMUs_for_plot:
    print("Look at instrument {}".format(crrt_IMU))
    # Band-pass each acceleration channel (NED and body XYZ frames) and the
    # attitude angles, keeping only the 0.03-0.25 Hz band.
    acc_D_filtered = band_pass_filter.filter_data(dict_data_loaded_IMU[crrt_IMU].D)
    acc_N_filtered = band_pass_filter.filter_data(dict_data_loaded_IMU[crrt_IMU].N)
    acc_E_filtered = band_pass_filter.filter_data(dict_data_loaded_IMU[crrt_IMU].E)
    acc_X_filtered = band_pass_filter.filter_data(dict_data_loaded_IMU[crrt_IMU].X)
    acc_Y_filtered = band_pass_filter.filter_data(dict_data_loaded_IMU[crrt_IMU].Y)
    acc_Z_filtered = band_pass_filter.filter_data(dict_data_loaded_IMU[crrt_IMU].Z)
    Yaw_filtered = band_pass_filter.filter_data(dict_data_loaded_IMU[crrt_IMU].Yaw)
    Pitch_filtered = band_pass_filter.filter_data(dict_data_loaded_IMU[crrt_IMU].Pitch)
    Roll_filtered = band_pass_filter.filter_data(dict_data_loaded_IMU[crrt_IMU].Roll)
    # Timestamps expressed as seconds relative to the common origin time_min.
    time_sec_since_time_min = []
    for crrt_timestamp in dict_data_loaded_IMU[crrt_IMU].T:
        time_sec_since_time_min.append((crrt_timestamp - time_min).total_seconds())
    time_sec_since_time_min = np.array(time_sec_since_time_min)
    # Gap statistics: the nominal sampling period is 0.1 s, so any interval
    # exceeding 0.16 s indicates dropped samples.
    delta_time = time_sec_since_time_min[1:] - time_sec_since_time_min[0:-1]
    delta_time_anomaly = delta_time - 0.1
    missing_points = np.where(delta_time_anomaly > 0.06)
    # NOTE(review): this sums the gap excess in SECONDS, not a count of
    # samples, so the "percentage" below mixes seconds with a sample count;
    # a division by the 0.1 s period looks missing -- TODO confirm.
    number_missing_points = np.sum(delta_time_anomaly[missing_points])
    total_number_points = time_sec_since_time_min.size
    percentage_missing_points = number_missing_points * 100.0 / total_number_points
    print("percentage missing points: {}".format(percentage_missing_points))
    # Linearly interpolate every filtered channel onto the regular time base.
    acc_D_filtered_resampled = np.interp(time_base, time_sec_since_time_min, acc_D_filtered)
    acc_N_filtered_resampled = np.interp(time_base, time_sec_since_time_min, acc_N_filtered)
    acc_E_filtered_resampled = np.interp(time_base, time_sec_since_time_min, acc_E_filtered)
    acc_X_filtered_resampled = np.interp(time_base, time_sec_since_time_min, acc_X_filtered)
    acc_Y_filtered_resampled = np.interp(time_base, time_sec_since_time_min, acc_Y_filtered)
    acc_Z_filtered_resampled = np.interp(time_base, time_sec_since_time_min, acc_Z_filtered)
    Yaw_filtered_resampled = np.interp(time_base, time_sec_since_time_min, Yaw_filtered)
    Pitch_filtered_resampled = np.interp(time_base, time_sec_since_time_min, Pitch_filtered)
    Roll_filtered_resampled = np.interp(time_base, time_sec_since_time_min, Roll_filtered)
    # QC figure 1: NED accelerations, raw-time vs resampled (blocks until closed).
    plt.figure()
    plt.plot(time_sec_since_time_min, acc_D_filtered, label="filtered D")
    plt.plot(time_base, acc_D_filtered_resampled, label="filtered resampled D")
    plt.plot(time_base, acc_N_filtered_resampled, label="filtered resampled N")
    plt.plot(time_base, acc_E_filtered_resampled, label="filtered resampled E")
    # plt.xlim([3600 * 10 - 120, 3600 * 10 + 120])
    ampl_acc_plot = 0.01  # NOTE(review): only used by the commented-out ylim
    # plt.ylim([-ampl_acc_plot, ampl_acc_plot])
    plt.legend()
    plt.tight_layout()
    plt.show()
    # QC figure 2: body-frame accelerations against the down component.
    plt.figure()
    plt.plot(time_sec_since_time_min, acc_D_filtered, label="filtered D")
    plt.plot(time_base, acc_X_filtered_resampled, label="filtered resampled X")
    plt.plot(time_base, acc_Y_filtered_resampled, label="filtered resampled Y")
    plt.plot(time_base, acc_Z_filtered_resampled, label="filtered resampled Z")
    # plt.xlim([3600 * 10 - 120, 3600 * 10 + 120])
    ampl_acc_plot = 0.01
    # plt.ylim([-ampl_acc_plot, ampl_acc_plot])
    plt.legend()
    plt.tight_layout()
    plt.show()
    # QC figure 3: attitude angles against the down acceleration.
    plt.figure()
    plt.plot(time_sec_since_time_min, acc_D_filtered, label="filtered D")
    plt.plot(time_base, Yaw_filtered_resampled, label="filtered resampled Yaw")
    plt.plot(time_base, Pitch_filtered_resampled, label="filtered resampled Pitch")
    plt.plot(time_base, Roll_filtered_resampled, label="filtered resampled Roll")
    # plt.xlim([3600 * 10 - 120, 3600 * 10 + 120])
    ampl_acc_plot = 2
    # plt.ylim([-ampl_acc_plot, ampl_acc_plot])
    plt.legend()
    plt.tight_layout()
    plt.show()
    # TODO: add quality check figure yaw pitch roll
    # Assemble the output table; column order must match `header` below.
    data_IMU_filtered_resampled = np.zeros((time_base.size, 10))
    data_IMU_filtered_resampled[:, 0] = time_base
    data_IMU_filtered_resampled[:, 1] = acc_X_filtered_resampled
    data_IMU_filtered_resampled[:, 2] = acc_Y_filtered_resampled
    data_IMU_filtered_resampled[:, 3] = acc_Z_filtered_resampled
    data_IMU_filtered_resampled[:, 4] = acc_N_filtered_resampled
    data_IMU_filtered_resampled[:, 5] = acc_E_filtered_resampled
    data_IMU_filtered_resampled[:, 6] = acc_D_filtered_resampled
    data_IMU_filtered_resampled[:, 7] = Yaw_filtered_resampled
    data_IMU_filtered_resampled[:, 8] = Pitch_filtered_resampled
    data_IMU_filtered_resampled[:, 9] = Roll_filtered_resampled
    # TODO: add yaw pitch roll
    # NOTE(review): path_output already ends in ".csv", so this produces a
    # name like "..._IMU_TNEDXYZ.csvCSV_DATA_3.csv" -- confirm that is intended.
    crrt_file_save = path_output + "CSV_DATA_" + str(crrt_IMU) + ".csv"
    header = "Seconds_since_{} ACC_X ACC_Y ACC_Z ACC_N ACC_E ACC_D YAW PITCH ROLL".format(str_time_min)
    np.savetxt(crrt_file_save, data_IMU_filtered_resampled, header=header)
end = True | 0.412294 | 0.260766 |
from amuse.test import amusetest
import numpy
class harmonic_oscillator(object):
    """Unit-frequency 1-D harmonic oscillator advanced by a splitting method.

    The *method* callable receives (kick, drift, dt) and decides how the two
    sub-steps are composed into one full step of size dt.
    """

    def __init__(self, x, v, method=None):
        self.x = x
        self.v = v
        self.model_time = 0
        self.method = method

    def kick(self, dt):
        # dv/dt = -x (unit mass, unit spring constant)
        self.v -= self.x * dt

    def drift(self, dt):
        # dx/dt = v
        self.x += self.v * dt

    def total_energy(self):
        # Kinetic plus potential energy; conserved by the exact flow.
        return 0.5 * (self.v ** 2 + self.x ** 2)

    def evolve_step(self, dt):
        # The composition method arranges the kick/drift sub-steps.
        self.method(self.kick, self.drift, dt)
        self.model_time += dt
def run_harmonic_oscillator(dt, method):
    """Integrate 100 oscillator periods with step *dt* using *method*.

    Returns a dict with the position ('x'), time ('time') and relative
    energy error ('de') recorded after every step.
    """
    oscillator = harmonic_oscillator(0., 1., method=method)
    t_end = 100 * 2 * numpy.pi
    data = {'x': [], 'time': [], 'de': []}
    initial_energy = oscillator.total_energy()
    # The dt/2 slack keeps floating-point drift from adding an extra step.
    while oscillator.model_time < t_end - dt / 2:
        oscillator.evolve_step(dt)
        data['x'].append(oscillator.x)
        data['time'].append(oscillator.model_time)
        current_energy = oscillator.total_energy()
        data['de'].append(abs(initial_energy - current_energy) / initial_energy)
    return data
class TestSymplecticCompositions(amusetest.TestCase):
    """Empirical convergence-order tests for the splitting schemes in
    amuse.ext.composition_methods, run on the unit harmonic oscillator.

    Each test integrates 100 periods at two step sizes and estimates the
    order from the maximum relative energy errors:
    order ~ log(de2/de1) / log(dt2/dt1), rounded to the nearest integer.
    """
    def test1(self):
        # LEAPFROG: expected order 2.
        from amuse.ext.composition_methods import LEAPFROG
        dt1=.1
        data=run_harmonic_oscillator(dt1,method=LEAPFROG)
        de1=max(data['de'])
        dt2=0.01
        data=run_harmonic_oscillator(dt2,method=LEAPFROG)
        de2=max(data['de'])
        # Slope of the error vs. step size on a log-log scale.
        order=int(numpy.log(de2/de1)/numpy.log(dt2/dt1)+0.5)
        self.assertEqual(order,2)
    def test2(self):
        # SPLIT_4TH_S_M6: expected order 4.
        from amuse.ext.composition_methods import SPLIT_4TH_S_M6
        dt1=.5
        data=run_harmonic_oscillator(dt1,method=SPLIT_4TH_S_M6)
        de1=max(data['de'])
        dt2=0.05
        data=run_harmonic_oscillator(dt2,method=SPLIT_4TH_S_M6)
        de2=max(data['de'])
        order=int(numpy.log(de2/de1)/numpy.log(dt2/dt1)+0.5)
        self.assertEqual(order, 4)
    def test3(self):
        # SPLIT_4TH_S_M5: expected order 4.
        from amuse.ext.composition_methods import SPLIT_4TH_S_M5
        dt1=.5
        data=run_harmonic_oscillator(dt1,method=SPLIT_4TH_S_M5)
        de1=max(data['de'])
        dt2=0.05
        data=run_harmonic_oscillator(dt2,method=SPLIT_4TH_S_M5)
        de2=max(data['de'])
        order=int(numpy.log(de2/de1)/numpy.log(dt2/dt1)+0.5)
        self.assertEqual(order, 4)
    def test4(self):
        # SPLIT_4TH_S_M4: expected order 4.
        from amuse.ext.composition_methods import SPLIT_4TH_S_M4
        dt1=.5
        data=run_harmonic_oscillator(dt1,method=SPLIT_4TH_S_M4)
        de1=max(data['de'])
        dt2=0.05
        data=run_harmonic_oscillator(dt2,method=SPLIT_4TH_S_M4)
        de2=max(data['de'])
        order=int(numpy.log(de2/de1)/numpy.log(dt2/dt1)+0.5)
        self.assertEqual(order, 4)
    def test5(self):
        # SPLIT_6TH_SS_M11: expected order 6.
        from amuse.ext.composition_methods import SPLIT_6TH_SS_M11
        dt1=.5
        data=run_harmonic_oscillator(dt1,method=SPLIT_6TH_SS_M11)
        de1=max(data['de'])
        dt2=0.05
        data=run_harmonic_oscillator(dt2,method=SPLIT_6TH_SS_M11)
        de2=max(data['de'])
        order=int(numpy.log(de2/de1)/numpy.log(dt2/dt1)+0.5)
        self.assertEqual(order, 6)
    def test6(self):
        # SPLIT_6TH_SS_M13: expected order 6.
        from amuse.ext.composition_methods import SPLIT_6TH_SS_M13
        dt1=.5
        data=run_harmonic_oscillator(dt1,method=SPLIT_6TH_SS_M13)
        de1=max(data['de'])
        dt2=0.05
        data=run_harmonic_oscillator(dt2,method=SPLIT_6TH_SS_M13)
        de2=max(data['de'])
        order=int(numpy.log(de2/de1)/numpy.log(dt2/dt1)+0.5)
        self.assertEqual(order, 6)
    def test7(self):
        # SPLIT_8TH_SS_M21: expected order 8 (larger steps keep errors above round-off).
        from amuse.ext.composition_methods import SPLIT_8TH_SS_M21
        dt1=1.
        data=run_harmonic_oscillator(dt1,method=SPLIT_8TH_SS_M21)
        de1=max(data['de'])
        dt2=0.25
        data=run_harmonic_oscillator(dt2,method=SPLIT_8TH_SS_M21)
        de2=max(data['de'])
        order=int(numpy.log(de2/de1)/numpy.log(dt2/dt1)+0.5)
        self.assertEqual(order, 8)
    def test8(self):
        # SPLIT_10TH_SS_M35: expected order 10.
        from amuse.ext.composition_methods import SPLIT_10TH_SS_M35
        dt1=1.
        data=run_harmonic_oscillator(dt1,method=SPLIT_10TH_SS_M35)
        de1=max(data['de'])
        dt2=0.5
        data=run_harmonic_oscillator(dt2,method=SPLIT_10TH_SS_M35)
        de2=max(data['de'])
        order=int(numpy.log(de2/de1)/numpy.log(dt2/dt1)+0.5)
self.assertEqual(order, 10) | src/amuse/test/suite/ext_tests/test_composition_methods.py | from amuse.test import amusetest
import numpy
class harmonic_oscillator(object):
def __init__(self,x,v,method=None):
self.x=x
self.v=v
self.model_time=0
self.method=method
def kick(self,dt):
self.v+=-self.x*dt
def drift(self,dt):
self.x+=self.v*dt
def total_energy(self):
return (self.v**2+self.x**2)/2
def evolve_step(self,dt):
self.method(self.kick,self.drift,dt)
self.model_time+=dt
def run_harmonic_oscillator(dt,method):
h=harmonic_oscillator(0.,1.,method=method)
tend=100*2*numpy.pi
data=dict()
data['x']=[]
data['time']=[]
data['de']=[]
E0=h.total_energy()
while h.model_time<tend-dt/2:
h.evolve_step(dt)
data['x'].append(h.x)
data['time'].append(h.model_time)
E=h.total_energy()
data['de'].append(abs(E0-E)/E0)
return data
class TestSymplecticCompositions(amusetest.TestCase):
def test1(self):
from amuse.ext.composition_methods import LEAPFROG
dt1=.1
data=run_harmonic_oscillator(dt1,method=LEAPFROG)
de1=max(data['de'])
dt2=0.01
data=run_harmonic_oscillator(dt2,method=LEAPFROG)
de2=max(data['de'])
order=int(numpy.log(de2/de1)/numpy.log(dt2/dt1)+0.5)
self.assertEqual(order,2)
def test2(self):
from amuse.ext.composition_methods import SPLIT_4TH_S_M6
dt1=.5
data=run_harmonic_oscillator(dt1,method=SPLIT_4TH_S_M6)
de1=max(data['de'])
dt2=0.05
data=run_harmonic_oscillator(dt2,method=SPLIT_4TH_S_M6)
de2=max(data['de'])
order=int(numpy.log(de2/de1)/numpy.log(dt2/dt1)+0.5)
self.assertEqual(order, 4)
def test3(self):
from amuse.ext.composition_methods import SPLIT_4TH_S_M5
dt1=.5
data=run_harmonic_oscillator(dt1,method=SPLIT_4TH_S_M5)
de1=max(data['de'])
dt2=0.05
data=run_harmonic_oscillator(dt2,method=SPLIT_4TH_S_M5)
de2=max(data['de'])
order=int(numpy.log(de2/de1)/numpy.log(dt2/dt1)+0.5)
self.assertEqual(order, 4)
def test4(self):
from amuse.ext.composition_methods import SPLIT_4TH_S_M4
dt1=.5
data=run_harmonic_oscillator(dt1,method=SPLIT_4TH_S_M4)
de1=max(data['de'])
dt2=0.05
data=run_harmonic_oscillator(dt2,method=SPLIT_4TH_S_M4)
de2=max(data['de'])
order=int(numpy.log(de2/de1)/numpy.log(dt2/dt1)+0.5)
self.assertEqual(order, 4)
def test5(self):
from amuse.ext.composition_methods import SPLIT_6TH_SS_M11
dt1=.5
data=run_harmonic_oscillator(dt1,method=SPLIT_6TH_SS_M11)
de1=max(data['de'])
dt2=0.05
data=run_harmonic_oscillator(dt2,method=SPLIT_6TH_SS_M11)
de2=max(data['de'])
order=int(numpy.log(de2/de1)/numpy.log(dt2/dt1)+0.5)
self.assertEqual(order, 6)
def test6(self):
from amuse.ext.composition_methods import SPLIT_6TH_SS_M13
dt1=.5
data=run_harmonic_oscillator(dt1,method=SPLIT_6TH_SS_M13)
de1=max(data['de'])
dt2=0.05
data=run_harmonic_oscillator(dt2,method=SPLIT_6TH_SS_M13)
de2=max(data['de'])
order=int(numpy.log(de2/de1)/numpy.log(dt2/dt1)+0.5)
self.assertEqual(order, 6)
def test7(self):
from amuse.ext.composition_methods import SPLIT_8TH_SS_M21
dt1=1.
data=run_harmonic_oscillator(dt1,method=SPLIT_8TH_SS_M21)
de1=max(data['de'])
dt2=0.25
data=run_harmonic_oscillator(dt2,method=SPLIT_8TH_SS_M21)
de2=max(data['de'])
order=int(numpy.log(de2/de1)/numpy.log(dt2/dt1)+0.5)
self.assertEqual(order, 8)
def test8(self):
from amuse.ext.composition_methods import SPLIT_10TH_SS_M35
dt1=1.
data=run_harmonic_oscillator(dt1,method=SPLIT_10TH_SS_M35)
de1=max(data['de'])
dt2=0.5
data=run_harmonic_oscillator(dt2,method=SPLIT_10TH_SS_M35)
de2=max(data['de'])
order=int(numpy.log(de2/de1)/numpy.log(dt2/dt1)+0.5)
self.assertEqual(order, 10) | 0.544801 | 0.171685 |
import base64
import io
import pandas as pd
import matplotlib.pyplot as plt
from cachetools import cached, TTLCache
from statsmodels.tsa.arima_model import ARIMA
from flask import Flask, request, render_template, jsonify, abort
# Flask application object; the routes below attach to it.
app = Flask(__name__)
# JHU CSSE time-series categories accepted by the endpoints.
categories = ['confirmed_US', 'confirmed_global', 'deaths_US', 'deaths_global', 'recovered_global']
@app.route('/liveness')
def alive():
    # Liveness-probe endpoint (e.g. for container orchestration health checks).
    return "OK"
@app.route('/', methods=['GET', 'POST'])
def root():
    """Serve the landing page; on POST, run a forecast for the submitted form."""
    if request.method == 'POST':
        category = request.form.get("category")
        key = request.form.get("key")
        # Parse the numeric form fields, falling back to the page defaults.
        num_of_days, p, q, d = (
            int(request.form.get(field, fallback))
            for field, fallback in (("num_of_days", "3"), ("p", "0"), ("q", "1"), ("d", "1"))
        )
        plot_data = forecast(category, key, num_of_days, d, p, q)
        if not plot_data:
            abort(404)
    else:
        # Plain GET: pre-fill the form with defaults, no plot yet.
        category = categories[0]
        key = 'Massachusetts, US'
        num_of_days = 3
        p, q, d = 0, 1, 1
        plot_data = None
    return render_template("index.html", categories=categories, category=category, key=key,
                           num_of_days=num_of_days, p=p, q=q, d=d, plot=plot_data)
@app.route('/forecast')
def get_forecast():
    """JSON API: return a base64-encoded forecast plot for the query parameters."""
    args = request.args
    category = args.get("category", categories[0])
    key = args.get("key")
    num_of_days = int(args.get("num_of_days", "3"))
    p = int(args.get("p", "0"))
    q = int(args.get("q", "1"))
    d = int(args.get("d", "1"))
    plot_data = forecast(category, key, num_of_days, d, p, q)
    if plot_data:
        return jsonify({'plot': plot_data})
    # No data matched the requested key.
    abort(404)
def forecast(category: str, key: str, num_of_days: int, d: int, p: int, q: int):
    """Fit an ARIMA model to the selected COVID-19 time series and return a
    base64-encoded PNG with recent history (top) and the forecast (bottom).

    Returns None when *key* matches no row of the chosen category.
    """
    df = read_csv(category)
    if category.endswith('_global') and ',' in key:
        # Global files are indexed by (Province/State, Country/Region) tuples.
        key = tuple([k.strip() for k in key.split(',')])
    if key:
        app.logger.info('Aggregating {} by {}'.format(category, str(key)))
        krs = [k for k in df.index if key == k or key in k]
        if len(krs) < 1:
            app.logger.info(str(key) + ' not found in keys!')
            return None
        mid = min(len(krs) // 2, 5)
        app.logger.info('Found {} key(s): {}{}{}'.format(len(krs), str(krs[:mid]).rstrip(']'),
                                                         ',..,' if len(krs) > 11 else ',',
                                                         str(krs[-mid:]).lstrip('[')))
        # Columns 12+ hold the daily date columns; sum the matching rows per day.
        dr = df.loc[krs, df.columns[12:]].sum()
    else:
        app.logger.info('Aggregating ' + category)
        dr = df[df.columns[12:]].sum()
    app.logger.info('Forecasting {} day(s) out using ARIMA with order=({},{},{})'.format(num_of_days, p, q, d))
    # NOTE(review): statsmodels ARIMA takes order=(p, d, q); here the q and d
    # arguments fill the middle/last slots -- confirm this ordering is intended.
    model = ARIMA(dr.values, order=(p, q, d))
    model_fit = model.fit(disp=False)
    yhat = model_fit.predict(len(dr.values), len(dr.values) + num_of_days, typ='levels')
    app.logger.info('Preparing plot...')
    # BUGFIX: the previous plt.figure(figsize=..., dpi=...) call created an
    # orphaned figure that plt.subplots(2) immediately replaced -- the sizing
    # never applied and the extra figure leaked memory on every request.
    fig, (ax1, ax2) = plt.subplots(2, figsize=(10, 10), dpi=100)
    ax1.plot(dr.index[-num_of_days:], dr.values[-num_of_days:], color='tab:red')
    ax1.set(title='COVID-19 ' + category + ' ' + str(key), xlabel='dates', ylabel='num of ' + category)
    ax2.plot(range(0, len(yhat)), yhat, color='tab:blue')
    ax2.set(xlabel='days', ylabel=category + ' forecast')
    app.logger.info('Converting plot to png...')
    img = io.BytesIO()
    fig.savefig(img, format='png')
    plt.close(fig)
    img.seek(0)
    plot_data = base64.b64encode(img.getvalue()).decode()
    return plot_data
@cached(cache=TTLCache(maxsize=1024, ttl=3 * 60 * 60))
def read_csv(cat: str) -> pd.DataFrame:
    """Download the JHU CSSE time-series CSV for *cat*; results cached for 3 h."""
    base_url = 'https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master'
    url = base_url + '/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_' + cat + '.csv'
    # US files carry a single Combined_Key column; global files use a
    # (Province/State, Country/Region) pair as the index.
    if cat.endswith('_US'):
        idx = 'Combined_Key'
    else:
        idx = ['Province/State', 'Country/Region']
    app.logger.info('Reading CSV from ' + url + " and indexing on " + str(idx))
    return pd.read_csv(url, index_col=idx)
if __name__ == '__main__':
app.run(host="0.0.0.0") | src/app.py | import base64
import io
import pandas as pd
import matplotlib.pyplot as plt
from cachetools import cached, TTLCache
from statsmodels.tsa.arima_model import ARIMA
from flask import Flask, request, render_template, jsonify, abort
app = Flask(__name__)
categories = ['confirmed_US', 'confirmed_global', 'deaths_US', 'deaths_global', 'recovered_global']
@app.route('/liveness')
def alive():
return "OK"
@app.route('/', methods=['GET', 'POST'])
def root():
if request.method == 'POST':
category = request.form.get("category")
key = request.form.get("key")
num_of_days = int(request.form.get("num_of_days", "3"))
p = int(request.form.get("p", "0"))
q = int(request.form.get("q", "1"))
d = int(request.form.get("d", "1"))
plot_data = forecast(category, key, num_of_days, d, p, q)
if not plot_data:
abort(404)
else:
category = categories[0]
key = 'Massachusetts, US'
num_of_days = 3
p, q, d = 0, 1, 1
plot_data = None
return render_template("index.html", categories=categories, category=category, key=key,
num_of_days=num_of_days, p=p, q=q, d=d, plot=plot_data)
@app.route('/forecast')
def get_forecast():
category = request.args.get("category", categories[0])
key = request.args.get("key")
num_of_days = int(request.args.get("num_of_days", "3"))
p = int(request.args.get("p", "0"))
q = int(request.args.get("q", "1"))
d = int(request.args.get("d", "1"))
plot_data = forecast(category, key, num_of_days, d, p, q)
if not plot_data:
abort(404)
return jsonify({'plot': plot_data})
def forecast(category: str, key: str, num_of_days: int, d: int, p: int, q: int):
df = read_csv(category)
if category.endswith('_global') and ',' in key:
key = tuple([k.strip() for k in key.split(',')])
if key:
app.logger.info('Aggregating {} by {}'.format(category, str(key)))
krs = [k for k in df.index if key == k or key in k]
if len(krs) < 1:
app.logger.info(str(key) + ' not found in keys!')
return None
mid = min(len(krs) // 2, 5)
app.logger.info('Found {} key(s): {}{}{}'.format(len(krs), str(krs[:mid]).rstrip(']'),
',..,' if len(krs) > 11 else ',',
str(krs[-mid:]).lstrip('[')))
dr = df.loc[krs, df.columns[12:]].sum()
else:
app.logger.info('Aggregating ' + category)
dr = df[df.columns[12:]].sum()
app.logger.info('Forecasting {} day(s) out using ARIMA with order=({},{},{})'.format(num_of_days, p, q, d))
model = ARIMA(dr.values, order=(p, q, d))
model_fit = model.fit(disp=False)
yhat = model_fit.predict(len(dr.values), len(dr.values) + num_of_days, typ='levels')
app.logger.info('Preparing plot...')
plt.figure(figsize=(10, 10), dpi=100)
fig, (ax1, ax2) = plt.subplots(2)
ax1.plot(dr.index[-num_of_days:], dr.values[-num_of_days:], color='tab:red')
ax1.set(title='COVID-19 ' + category + ' ' + str(key), xlabel='dates', ylabel='num of ' + category)
ax2.plot(range(0, len(yhat)), yhat, color='tab:blue')
ax2.set(xlabel='days', ylabel=category + ' forecast')
app.logger.info('Converting plot to png...')
img = io.BytesIO()
plt.savefig(img, format='png')
plt.close(fig)
img.seek(0)
plot_data = base64.b64encode(img.getvalue()).decode()
return plot_data
@cached(cache=TTLCache(maxsize=1024, ttl=3 * 60 * 60))
def read_csv(cat: str) -> pd.DataFrame:
base_url = 'https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master'
url = base_url + '/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_' + cat + '.csv'
idx = 'Combined_Key' if cat.endswith('_US') else ['Province/State', 'Country/Region']
app.logger.info('Reading CSV from ' + url + " and indexing on " + str(idx))
return pd.read_csv(url, index_col=idx)
if __name__ == '__main__':
app.run(host="0.0.0.0") | 0.511717 | 0.186077 |
import lxml.html
import lxml.etree
def remove(obj):
    """Detach element *obj* from its parent."""
    parent = obj.getparent()
    parent.remove(obj)
def remove_all(document, *paths):
    """Remove every element matched by any of *paths* from *document*."""
    for path in paths:
        matches = document.findall(path)
        for element in matches:
            remove(element)
def pacify(obj):
    """Neutralise *obj* by rewriting its tag to a plain <span>."""
    obj.tag = 'span'
def pacify_all(document, *paths):
    """Pacify (turn into <span>) every element matched by any of *paths*."""
    for path in paths:
        matches = document.findall(path)
        for element in matches:
            pacify(element)
def fix_html(htmlbytes):
    """Strip a scraped article page down to its content element tree.

    Pages that declare iso-8859-2 are decoded explicitly; anything else is
    handed to lxml as-is.
    """
    if b'charset="iso-8859-2"' in htmlbytes:
        htmlstr = htmlbytes.decode('ISO-8859-2')
    else:
        htmlstr = htmlbytes
    document = lxml.html.document_fromstring(htmlstr)
    # Narrow to the article container when one of the known ids is present.
    for container_path in (".//div[@id='gazeta_article']", ".//div[@id='story']"):
        subdocument = document.find(container_path)
        if subdocument is not None:
            document = subdocument
            break
    document = lxml.etree.ElementTree(document)
    # Site boilerplate, social widgets, metadata and scripts to drop entirely.
    boilerplate = (
        "//div[@class='articleToolBoxBottom']",
        "//div[@class='author']",
        "//div[@class='authordate']",
        "//div[@class='editorPicks ']",
        "//div[@class='index mod_zi6']",
        "//div[@class='kyoceraBox']",
        "//div[@class='mod_inner']",
        "//div[@class='more']",
        "//div[@class='seealso']",
        "//div[@class='test']",
        "//div[@class='tylko_int']",
        "//div[@id='articleComments']",
        "//div[@id='articleCopyright']",
        "//div[@id='article_toolbar']",
        "//div[@id='banP4']",
        "//div[@id='gazeta_article_author']",
        "//div[@id='gazeta_article_brand']",
        "//div[@id='gazeta_article_image']",
        "//div[@id='gazeta_article_likes']",
        "//div[@id='gazeta_article_share']",
        "//div[@id='gazeta_article_tags']",
        "//div[@id='gazeta_article_tools']",
        "//div[@id='recommendations']",
        "//div[@id='socialNewTools']",
        "//h3[@id='tags']",
        "//ul[@id='articleToolbar']",
        '//like',
        '//link',
        '//meta',
        '//script',
        '//style',
    )
    remove_all(document, *boilerplate)
    # Images are kept in place but neutralised into <span> placeholders.
    pacify_all(document, '//img')
    return document
# vim:ts=4 sw=4 et | tidy.py |
import lxml.html
import lxml.etree
def remove(obj):
obj.getparent().remove(obj)
def remove_all(document, *paths):
for path in paths:
for obj in document.findall(path):
remove(obj)
def pacify(obj):
obj.tag = 'span'
def pacify_all(document, *paths):
for path in paths:
for obj in document.findall(path):
pacify(obj)
def fix_html(htmlbytes):
if b'charset="iso-8859-2"' in htmlbytes:
htmlstr = htmlbytes.decode('ISO-8859-2')
else:
htmlstr = htmlbytes
document = lxml.html.document_fromstring(htmlstr)
subdocument = document.find(".//div[@id='gazeta_article']")
if subdocument is None:
subdocument = document.find(".//div[@id='story']")
if subdocument is not None:
document = subdocument
document = lxml.etree.ElementTree(document)
remove_all(document,
"//div[@class='articleToolBoxBottom']",
"//div[@class='author']",
"//div[@class='authordate']",
"//div[@class='editorPicks ']",
"//div[@class='index mod_zi6']",
"//div[@class='kyoceraBox']",
"//div[@class='mod_inner']",
"//div[@class='more']",
"//div[@class='seealso']",
"//div[@class='test']",
"//div[@class='tylko_int']",
"//div[@id='articleComments']",
"//div[@id='articleCopyright']",
"//div[@id='article_toolbar']",
"//div[@id='banP4']",
"//div[@id='gazeta_article_author']",
"//div[@id='gazeta_article_brand']",
"//div[@id='gazeta_article_image']",
"//div[@id='gazeta_article_likes']",
"//div[@id='gazeta_article_share']",
"//div[@id='gazeta_article_tags']",
"//div[@id='gazeta_article_tools']",
"//div[@id='recommendations']",
"//div[@id='socialNewTools']",
"//h3[@id='tags']",
"//ul[@id='articleToolbar']",
'//like',
'//link',
'//meta',
'//script',
'//style',
)
pacify_all(document,
'//img',
)
return document
# vim:ts=4 sw=4 et | 0.214362 | 0.057812 |
import psutil, base64, os, sys, hashlib, datetime, discord, random
from PIL import Image, ImageDraw, ImageFont
import configparser
import matplotlib.pyplot as plt
import numpy as np
from datetime import datetime, timedelta
# Guard: this module is meant to be imported by the bot, never run directly.
if __name__=="__main__":
    print("FATAL : Run this bot from right way.")
    sys.exit(1)
# Humorous Korean error banners; err_txt() picks one at random.
e_txt = ["오류 발생!",
        "개발진들이 마실 카페인이 늘어났어요!",
        "기계식 루냥이.EXE는 '우에엥 도와줘'를 사용했다!",
        "개발진들이 C++의 놀라움을 경험했습니다",
        "개발진들이 파이썬의 놀라움을 경험했습니다",
        "동작 중이던 코드가 이세계행 트럭과 부딪혔습니다",
        "개발진들이 현실을 부정하기 시작했습니다"]
# INI-backed cache mapping Discord user ids to display names
# (read once at import; rewritten by set_name()).
db_path = "db/username_db.dat"
db = configparser.ConfigParser()
db.read(db_path)
def getHash(path, blocksize=65536):
    """Return the MD5 hex digest of the file at *path*.

    Reads in *blocksize*-byte chunks so arbitrarily large files use
    constant memory.
    """
    hasher = hashlib.md5()
    # with-block guarantees the handle is closed even if read() raises;
    # the original leaked the descriptor on error.
    with open(path, 'rb') as afile:
        buf = afile.read(blocksize)
        while len(buf) > 0:
            hasher.update(buf)
            buf = afile.read(blocksize)
    return hasher.hexdigest()
def base64e(s):
    """Base64-encode a text string and return the result as text."""
    # decode() replaces the old str(bytes) + quote-stripping hack, which
    # depended on repr() formatting; base64 output is always ASCII.
    return base64.b64encode(s.encode('utf-8')).decode('ascii')
def base64d(b):
    """Decode a base64 payload back to a UTF-8 text string."""
    decoded_bytes = base64.b64decode(b)
    return decoded_bytes.decode('utf-8')
def checkIfProcessRunning(processName):
    """Return True when any running process name contains *processName* (case-insensitive)."""
    needle = processName.lower()  # hoisted out of the loop (was recomputed per process)
    for proc in psutil.process_iter():
        try:
            if needle in proc.name().lower():
                return True
        except (psutil.NoSuchProcess, psutil.AccessDenied, psutil.ZombieProcess):
            # Process vanished or is off-limits; ignore and keep scanning.
            pass
    return False
def checkTrait(text):
    """Pick the Korean object particle ('을'/'를') for the final syllable of *text*."""
    final_syllable = text[-1:]
    # Hangul syllables start at U+AC00; an offset that is a multiple of 28
    # means the syllable has no trailing consonant (jongseong).
    has_final_consonant = (ord(final_syllable) - 0xAC00) % 28 != 0
    return "을" if has_final_consonant else "를"
def outline_draw(d, text, x, y, rb=0, gb=0, bb=0, rf=255, gf=255, bf=255):
    """Draw *text* with a 1-px outline: background colour (rb, gb, bb) on all
    eight neighbouring offsets, then foreground (rf, gf, bf) on top."""
    outline = (rb, gb, bb)
    # BUGFIX: the original drew the (x-1, y-1) corner twice and never drew
    # (x-1, y+1), leaving a gap in the outline; iterate all 8 neighbours.
    for dx in (-1, 0, 1):
        for dy in (-1, 0, 1):
            if dx or dy:
                d.text((x + dx, y + dy), text, fill=outline)
    d.text((x, y), text, fill=(rf, gf, bf))
def make_color(text, head, mode):
    """Render a 200x120 swatch PNG for a 6-digit hex colour and return its filename.

    A truthy *mode* means the '<head>색상 ' command form, falsy the
    '<head>색상코드 ' form. Raises ValueError for malformed hex input.
    """
    if mode:
        m = text.replace(head + "색상 ", "")
    else:
        m = text.replace(head + "색상코드 ", "")
    m = m[-6:]
    m = m.upper()
    ms = "color hex #" + m
    try:
        # Split 'RRGGBB' into an (r, g, b) tuple.
        h = tuple(int(m[i:i+2], 16) for i in (0, 2, 4))
    except ValueError:
        # Narrowed from a bare `except:` (which also swallowed
        # KeyboardInterrupt/SystemExit); only bad hex digits land here.
        raise ValueError
    img = Image.new('RGB', (200, 120), color = h)
    d = ImageDraw.Draw(img)
    outline_draw(d, ms, 10, 10)
    # Per-channel breakdown, each outlined in its own channel colour.
    outline_draw(d, "red : " + str(h[0]) + "(" + str(int((h[0] / 255) * 100)) + "%)", 10, 24, 255, 0, 0)
    outline_draw(d, "green : " + str(h[1]) + "(" + str(int((h[1] / 255) * 100)) + "%)", 10, 38, 0, 255, 0)
    outline_draw(d, "blue : " + str(h[2]) + "(" + str(int((h[2] / 255) * 100)) + "%)", 10, 52, 0, 0, 255)
    # Legibility samples against the chosen background colour.
    d.text((10, 66), "white text", fill=(255, 255, 255))
    d.text((10, 80), "black text", fill=(0, 0, 0))
    img.save('pil_color.png')
    return "pil_color.png"
def make_pil(text, head):
    """Render *text* (minus the '<head>받아쓰기 ' prefix) onto a white
    320x240 PNG and return the filename."""
    message = text.replace(head + "받아쓰기 ", "")
    # The old encode('utf-8')/decode('utf-8') round trip was a no-op on a
    # Python 3 str and has been removed.
    font = ImageFont.truetype("font/kopub.ttf", 20, encoding='unic')
    img = Image.new('RGB', (320, 240), color = (255, 255, 255))
    d = ImageDraw.Draw(img)
    d.text((10, 10), message, fill=(0, 0, 0), font=font)
    img.save('pil_color.png')
    return "pil_color.png"
def get_name(id):
    """Look up the cached display name for user *id*; None when not cached."""
    try:
        return db.get("name", str(id))
    except (configparser.NoSectionError, configparser.NoOptionError):
        # Narrowed from a bare except: a missing section/option simply
        # means the user has not been cached yet.
        return None
def set_name(message):
    """Persist message.author's current display name keyed by their id (best effort)."""
    try:
        db.set("name", str(message.author.id), str(message.author.name))
        with open(db_path, 'w') as configfile:
            db.write(configfile)
    except Exception:
        # Best-effort cache write: failures are deliberately ignored
        # (was a bare `except:`, which also swallowed KeyboardInterrupt).
        pass
def err_txt():
    # Random humorous error banner from the module-level e_txt pool.
    return random.choice(e_txt)
from PIL import Image, ImageDraw, ImageFont
import configparser
import matplotlib.pyplot as plt
import numpy as np
from datetime import datetime, timedelta
if __name__=="__main__":
print("FATAL : Run this bot from right way.")
sys.exit(1)
e_txt = ["오류 발생!",
"개발진들이 마실 카페인이 늘어났어요!",
"기계식 루냥이.EXE는 '우에엥 도와줘'를 사용했다!",
"개발진들이 C++의 놀라움을 경험했습니다",
"개발진들이 파이썬의 놀라움을 경험했습니다",
"동작 중이던 코드가 이세계행 트럭과 부딪혔습니다",
"개발진들이 현실을 부정하기 시작했습니다"]
db_path = "db/username_db.dat"
db = configparser.ConfigParser()
db.read(db_path)
def getHash(path, blocksize=65536):
afile = open(path, 'rb')
hasher = hashlib.md5()
buf = afile.read(blocksize)
while len(buf) > 0:
hasher.update(buf)
buf = afile.read(blocksize)
afile.close()
return hasher.hexdigest()
def base64e(s):
e = base64.b64encode(s.encode('utf-8'))
e = str(e).replace("b'", "")
e = e.replace("'", "")
return e
def base64d(b):
return str(base64.b64decode(b).decode('utf-8'))
def checkIfProcessRunning(processName):
for proc in psutil.process_iter():
try:
if processName.lower() in proc.name().lower():
return True
except (psutil.NoSuchProcess, psutil.AccessDenied, psutil.ZombieProcess):
pass
return False;
def checkTrait(text):
c = text[-1:]
if int((ord(c) - 0xAC00) % 28) != 0:
return "을"
else:
return "를"
def outline_draw(d, text, x, y, rb=0, gb=0, bb=0, rf=255, gf=255, bf=255):
d.text((x-1, y), text, fill=(rb, gb, bb))
d.text((x+1, y), text, fill=(rb, gb, bb))
d.text((x, y-1), text, fill=(rb, gb, bb))
d.text((x, y+1), text, fill=(rb, gb, bb))
d.text((x-1, y-1), text, fill=(rb, gb, bb))
d.text((x+1, y-1), text, fill=(rb, gb, bb))
d.text((x-1, y-1), text, fill=(rb, gb, bb))
d.text((x+1, y+1), text, fill=(rb, gb, bb))
d.text((x, y), text, fill=(rf, gf, bf))
def make_color(text, head, mode):
if mode:
m = text.replace(head + "색상 ", "")
else:
m = text.replace(head + "색상코드 ", "")
m = m[-6:]
m = m.upper()
ms = "color hex #" + m
try:
h = tuple(int(m[i:i+2], 16) for i in (0, 2, 4))
except:
raise ValueError
img = Image.new('RGB', (200, 120), color = h)
d = ImageDraw.Draw(img)
outline_draw(d, ms, 10, 10)
outline_draw(d, "red : " + str(h[0]) + "(" + str(int((h[0] / 255) * 100)) + "%)", 10, 24, 255, 0, 0)
outline_draw(d, "green : " + str(h[1]) + "(" + str(int((h[1] / 255) * 100)) + "%)", 10, 38, 0, 255, 0)
outline_draw(d, "blue : " + str(h[2]) + "(" + str(int((h[2] / 255) * 100)) + "%)", 10, 52, 0, 0, 255)
d.text((10, 66), "white text", fill=(255, 255, 255))
d.text((10, 80), "black text", fill=(0, 0, 0))
img.save('pil_color.png')
return "pil_color.png"
def make_pil(text, head):
m = text.replace(head + "받아쓰기 ", "")
m = m.encode('utf-8')
font = ImageFont.truetype("font/kopub.ttf", 20, encoding='unic')
img = Image.new('RGB', (320, 240), color = (255, 255, 255))
d = ImageDraw.Draw(img)
d.text((10, 10), m.decode('utf-8'), fill=(0, 0, 0), font=font)
img.save('pil_color.png')
return "pil_color.png"
def get_name(id):
try:
n = db.get("name", str(id))
return n
except:
return None
def set_name(message):
try:
db.set("name", str(message.author.id), str(message.author.name))
with open(db_path, 'w') as configfile:
db.write(configfile)
except:
pass
def err_txt():
return random.choice(e_txt) | 0.121308 | 0.282153 |
import frappe
import json
from empg_erp.utils import get_post_body,get_config_by_name,str_to_date
from datetime import datetime, timedelta
import time
from empg_erp.modules.common.error_handler import ErrorHandler
from empg_erp.modules.mustang.employee.employee_roster import EmployeeRoster
from empg_erp.constants.globals import ATTENDANCE_STATUS_ABSENT, ATTENDANCE_STATUS_PRESENT
from frappe import _
from erpnext.hr.utils import divide_chunks
# Request payload (list of attendance records) for the current sync() call.
_BODY = []
# Cache of e-mail -> Employee row, filled during validation.
_VALID_EMPLOYEES = dict()
def add_or_update_attendance(emp_obj=dict(),obj=dict()):
    """Create (or amend-and-resubmit) the Attendance record for one employee/date.

    Returns (True, info_dict) on success or (False, error_string) on failure.
    NOTE(review): the dict() defaults are mutable defaults; they are only
    read here, but callers should always pass both arguments explicitly.
    """
    # Find an already-submitted (docstatus 1) attendance for this employee/date.
    attendance_id = frappe.db.get_value("Attendance",
        {
            "employee": emp_obj.get("name"),
            "attendance_date" : obj.get("attendance_date"),
            "docstatus" : 1
        },
        "name"
    )
    try:
        if attendance_id is not None:
            # Amend flow: copy the submitted doc, cancel the original, and
            # resubmit the copy linked back via amended_from.
            attendance_doc = frappe.get_doc("Attendance", attendance_id)
            new_att_doc = frappe.copy_doc(attendance_doc)
            new_att_doc.amended_from = attendance_doc.name
            attendance_doc.flags.ignore_permissions = True
            attendance_doc.cancel()
        else:
            new_att_doc = frappe.new_doc("Attendance")
            new_att_doc.employee = emp_obj.get("name")
            new_att_doc.attendance_date = obj.get("attendance_date")
        if obj.get("status") is not None:
            new_att_doc.status = obj.get("status")
        # Shift times arrive as 'YYYY-MM-DD HH:MM:SS'; only the time of day is stored.
        if obj.get("shift_start_time") is not None and obj.get("shift_start_time"):
            new_att_doc.shift_start_time = str(datetime.strptime(obj.get("shift_start_time"), '%Y-%m-%d %H:%M:%S').time())
        if obj.get("shift_end_time") is not None and obj.get("shift_end_time"):
            new_att_doc.shift_end_time = str(datetime.strptime(obj.get("shift_end_time"), '%Y-%m-%d %H:%M:%S').time())
        if obj.get("attendance_status") is not None and obj.get("attendance_status"):
            new_att_doc.attendance_status = obj.get("attendance_status")
        # A recorded check-in/out overrides an Absent status.
        if (new_att_doc.check_in or new_att_doc.check_out) and new_att_doc.status == ATTENDANCE_STATUS_ABSENT:
            new_att_doc.status = ATTENDANCE_STATUS_PRESENT
        new_att_doc.modified_by = frappe.session.user
        new_att_doc.set_by_cron = 0
        new_att_doc.flags.ignore_permissions = True
        result = new_att_doc.save()
        new_att_doc.submit()
        return True, {
            "record":new_att_doc,
            "name":new_att_doc.name,
            "object_id":obj.get("object_id")
        }
    except Exception as err:
        ErrorHandler.log_error(str(err))
        return False, str(err)
def validate_required_fields():
    """Validate every record of the module-global _BODY in place.

    Checks required keys, resolves employees by e-mail into _VALID_EMPLOYEES,
    validates shift times and dates, enforces the roster cutoff, and maps the
    caller's roster statuses onto ERP attendance statuses.
    Returns (required_field_error_flag, list_of_failed_records).
    """
    _params = ["object_id","shift_start_time","shift_end_time","attendance_status","email","attendance_date"]
    _required_err = False
    _errors = []
    _invalid_employees = []
    _idx = -1
    # Cache: roster status -> parent attendance status (one DB lookup each).
    _all_roster_statuses = dict()
    global _BODY
    global _VALID_EMPLOYEES
    # The roster-status mapping is configured per API user.
    _roster_mapping = get_config_by_name("ROSTER_MAPPINGS",{})
    _roster_statuses = _roster_mapping.get(frappe.session.user)
    if _roster_statuses is not None and _roster_statuses:
        for att in _BODY:
            _idx +=1
            _err = False
            # 1) All required keys must be present.
            for key in _params:
                if key not in att:
                    _required_err = True
                    _err = True
                    att["error"] = "{} is required.".format(key)
                    _errors.append(att)
                    break
            if _err == False:
                # NOTE(review): every `continue` below skips the trailing
                # `_err == True` check, so _required_err is only set for
                # missing keys; sync() also checks len(_errors), so these
                # failures are still reported.
                _err = True
                if att.get("email") not in _VALID_EMPLOYEES:
                    ''' If user is not in valid employees then get employee code by email and update valid_employees and invalid_employees'''
                    if att.get("email") in _invalid_employees:
                        ''' If not employee associated with email then break current itration of loop with error'''
                        continue
                    employee_obj = frappe.db.get_value('Employee', {'user_id': att.get("email")}, ['name','employee_name','department','sub_department'],as_dict=True)
                    if employee_obj:
                        _VALID_EMPLOYEES[att.get("email")] = employee_obj
                    else:
                        _invalid_employees.append(att.get("email"))
                        att["error"] = "No Employee associated with {0}.".format(att.get("email"))
                        _errors.append(att)
                        continue
                ''' Validate shift start,end time and attendance date'''
                if att.get("shift_start_time"):
                    try:
                        time.strptime(att.get("shift_start_time"), '%Y-%m-%d %H:%M:%S')
                    except ValueError:
                        att["error"] = "Invalid shift start time {0}.".format(att.get("shift_start_time"))
                        _errors.append(att)
                        continue
                if att.get("shift_end_time"):
                    try:
                        time.strptime(att.get("shift_end_time"), '%Y-%m-%d %H:%M:%S')
                    except ValueError:
                        att["error"] = "Invalid shift end time {0}.".format(att.get("shift_end_time"))
                        _errors.append(att)
                        continue
                ''' Validate shift start,end time'''
                # struct_time tuples compare chronologically here.
                if att.get("shift_start_time") and att.get("shift_end_time"):
                    try:
                        if time.strptime(att.get("shift_end_time"), '%Y-%m-%d %H:%M:%S') < time.strptime(att.get("shift_start_time"), '%Y-%m-%d %H:%M:%S'):
                            att["error"] = "Shift end time must be greater than shift start time."
                            _errors.append(att)
                            continue
                    except ValueError:
                        att["error"] = "Invalid shift start or end time {0}, {1}.".format(att.get("shift_start_time"),att.get("shift_end_time"))
                        _errors.append(att)
                        continue
                if att.get("attendance_date"):
                    try:
                        # Replaces the string with a datetime.date in place.
                        att["attendance_date"] = datetime.strptime(att.get("attendance_date"), '%Y-%m-%d').date()
                    except ValueError:
                        att["error"] = "Invalid attendance date {0}.".format(att.get("attendance_date"))
                        _errors.append(att)
                        continue
                ''' Check attendance date is before or after cutoff date'''
                if EmployeeRoster.is_cutoff_passed(att.get("attendance_date")) == True:
                    att["error"] = "Cannot update roster on date {0} before cutoff Date.".format(att.get("attendance_date"))
                    _errors.append(att)
                    continue
                ''' Check maping of roster status with attendance status'''
                _status = _roster_statuses.get(str(att.get("attendance_status")))
                if _status and _status not in _all_roster_statuses:
                    status = frappe.db.get_value("Roster Status", {"name" : _status}, "parent_status")
                    if status is not None and status:
                        _all_roster_statuses[_status] = status
                    else:
                        att["error"] = "Roster status {0} is not linked with any Attendance Status.".format(att.get("attendance_status"))
                        _errors.append(att)
                        continue
                elif not _status:
                    att["error"] = "Roster status {0} is not linked with any Attendance Status.".format(att.get("attendance_status"))
                    _errors.append(att)
                    continue
                # Record passed all checks: normalise both statuses in place.
                _BODY[_idx]["attendance_status"] = _status
                _BODY[_idx]["status"] = _all_roster_statuses[_status]
                _err = False
            if _err == True:
                _required_err = True
    else:
        _errors.append(_("Roster Statuses mapping not found."))
    return _required_err, _errors
@frappe.whitelist()
def sync():
''' Fucntion to sync other departments roster with empghr roster '''
_max_iterations = 10000
_errors = []
_success = []
_iteration = 0
response = dict()
global _BODY
global _VALID_EMPLOYEES
_BODY = get_post_body()
''' Get allowed user to sync attendance '''
users = get_config_by_name("ATTENDANCE_SYNC_USERS",[])
if frappe.session.user in users:
if _BODY:
validation_err,_errors = validate_required_fields()
if validation_err == False and len(_errors)<1:
_chunk = divide_chunks(_BODY, 20)
while True or _iteration <= _max_iterations:
try:
_data = next(_chunk)
for att in _data:
''' Add or update attendance'''
status, result_obj = add_or_update_attendance(_VALID_EMPLOYEES[att.get("email")],att)
if status == False and result_obj:
att["error"] = result_obj
_errors.append(att)
else:
_success.append(result_obj)
_iteration += 1
frappe.db.commit()
except StopIteration:
break
response = {
"code":200,
"success":_success
}
else:
response = {
"code":201,
"error": _errors
}
return response
else:
return {
"code":403,
"error": [_("You are not allowed to sync roster/attendance.")]
} | empg_erp/modules/mustang/attendance/sync_attendance.py | import frappe
import json
from empg_erp.utils import get_post_body,get_config_by_name,str_to_date
from datetime import datetime, timedelta
import time
from empg_erp.modules.common.error_handler import ErrorHandler
from empg_erp.modules.mustang.employee.employee_roster import EmployeeRoster
from empg_erp.constants.globals import ATTENDANCE_STATUS_ABSENT, ATTENDANCE_STATUS_PRESENT
from frappe import _
from erpnext.hr.utils import divide_chunks
_BODY = []
_VALID_EMPLOYEES = dict()
def add_or_update_attendance(emp_obj=dict(),obj=dict()):
    """Create a new Attendance record, or amend-and-resubmit an existing one.

    Args:
        emp_obj (dict): Employee row (at minimum the ``name`` key is read).
        obj (dict): Validated payload row with attendance fields.

    Returns:
        tuple: ``(True, {"record", "name", "object_id"})`` on success,
        ``(False, error_message_str)`` when any exception occurs.

    NOTE(review): the mutable default arguments (``dict()``) are shared
    between calls; they are only read here, but callers should always pass
    both arguments explicitly.
    """
    # Look for an already-submitted (docstatus 1) attendance for this
    # employee/date; if found, it is cancelled and an amended copy created.
    attendance_id = frappe.db.get_value("Attendance",
        {
            "employee": emp_obj.get("name"),
            "attendance_date" : obj.get("attendance_date"),
            "docstatus" : 1
        },
        "name"
    )
    try:
        if attendance_id is not None:
            # Amend flow: copy the submitted doc, link it via amended_from,
            # then cancel the original (permission checks bypassed).
            attendance_doc = frappe.get_doc("Attendance", attendance_id)
            new_att_doc = frappe.copy_doc(attendance_doc)
            new_att_doc.amended_from = attendance_doc.name
            attendance_doc.flags.ignore_permissions = True
            attendance_doc.cancel()
        else:
            new_att_doc = frappe.new_doc("Attendance")
        new_att_doc.employee = emp_obj.get("name")
        new_att_doc.attendance_date = obj.get("attendance_date")
        if obj.get("status") is not None:
            new_att_doc.status = obj.get("status")
        # Shift times arrive as 'YYYY-MM-DD HH:MM:SS'; only the time part is stored.
        if obj.get("shift_start_time") is not None and obj.get("shift_start_time"):
            new_att_doc.shift_start_time = str(datetime.strptime(obj.get("shift_start_time"), '%Y-%m-%d %H:%M:%S').time())
        if obj.get("shift_end_time") is not None and obj.get("shift_end_time"):
            new_att_doc.shift_end_time = str(datetime.strptime(obj.get("shift_end_time"), '%Y-%m-%d %H:%M:%S').time())
        if obj.get("attendance_status") is not None and obj.get("attendance_status"):
            new_att_doc.attendance_status = obj.get("attendance_status")
        # A recorded check-in/check-out implies presence, so never leave Absent.
        if (new_att_doc.check_in or new_att_doc.check_out) and new_att_doc.status == ATTENDANCE_STATUS_ABSENT:
            new_att_doc.status = ATTENDANCE_STATUS_PRESENT
        new_att_doc.modified_by = frappe.session.user
        # Mark the record as manually synced, not cron-generated.
        new_att_doc.set_by_cron = 0
        new_att_doc.flags.ignore_permissions = True
        result = new_att_doc.save()
        new_att_doc.submit()
        return True, {
            "record":new_att_doc,
            "name":new_att_doc.name,
            "object_id":obj.get("object_id")
        }
    except Exception as err:
        # Any frappe/validation failure is logged and reported to the caller.
        ErrorHandler.log_error(str(err))
        return False, str(err)
def validate_required_fields():
    """Validate every row of the posted payload (``_BODY``) in place.

    Checks required keys, resolves employee emails to Employee records
    (cached in ``_VALID_EMPLOYEES``), validates shift times and attendance
    date, enforces the roster cutoff, and maps the caller's roster status to
    an Attendance status via the per-user ROSTER_MAPPINGS config.

    Returns:
        tuple: ``(required_err, errors)`` -- ``required_err`` is True when a
        row failed validation; ``errors`` lists the failed rows, each
        annotated with an ``"error"`` message.

    NOTE(review): the source's indentation was lost; the block structure
    below is a reconstruction and should be checked against version control.
    """
    _params = ["object_id","shift_start_time","shift_end_time","attendance_status","email","attendance_date"]
    _required_err = False
    _errors = []
    _invalid_employees = []
    _idx = -1
    # Cache: roster status name -> parent Attendance status.
    _all_roster_statuses = dict()
    global _BODY
    global _VALID_EMPLOYEES
    # The roster-status mapping is configured per session user.
    _roster_mapping = get_config_by_name("ROSTER_MAPPINGS",{})
    _roster_statuses = _roster_mapping.get(frappe.session.user)
    if _roster_statuses is not None and _roster_statuses:
        for att in _BODY:
            _idx +=1
            _err = False
            # Required-key check; the first missing key fails the row.
            for key in _params:
                if key not in att:
                    _required_err = True
                    _err = True
                    att["error"] = "{} is required.".format(key)
                    _errors.append(att)
                    break
            if _err == False:
                # _err stays True until every validation below passes.
                _err = True
                if att.get("email") not in _VALID_EMPLOYEES:
                    # Resolve the email to an Employee record, caching hits in
                    # _VALID_EMPLOYEES and misses in _invalid_employees so each
                    # email is looked up at most once.
                    if att.get("email") in _invalid_employees:
                        # Already known invalid; skip without adding the error again.
                        continue
                    employee_obj = frappe.db.get_value('Employee', {'user_id': att.get("email")}, ['name','employee_name','department','sub_department'],as_dict=True)
                    if employee_obj:
                        _VALID_EMPLOYEES[att.get("email")] = employee_obj
                    else:
                        _invalid_employees.append(att.get("email"))
                        att["error"] = "No Employee associated with {0}.".format(att.get("email"))
                        _errors.append(att)
                        continue
                # Validate shift start time format (YYYY-MM-DD HH:MM:SS).
                if att.get("shift_start_time"):
                    try:
                        time.strptime(att.get("shift_start_time"), '%Y-%m-%d %H:%M:%S')
                    except ValueError:
                        att["error"] = "Invalid shift start time {0}.".format(att.get("shift_start_time"))
                        _errors.append(att)
                        continue
                # Validate shift end time format.
                if att.get("shift_end_time"):
                    try:
                        time.strptime(att.get("shift_end_time"), '%Y-%m-%d %H:%M:%S')
                    except ValueError:
                        att["error"] = "Invalid shift end time {0}.".format(att.get("shift_end_time"))
                        _errors.append(att)
                        continue
                # End time must not precede start time.
                if att.get("shift_start_time") and att.get("shift_end_time"):
                    try:
                        if time.strptime(att.get("shift_end_time"), '%Y-%m-%d %H:%M:%S') < time.strptime(att.get("shift_start_time"), '%Y-%m-%d %H:%M:%S'):
                            att["error"] = "Shift end time must be greater than shift start time."
                            _errors.append(att)
                            continue
                    except ValueError:
                        att["error"] = "Invalid shift start or end time {0}, {1}.".format(att.get("shift_start_time"),att.get("shift_end_time"))
                        _errors.append(att)
                        continue
                # Parse the attendance date string into a datetime.date (in place).
                if att.get("attendance_date"):
                    try:
                        att["attendance_date"] = datetime.strptime(att.get("attendance_date"), '%Y-%m-%d').date()
                    except ValueError:
                        att["error"] = "Invalid attendance date {0}.".format(att.get("attendance_date"))
                        _errors.append(att)
                        continue
                # Reject updates for dates past the roster cutoff.
                if EmployeeRoster.is_cutoff_passed(att.get("attendance_date")) == True:
                    att["error"] = "Cannot update roster on date {0} before cutoff Date.".format(att.get("attendance_date"))
                    _errors.append(att)
                    continue
                # Map the caller's roster status to its parent Attendance
                # status, caching DB lookups in _all_roster_statuses.
                _status = _roster_statuses.get(str(att.get("attendance_status")))
                if _status and _status not in _all_roster_statuses:
                    status = frappe.db.get_value("Roster Status", {"name" : _status}, "parent_status")
                    if status is not None and status:
                        _all_roster_statuses[_status] = status
                    else:
                        att["error"] = "Roster status {0} is not linked with any Attendance Status.".format(att.get("attendance_status"))
                        _errors.append(att)
                        continue
                elif not _status:
                    att["error"] = "Roster status {0} is not linked with any Attendance Status.".format(att.get("attendance_status"))
                    _errors.append(att)
                    continue
                # Row passed every check: write the mapped statuses back.
                _BODY[_idx]["attendance_status"] = _status
                _BODY[_idx]["status"] = _all_roster_statuses[_status]
                _err = False
            if _err == True:
                # NOTE(review): rows that bail out via `continue` above skip
                # this flag update -- confirm intended placement.
                _required_err = True
    else:
        _errors.append(_("Roster Statuses mapping not found."))
    return _required_err, _errors
@frappe.whitelist()
def sync():
    """Sync other departments' roster/attendance with the EMPG HR roster.

    Reads the posted payload into the module-level ``_BODY``, validates it,
    then creates/amends Attendance records in chunks of 20, committing after
    each chunk.

    Returns:
        dict: ``{"code": 200, "success": [...]}`` when validation passed,
        ``{"code": 201, "error": [...]}`` on validation failure,
        ``{"code": 403, ...}`` when the session user is not allowed, or an
        empty dict when the request body is empty.
    """
    _max_iterations = 10000
    _errors = []
    _success = []
    _iteration = 0
    response = dict()
    global _BODY
    global _VALID_EMPLOYEES
    _BODY = get_post_body()
    # Only users listed in the ATTENDANCE_SYNC_USERS config may sync.
    users = get_config_by_name("ATTENDANCE_SYNC_USERS", [])
    if frappe.session.user in users:
        if _BODY:
            validation_err, _errors = validate_required_fields()
            if validation_err == False and len(_errors) < 1:
                _chunk = divide_chunks(_BODY, 20)
                # BUG FIX: the original condition `True or _iteration <= _max_iterations`
                # was always True, so the iteration safety cap never applied.
                while _iteration <= _max_iterations:
                    try:
                        _data = next(_chunk)
                        for att in _data:
                            # Add or amend the attendance record for this row.
                            status, result_obj = add_or_update_attendance(_VALID_EMPLOYEES[att.get("email")], att)
                            if status == False and result_obj:
                                att["error"] = result_obj
                                _errors.append(att)
                            else:
                                _success.append(result_obj)
                        _iteration += 1
                        # Persist each processed chunk immediately.
                        frappe.db.commit()
                    except StopIteration:
                        break
                # NOTE(review): per-row errors collected during processing are
                # not included in this 200 response -- confirm this is intended.
                response = {
                    "code": 200,
                    "success": _success
                }
            else:
                response = {
                    "code": 201,
                    "error": _errors
                }
        return response
    else:
        return {
            "code": 403,
            "error": [_("You are not allowed to sync roster/attendance.")]
        }
import sys
import nltk
nltk.download(['punkt', 'wordnet', 'stopwords', 'averaged_perceptron_tagger'])
import pandas as pd
import numpy as np
import re
import pickle
from sqlalchemy import create_engine
from nltk.tokenize import word_tokenize, sent_tokenize
from nltk.stem import WordNetLemmatizer
from nltk.corpus import stopwords
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.ensemble import GradientBoostingClassifier, AdaBoostClassifier, RandomForestClassifier
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from sklearn.multioutput import MultiOutputClassifier
from sklearn.metrics import f1_score, classification_report, fbeta_score, accuracy_score
from sklearn.linear_model import SGDClassifier
from sklearn.svm import SVC
from sklearn.base import BaseEstimator, TransformerMixin
# Pattern for detecting URLs in message text. BUG FIX: made it a raw string --
# the original's "\(" / "\)" escapes in a non-raw literal trigger invalid
# escape-sequence warnings (SyntaxWarning) on modern Python.
url_regex = r"http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+"
def load_data(database_filepath):
    """Load the DisasterResponse table from a SQLite database.

    Args:
        database_filepath: path to the SQLite database file.

    Returns:
        tuple: ``(X, y, categories)`` -- the array of message texts, the
        label columns (everything from column 4 onward), and the list of
        those column names.
    """
    # Connect to the SQLite database and pull the whole table.
    engine = create_engine('sqlite:///{}'.format(database_filepath))
    df = pd.read_sql('DisasterResponse', engine)
    # Messages are the model input; the remaining columns are the targets.
    messages = df['message'].values
    labels = df.iloc[:, 4:]
    category_names = list(df.columns[4:])
    return messages, labels, category_names
def tokenize(text):
    """Normalize, tokenize and lemmatize a message.

    URLs are replaced with a placeholder token, non-alphanumeric characters
    stripped, stop words removed, and each token lemmatized first as a noun
    and then as a verb.

    Args:
        text (str): raw message text.

    Returns:
        list[str]: cleaned tokens ready for vectorization.
    """
    # Replace every detected URL with a constant placeholder token.
    for url in re.findall(url_regex, text):
        text = text.replace(url, "urlplaceholder")
    # Normalize: lower-case and keep only alphanumerics.
    text = re.sub(r"[^a-zA-Z0-9]", " ", text.lower())
    tokens = word_tokenize(text)
    # BUG FIX: build the stop-word set once -- the original evaluated
    # stopwords.words('english') for EVERY token (a full list scan per token).
    stop_words = set(stopwords.words('english'))
    words = [w for w in tokens if w not in stop_words]
    lemmatizer = WordNetLemmatizer()
    # Lemmatize as noun first, then as verb.
    clean = [lemmatizer.lemmatize(w, pos='n').strip() for w in words]
    return [lemmatizer.lemmatize(w, pos='v').strip() for w in clean]
def build_model():
    """Build a grid-searched text-classification model.

    Returns:
        GridSearchCV wrapping a CountVectorizer -> TF-IDF -> multi-output
        random-forest pipeline, tuned over the forest size.
    """
    # Text vectorization followed by a multi-label random forest.
    steps = [
        ('vect', CountVectorizer(tokenizer=tokenize)),
        ('tfidf', TfidfTransformer()),
        ('clf', MultiOutputClassifier(RandomForestClassifier())),
    ]
    # Grid-search over the number of trees in each forest.
    param_grid = {'clf__estimator__n_estimators': [50, 65, 80, 95]}
    return GridSearchCV(Pipeline(steps), param_grid=param_grid)
def evaluate_model(model, X_test, Y_test, category_names):
    """Print a classification report and accuracy for each output category.

    Args:
        model: fitted multi-output classifier.
        X_test: held-out message texts.
        Y_test: DataFrame of true labels, one column per category.
        category_names: names of the label columns, in column order.
    """
    predictions = model.predict(X_test)
    for idx, category in enumerate(category_names):
        truth = Y_test.iloc[:, idx].values
        print("Category:", category, "\n", classification_report(truth, predictions[:, idx]))
        print('Accuracy of %25s: %.2f' % (category, accuracy_score(truth, predictions[:, idx])))
def save_model(model, model_filepath):
    """Serialize the trained model to a pickle file.

    Args:
        model: fitted estimator (or any picklable object) to persist.
        model_filepath (str): destination path for the pickle file.
    """
    # BUG FIX: the original opened the file without ever closing it; the
    # context manager guarantees the handle is flushed and released.
    with open(model_filepath, 'wb') as handle:
        pickle.dump(model, handle)
def main():
    """Command-line entry point: train, evaluate and persist the classifier.

    Expects two CLI arguments: the database path and the output pickle path.
    """
    # Guard clause: bail out with usage help on wrong argument count.
    if len(sys.argv) != 3:
        print('Please provide the filepath of the disaster messages database '
              'as the first argument and the filepath of the pickle file to '
              'save the model to as the second argument. \n\nExample: python '
              'train_classifier.py ../data/DisasterResponse.db classifier.pkl')
        return
    database_filepath, model_filepath = sys.argv[1:]
    print('Loading data...\n DATABASE: {}'.format(database_filepath))
    X, Y, category_names = load_data(database_filepath)
    X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2)
    print('Building model...')
    model = build_model()
    print('Training model...')
    model.fit(X_train, Y_train)
    print('Evaluating model...')
    evaluate_model(model, X_test, Y_test, category_names)
    print('Saving model...\n MODEL: {}'.format(model_filepath))
    save_model(model, model_filepath)
    print('Trained model saved!')
# Script entry point. BUG FIX: removed garbled dataset residue that was
# fused onto the `main()` line and made the file unparsable.
if __name__ == '__main__':
    main()
import nltk
nltk.download(['punkt', 'wordnet', 'stopwords', 'averaged_perceptron_tagger'])
import pandas as pd
import numpy as np
import re
import pickle
from sqlalchemy import create_engine
from nltk.tokenize import word_tokenize, sent_tokenize
from nltk.stem import WordNetLemmatizer
from nltk.corpus import stopwords
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.ensemble import GradientBoostingClassifier, AdaBoostClassifier, RandomForestClassifier
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from sklearn.multioutput import MultiOutputClassifier
from sklearn.metrics import f1_score, classification_report, fbeta_score, accuracy_score
from sklearn.linear_model import SGDClassifier
from sklearn.svm import SVC
from sklearn.base import BaseEstimator, TransformerMixin
# Pattern for detecting URLs in message text. BUG FIX: made it a raw string --
# the original's "\(" / "\)" escapes in a non-raw literal trigger invalid
# escape-sequence warnings (SyntaxWarning) on modern Python.
url_regex = r"http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+"
def load_data(database_filepath):
    """Load the DisasterResponse table from a SQLite database.

    Args:
        database_filepath: path to the SQLite database file.

    Returns:
        tuple: ``(X, y, categories)`` -- the array of message texts, the
        label columns (everything from column 4 onward), and the list of
        those column names.
    """
    # Connect to the SQLite database and pull the whole table.
    engine = create_engine('sqlite:///{}'.format(database_filepath))
    df = pd.read_sql('DisasterResponse', engine)
    # Messages are the model input; the remaining columns are the targets.
    messages = df['message'].values
    labels = df.iloc[:, 4:]
    category_names = list(df.columns[4:])
    return messages, labels, category_names
def tokenize(text):
    """Normalize, tokenize and lemmatize a message.

    URLs are replaced with a placeholder token, non-alphanumeric characters
    stripped, stop words removed, and each token lemmatized first as a noun
    and then as a verb.

    Args:
        text (str): raw message text.

    Returns:
        list[str]: cleaned tokens ready for vectorization.
    """
    # Replace every detected URL with a constant placeholder token.
    for url in re.findall(url_regex, text):
        text = text.replace(url, "urlplaceholder")
    # Normalize: lower-case and keep only alphanumerics.
    text = re.sub(r"[^a-zA-Z0-9]", " ", text.lower())
    tokens = word_tokenize(text)
    # BUG FIX: build the stop-word set once -- the original evaluated
    # stopwords.words('english') for EVERY token (a full list scan per token).
    stop_words = set(stopwords.words('english'))
    words = [w for w in tokens if w not in stop_words]
    lemmatizer = WordNetLemmatizer()
    # Lemmatize as noun first, then as verb.
    clean = [lemmatizer.lemmatize(w, pos='n').strip() for w in words]
    return [lemmatizer.lemmatize(w, pos='v').strip() for w in clean]
def build_model():
    """Build a grid-searched text-classification model.

    Returns:
        GridSearchCV wrapping a CountVectorizer -> TF-IDF -> multi-output
        random-forest pipeline, tuned over the forest size.
    """
    # Text vectorization followed by a multi-label random forest.
    steps = [
        ('vect', CountVectorizer(tokenizer=tokenize)),
        ('tfidf', TfidfTransformer()),
        ('clf', MultiOutputClassifier(RandomForestClassifier())),
    ]
    # Grid-search over the number of trees in each forest.
    param_grid = {'clf__estimator__n_estimators': [50, 65, 80, 95]}
    return GridSearchCV(Pipeline(steps), param_grid=param_grid)
def evaluate_model(model, X_test, Y_test, category_names):
    """Print a classification report and accuracy for each output category.

    Args:
        model: fitted multi-output classifier.
        X_test: held-out message texts.
        Y_test: DataFrame of true labels, one column per category.
        category_names: names of the label columns, in column order.
    """
    predictions = model.predict(X_test)
    for idx, category in enumerate(category_names):
        truth = Y_test.iloc[:, idx].values
        print("Category:", category, "\n", classification_report(truth, predictions[:, idx]))
        print('Accuracy of %25s: %.2f' % (category, accuracy_score(truth, predictions[:, idx])))
def save_model(model, model_filepath):
    """Serialize the trained model to a pickle file.

    Args:
        model: fitted estimator (or any picklable object) to persist.
        model_filepath (str): destination path for the pickle file.
    """
    # BUG FIX: the original opened the file without ever closing it; the
    # context manager guarantees the handle is flushed and released.
    with open(model_filepath, 'wb') as handle:
        pickle.dump(model, handle)
def main():
    """Command-line entry point: train, evaluate and persist the classifier.

    Expects two CLI arguments: the database path and the output pickle path.
    """
    # Guard clause: bail out with usage help on wrong argument count.
    if len(sys.argv) != 3:
        print('Please provide the filepath of the disaster messages database '
              'as the first argument and the filepath of the pickle file to '
              'save the model to as the second argument. \n\nExample: python '
              'train_classifier.py ../data/DisasterResponse.db classifier.pkl')
        return
    database_filepath, model_filepath = sys.argv[1:]
    print('Loading data...\n DATABASE: {}'.format(database_filepath))
    X, Y, category_names = load_data(database_filepath)
    X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2)
    print('Building model...')
    model = build_model()
    print('Training model...')
    model.fit(X_train, Y_train)
    print('Evaluating model...')
    evaluate_model(model, X_test, Y_test, category_names)
    print('Saving model...\n MODEL: {}'.format(model_filepath))
    save_model(model, model_filepath)
    print('Trained model saved!')
# Script entry point. BUG FIX: removed garbled dataset residue that was
# fused onto the `main()` line and made the file unparsable.
if __name__ == '__main__':
    main()
from collections import namedtuple
import io
import os
import git
import pexpect
import pytest
# Record of one executed shell command: the command line, its exit status,
# and its captured output.
Command = namedtuple('Command', ('cmd', 'return_code', 'out'))
class Koan:
    """Test harness for a git-koan exercise.

    Owns two temporary directories (a workspace and an upstream area),
    records every shell command executed via :meth:`shell`, and provides
    assertion helpers for inspecting the resulting git state.

    BUG FIX: the final line of this class carried garbled dataset residue
    that made the file unparsable; it has been removed.
    """

    # Seconds to wait for a non-interactive command before falling back to
    # interactive mode.
    TIMEOUT = 3

    def __init__(self, tmpdir_factory):
        self.verbose = False
        self._workspace = tmpdir_factory.mktemp('workspace')
        self._upstream = tmpdir_factory.mktemp('upstream')
        self._commands = []  # list of Command(cmd, return_code, out)
        self._say('\n')

    def assert_repo(self, relative_path='.'):
        """Fail unless a git repository exists at *relative_path*."""
        assert self.get_repo(relative_path), 'Repository has not been created.' + self.commands_debug()

    def assert_commands(self):
        """Fail if any recorded command exited with a non-zero status."""
        for c in self._commands:
            assert c.return_code == 0, \
                f'Command "{c.cmd}" finished with a non-zero status ({c.return_code}).' + self.commands_debug()

    def assert_remotes(self, expected_remotes=None, relative_path='.'):
        """Fail unless the repo's remotes exactly match *expected_remotes*.

        Args:
            expected_remotes: iterable of ``(name, url)`` pairs; required.
            relative_path: repository location inside the workspace.
        """
        assert expected_remotes is not None, 'assert_remotes cannot be called with `None` as `expected_remotes`'
        repo = self.get_repo(relative_path)
        remotes = {(remote.name, url) for remote in repo.remotes for url in remote.urls}
        for r in expected_remotes:
            assert r in remotes, f'Expected remote: {r} is not present in remote urls: {remotes}'
        unexpected_remotes = remotes.difference(expected_remotes)
        assert not unexpected_remotes, f'There are some unexpected remotes: {unexpected_remotes}'

    def get_repo(self, relative_path='.'):
        """Return a ``git.Repo`` at *relative_path*, or ``None`` if absent/invalid."""
        try:
            repo = git.Repo(os.path.join(self.workspace, relative_path))
        except (git.InvalidGitRepositoryError, git.NoSuchPathError):
            repo = None
        return repo

    @property
    def repo(self):
        """ Convenience wrapper for self.get_repo('.') """
        return self.get_repo('.')

    def _say(self, s):
        # Debug chatter; only shown in verbose mode.
        if self.verbose:
            print(s)

    @property
    def workspace(self):
        """Workspace directory as a string path."""
        return str(self._workspace)

    @property
    def upstream(self):
        """Upstream directory as a string path."""
        return str(self._upstream)

    def shell(self, command, interactive=False, cwd='.'):
        """Run *command* inside the workspace and record its result.

        Non-interactive commands that exceed ``TIMEOUT`` seconds fall back
        to interactive mode so the user can unblock them.
        """
        if not command:
            pytest.fail('Cannot run an empty command!')
        pretty_cwd = f'({cwd})' if cwd != '.' else ''
        self._say(f'{pretty_cwd}$ {command}')
        out = io.StringIO()
        p = pexpect.spawn(command, cwd=os.path.join(self.workspace, cwd), logfile=out, encoding='utf8')
        if interactive:
            p.logfile = None
            p.interact()
        else:
            try:
                p.expect(pexpect.EOF, timeout=self.TIMEOUT)
            except pexpect.TIMEOUT:
                print(f"Command `{command}` timed-out -- moving into interactive mode. "
                      f"Consider using ctrl-c to stop the command if it's not responding.")
                p.logfile = None
                p.interact()
        p.wait()
        out.seek(0)
        self._commands.append(Command(command, p.exitstatus, str(out.read())))

    def edit(self, file, cwd='.', editor='editor'):
        """Open *file* in an interactive editor session."""
        self.shell(f'{editor} {file}', interactive=True, cwd=cwd)

    def commands_debug(self):
        """Return a human-readable transcript of every recorded command.

        Workspace paths are shortened to '.' unless verbose mode is on.
        """
        buffer = ''
        for i, c in enumerate(self._commands):
            buffer += (f'''
# Command ({i+1}/{len(self._commands)}): "{c.cmd}":
# exit code: {c.return_code}
# output:
{c.out}
''')
        if self.verbose:
            return buffer
        return buffer.replace(str(self.workspace), '.')
import io
import os
import git
import pexpect
import pytest
# Record of one executed shell command: the command line, its exit status,
# and its captured output.
Command = namedtuple('Command', ('cmd', 'return_code', 'out'))
class Koan:
    """Test harness for a git-koan exercise.

    Owns two temporary directories (a workspace and an upstream area),
    records every shell command executed via :meth:`shell`, and provides
    assertion helpers for inspecting the resulting git state.

    BUG FIX: the final line of this class carried garbled dataset residue
    that made the file unparsable; it has been removed.
    """

    # Seconds to wait for a non-interactive command before falling back to
    # interactive mode.
    TIMEOUT = 3

    def __init__(self, tmpdir_factory):
        self.verbose = False
        self._workspace = tmpdir_factory.mktemp('workspace')
        self._upstream = tmpdir_factory.mktemp('upstream')
        self._commands = []  # list of Command(cmd, return_code, out)
        self._say('\n')

    def assert_repo(self, relative_path='.'):
        """Fail unless a git repository exists at *relative_path*."""
        assert self.get_repo(relative_path), 'Repository has not been created.' + self.commands_debug()

    def assert_commands(self):
        """Fail if any recorded command exited with a non-zero status."""
        for c in self._commands:
            assert c.return_code == 0, \
                f'Command "{c.cmd}" finished with a non-zero status ({c.return_code}).' + self.commands_debug()

    def assert_remotes(self, expected_remotes=None, relative_path='.'):
        """Fail unless the repo's remotes exactly match *expected_remotes*.

        Args:
            expected_remotes: iterable of ``(name, url)`` pairs; required.
            relative_path: repository location inside the workspace.
        """
        assert expected_remotes is not None, 'assert_remotes cannot be called with `None` as `expected_remotes`'
        repo = self.get_repo(relative_path)
        remotes = {(remote.name, url) for remote in repo.remotes for url in remote.urls}
        for r in expected_remotes:
            assert r in remotes, f'Expected remote: {r} is not present in remote urls: {remotes}'
        unexpected_remotes = remotes.difference(expected_remotes)
        assert not unexpected_remotes, f'There are some unexpected remotes: {unexpected_remotes}'

    def get_repo(self, relative_path='.'):
        """Return a ``git.Repo`` at *relative_path*, or ``None`` if absent/invalid."""
        try:
            repo = git.Repo(os.path.join(self.workspace, relative_path))
        except (git.InvalidGitRepositoryError, git.NoSuchPathError):
            repo = None
        return repo

    @property
    def repo(self):
        """ Convenience wrapper for self.get_repo('.') """
        return self.get_repo('.')

    def _say(self, s):
        # Debug chatter; only shown in verbose mode.
        if self.verbose:
            print(s)

    @property
    def workspace(self):
        """Workspace directory as a string path."""
        return str(self._workspace)

    @property
    def upstream(self):
        """Upstream directory as a string path."""
        return str(self._upstream)

    def shell(self, command, interactive=False, cwd='.'):
        """Run *command* inside the workspace and record its result.

        Non-interactive commands that exceed ``TIMEOUT`` seconds fall back
        to interactive mode so the user can unblock them.
        """
        if not command:
            pytest.fail('Cannot run an empty command!')
        pretty_cwd = f'({cwd})' if cwd != '.' else ''
        self._say(f'{pretty_cwd}$ {command}')
        out = io.StringIO()
        p = pexpect.spawn(command, cwd=os.path.join(self.workspace, cwd), logfile=out, encoding='utf8')
        if interactive:
            p.logfile = None
            p.interact()
        else:
            try:
                p.expect(pexpect.EOF, timeout=self.TIMEOUT)
            except pexpect.TIMEOUT:
                print(f"Command `{command}` timed-out -- moving into interactive mode. "
                      f"Consider using ctrl-c to stop the command if it's not responding.")
                p.logfile = None
                p.interact()
        p.wait()
        out.seek(0)
        self._commands.append(Command(command, p.exitstatus, str(out.read())))

    def edit(self, file, cwd='.', editor='editor'):
        """Open *file* in an interactive editor session."""
        self.shell(f'{editor} {file}', interactive=True, cwd=cwd)

    def commands_debug(self):
        """Return a human-readable transcript of every recorded command.

        Workspace paths are shortened to '.' unless verbose mode is on.
        """
        buffer = ''
        for i, c in enumerate(self._commands):
            buffer += (f'''
# Command ({i+1}/{len(self._commands)}): "{c.cmd}":
# exit code: {c.return_code}
# output:
{c.out}
''')
        if self.verbose:
            return buffer
        return buffer.replace(str(self.workspace), '.')
from sklearn.metrics import confusion_matrix, f1_score, roc_curve
import numpy as np
import pandas as pd
class analysis:
    """Mixin holding post-training analysis helpers.

    Expects the inheriting class to provide test-set attributes
    ``testLabel_``, ``testProb_``, ``testFlux_``, ``testChi_``,
    ``testDepth_`` and ``testSig_`` (and train/valid equivalents for ROC),
    plus a ``_test()`` method that populates the test predictions.
    """

    def __init__(self):
        pass

    def _getComplexParams(self, abs=False):
        """Collect the parameters of the complex (label == 1) sources.

        Args:
            abs: take absolute values of the chi/depth differences.

        Postcondition:
            ``self.dfComplex_`` holds one row per complex source with the
            second component's flux, the phase/depth differences, the
            model's predicted probability and the noise level.
        """
        # Indices of the sources labelled complex.
        loc = np.where(self.testLabel_ == 1)[0]
        # Model's predicted probability that each source is complex.
        prob = self.testProb_[loc]
        # Flux of the second component.
        flux = np.asarray([f[1] for f in self.testFlux_[loc]])
        # Difference in phases between the two components.
        chi = np.asarray([c[1] - c[0] for c in self.testChi_[loc]])
        if abs:
            chi = np.abs(chi)
        # Difference in Faraday depths between the two components.
        depth = np.asarray([d[1] - d[0] for d in self.testDepth_[loc]])
        if abs:
            depth = np.abs(depth)
        # Noise parameter.
        sig = self.testSig_[loc]
        self.dfComplex_ = pd.concat(
            [
                pd.Series(chi, name='chi'),
                pd.Series(depth, name='depth'),
                pd.Series(flux, name='flux'),
                pd.Series(prob, name='prob'),
                pd.Series(sig, name='sig'),
            ],
            axis=1)

    def _getSimpleParams(self):
        """Collect the parameters of the simple (label == 0) sources.

        Postcondition:
            ``self.dfSimple_`` holds chi, depth, flux, predicted
            probability and noise for every simple source.
        """
        loc = np.where(self.testLabel_ == 0)[0]
        prob = self.testProb_[loc]
        flux = self.testFlux_[loc]
        chi = self.testChi_[loc]
        depth = self.testDepth_[loc]
        sig = self.testSig_[loc]
        self.dfSimple_ = pd.concat(
            [
                pd.Series(chi, name='chi'),
                pd.Series(depth, name='depth'),
                pd.Series(flux, name='flux'),
                pd.Series(prob, name='prob'),
                pd.Series(sig, name='sig'),
            ],
            axis=1)

    def _getF1(self, step=0.025, save=False, suffix='', dir='./'):
        """Compute F1 scores across probability thresholds in [0.5, 1).

        Stores ``self.threshold_`` and ``self.F1_``; optionally saves both
        as ``.npy`` files in *dir* with *suffix* appended to the names.
        """
        # Lazily run the model on the test set if predictions are missing.
        try:
            self.testProb_
        except AttributeError:
            self._test()
        threshold = np.arange(0.5, 1, step)
        F1 = np.zeros_like(threshold)
        for i, p in enumerate(threshold):
            testPred = np.where(self.testProb_ > p, 1, 0)
            F1[i] = f1_score(self.testLabel_, testPred)
        self.threshold_ = threshold
        self.F1_ = F1
        if save:
            np.save(dir + 'threshold' + suffix + '.npy', threshold)
            np.save(dir + 'F1' + suffix + '.npy', F1)

    def _getROC(self, data='test', save=False, suffix='', dir='./'):
        """Compute the ROC curve for the train/valid/test split.

        Stores ``self.fpr_`` and ``self.tpr_``; optionally saves them as
        ``.npy`` files.
        """
        try:
            if data == 'train':
                fpr, tpr, thresh = roc_curve(self.trainLabel_, self.trainProb_)
            elif data == 'valid':
                fpr, tpr, thresh = roc_curve(self.validLabel_, self.validProb_)
            else:
                fpr, tpr, thresh = roc_curve(self.testLabel_, self.testProb_)
        except AttributeError:
            # BUG FIX: the original called sys.exit without importing sys,
            # raising a NameError instead of exiting cleanly.
            import sys
            print("No data found. Aborting.")
            sys.exit(1)
        self.fpr_ = fpr
        self.tpr_ = tpr
        if save:
            np.save(dir + 'fpr' + suffix + '.npy', fpr)
            np.save(dir + 'tpr' + suffix + '.npy', tpr)
import numpy as np
import pandas as pd
class analysis:
    """Mixin holding post-training analysis helpers.

    Expects the inheriting class to provide test-set attributes
    ``testLabel_``, ``testProb_``, ``testFlux_``, ``testChi_``,
    ``testDepth_`` and ``testSig_`` (and train/valid equivalents for ROC),
    plus a ``_test()`` method that populates the test predictions.
    """

    def __init__(self):
        pass

    def _getComplexParams(self, abs=False):
        """Collect the parameters of the complex (label == 1) sources.

        Args:
            abs: take absolute values of the chi/depth differences.

        Postcondition:
            ``self.dfComplex_`` holds one row per complex source with the
            second component's flux, the phase/depth differences, the
            model's predicted probability and the noise level.
        """
        # Indices of the sources labelled complex.
        loc = np.where(self.testLabel_ == 1)[0]
        # Model's predicted probability that each source is complex.
        prob = self.testProb_[loc]
        # Flux of the second component.
        flux = np.asarray([f[1] for f in self.testFlux_[loc]])
        # Difference in phases between the two components.
        chi = np.asarray([c[1] - c[0] for c in self.testChi_[loc]])
        if abs:
            chi = np.abs(chi)
        # Difference in Faraday depths between the two components.
        depth = np.asarray([d[1] - d[0] for d in self.testDepth_[loc]])
        if abs:
            depth = np.abs(depth)
        # Noise parameter.
        sig = self.testSig_[loc]
        self.dfComplex_ = pd.concat(
            [
                pd.Series(chi, name='chi'),
                pd.Series(depth, name='depth'),
                pd.Series(flux, name='flux'),
                pd.Series(prob, name='prob'),
                pd.Series(sig, name='sig'),
            ],
            axis=1)

    def _getSimpleParams(self):
        """Collect the parameters of the simple (label == 0) sources.

        Postcondition:
            ``self.dfSimple_`` holds chi, depth, flux, predicted
            probability and noise for every simple source.
        """
        loc = np.where(self.testLabel_ == 0)[0]
        prob = self.testProb_[loc]
        flux = self.testFlux_[loc]
        chi = self.testChi_[loc]
        depth = self.testDepth_[loc]
        sig = self.testSig_[loc]
        self.dfSimple_ = pd.concat(
            [
                pd.Series(chi, name='chi'),
                pd.Series(depth, name='depth'),
                pd.Series(flux, name='flux'),
                pd.Series(prob, name='prob'),
                pd.Series(sig, name='sig'),
            ],
            axis=1)

    def _getF1(self, step=0.025, save=False, suffix='', dir='./'):
        """Compute F1 scores across probability thresholds in [0.5, 1).

        Stores ``self.threshold_`` and ``self.F1_``; optionally saves both
        as ``.npy`` files in *dir* with *suffix* appended to the names.
        """
        # Lazily run the model on the test set if predictions are missing.
        try:
            self.testProb_
        except AttributeError:
            self._test()
        threshold = np.arange(0.5, 1, step)
        F1 = np.zeros_like(threshold)
        for i, p in enumerate(threshold):
            testPred = np.where(self.testProb_ > p, 1, 0)
            F1[i] = f1_score(self.testLabel_, testPred)
        self.threshold_ = threshold
        self.F1_ = F1
        if save:
            np.save(dir + 'threshold' + suffix + '.npy', threshold)
            np.save(dir + 'F1' + suffix + '.npy', F1)

    def _getROC(self, data='test', save=False, suffix='', dir='./'):
        """Compute the ROC curve for the train/valid/test split.

        Stores ``self.fpr_`` and ``self.tpr_``; optionally saves them as
        ``.npy`` files.
        """
        try:
            if data == 'train':
                fpr, tpr, thresh = roc_curve(self.trainLabel_, self.trainProb_)
            elif data == 'valid':
                fpr, tpr, thresh = roc_curve(self.validLabel_, self.validProb_)
            else:
                fpr, tpr, thresh = roc_curve(self.testLabel_, self.testProb_)
        except AttributeError:
            # BUG FIX: the original called sys.exit without importing sys,
            # raising a NameError instead of exiting cleanly.
            import sys
            print("No data found. Aborting.")
            sys.exit(1)
        self.fpr_ = fpr
        self.tpr_ = tpr
        if save:
            np.save(dir + 'fpr' + suffix + '.npy', fpr)
            np.save(dir + 'tpr' + suffix + '.npy', tpr)
import datetime
import os
import boto3
import dateutil.parser
import dateutil.tz
from make_table import ScrapedJob, session_scope
SQS_QUEUE = os.environ.get('SQS_QUEUE')
SQS_REGION = os.environ.get('SQS_REGION')
RDS_CREDENTIALS = os.environ.get('RDS_CREDENTIALS')
def reformat_sqs_message(message):
    """Flatten an SQS message's attributes into a plain dict.

    Args:
        message: boto3 SQS Message whose ``message_attributes`` maps
            attribute names to ``{'StringValue': ...}`` dicts.

    Returns:
        dict: attribute name -> string value.
    """
    # BUG FIX: dict.iteritems() is Python 2 only (AttributeError on Python 3);
    # .items() works on both.
    return {key: value['StringValue'] for key, value in message.message_attributes.items()}
def main():
with session_scope(RDS_CREDENTIALS) as session:
sqs = boto3.resource('sqs', region_name=SQS_REGION)
queue = sqs.get_queue_by_name(QueueName=SQS_QUEUE)
for _ in xrange(100):
messages = queue.receive_messages(MaxNumberOfMessages=10, MessageAttributeNames=['All'])
if messages:
for message in messages:
# data = reformat_sqs_message(message)
# job = ScrapedJob(url=data['url'], created_at=dateutil.parser.parse(data['posted']), data=data)
job = ScrapedJob.from_dict(reformat_sqs_message(message))
query = session.query(ScrapedJob).filter(ScrapedJob.url == job.url)
matched_job = query.one_or_none()
if matched_job is None:
# it's a new job, since it hasn't been seen before
session.add(job)
else:
if job.created_at.tzinfo is None:
job.created_at = job.created_at.replace(tzinfo=dateutil.tz.tzutc())
if job.created_at < matched_job.created_at:
# new record has an older timestamp
matched_job.created_at = job.created_at
# modifying the existing record will cause it to be marked as dirty
# so when the session is committed it will emit an UPDATE for the row
# completed message handling
queue.delete_messages(Entries=[
{'Id': m.message_id, 'ReceiptHandle': m.receipt_handle} for m in messages]) | poll_sqs.py | import datetime
import os
import boto3
import dateutil.parser
import dateutil.tz
from make_table import ScrapedJob, session_scope
SQS_QUEUE = os.environ.get('SQS_QUEUE')
SQS_REGION = os.environ.get('SQS_REGION')
RDS_CREDENTIALS = os.environ.get('RDS_CREDENTIALS')
def reformat_sqs_message(message):
job = {}
for key, value in message.message_attributes.iteritems():
job[key] = value['StringValue']
return job
def main():
with session_scope(RDS_CREDENTIALS) as session:
sqs = boto3.resource('sqs', region_name=SQS_REGION)
queue = sqs.get_queue_by_name(QueueName=SQS_QUEUE)
for _ in xrange(100):
messages = queue.receive_messages(MaxNumberOfMessages=10, MessageAttributeNames=['All'])
if messages:
for message in messages:
# data = reformat_sqs_message(message)
# job = ScrapedJob(url=data['url'], created_at=dateutil.parser.parse(data['posted']), data=data)
job = ScrapedJob.from_dict(reformat_sqs_message(message))
query = session.query(ScrapedJob).filter(ScrapedJob.url == job.url)
matched_job = query.one_or_none()
if matched_job is None:
# it's a new job, since it hasn't been seen before
session.add(job)
else:
if job.created_at.tzinfo is None:
job.created_at = job.created_at.replace(tzinfo=dateutil.tz.tzutc())
if job.created_at < matched_job.created_at:
# new record has an older timestamp
matched_job.created_at = job.created_at
# modifying the existing record will cause it to be marked as dirty
# so when the session is committed it will emit an UPDATE for the row
# completed message handling
queue.delete_messages(Entries=[
{'Id': m.message_id, 'ReceiptHandle': m.receipt_handle} for m in messages]) | 0.27513 | 0.068475 |
import logging
from argparse import Namespace, ArgumentParser
from typing import Final, Optional
import jupiter.command.command as command
from jupiter.domain.adate import ADate
from jupiter.use_cases.metrics.entry.update import MetricEntryUpdateUseCase
from jupiter.framework.update_action import UpdateAction
from jupiter.framework.base.entity_id import EntityId
LOGGER = logging.getLogger(__name__)
class MetricEntryUpdate(command.Command):
"""UseCase for updating a metric entry's properties."""
_command: Final[MetricEntryUpdateUseCase]
def __init__(self, the_command: MetricEntryUpdateUseCase) -> None:
"""Constructor."""
self._command = the_command
@staticmethod
def name() -> str:
"""The name of the command."""
return "metric-entry-update"
@staticmethod
def description() -> str:
"""The description of the command."""
return "Update a metric entry"
def build_parser(self, parser: ArgumentParser) -> None:
"""Construct a argparse parser for the command."""
parser.add_argument("--id", dest="ref_id", required=True, help="The id of the metric")
parser.add_argument("--collection-time", dest="collection_time", required=False,
help="The time at which a metric should be recorded")
parser.add_argument("--value", dest="value", required=False, type=float,
help="The value for the metric")
parser.add_argument("--notes", dest="notes", required=False, type=str,
help="A note for the metric")
parser.add_argument("--clear-notes", dest="clear_notes", default=False,
action="store_const", const=True, help="Clear the notes")
def run(self, args: Namespace) -> None:
"""Callback to execute when the command is invoked."""
ref_id = EntityId.from_raw(args.ref_id)
collection_time = UpdateAction.change_to(ADate.from_str(args.collection_time)) \
if args.collection_time is not None else UpdateAction.do_nothing()
value = UpdateAction.change_to(args.value) if args.value is not None else UpdateAction.do_nothing()
notes: UpdateAction[Optional[str]]
if args.clear_notes:
notes = UpdateAction.change_to(None)
elif args.notes is not None:
notes = UpdateAction.change_to(args.notes)
else:
notes = UpdateAction.do_nothing()
self._command.execute(MetricEntryUpdateUseCase.Args(
ref_id=ref_id, collection_time=collection_time, value=value, notes=notes)) | jupiter/command/metric_entry_update.py |
import logging
from argparse import Namespace, ArgumentParser
from typing import Final, Optional
import jupiter.command.command as command
from jupiter.domain.adate import ADate
from jupiter.use_cases.metrics.entry.update import MetricEntryUpdateUseCase
from jupiter.framework.update_action import UpdateAction
from jupiter.framework.base.entity_id import EntityId
LOGGER = logging.getLogger(__name__)
class MetricEntryUpdate(command.Command):
"""UseCase for updating a metric entry's properties."""
_command: Final[MetricEntryUpdateUseCase]
def __init__(self, the_command: MetricEntryUpdateUseCase) -> None:
"""Constructor."""
self._command = the_command
@staticmethod
def name() -> str:
"""The name of the command."""
return "metric-entry-update"
@staticmethod
def description() -> str:
"""The description of the command."""
return "Update a metric entry"
def build_parser(self, parser: ArgumentParser) -> None:
"""Construct a argparse parser for the command."""
parser.add_argument("--id", dest="ref_id", required=True, help="The id of the metric")
parser.add_argument("--collection-time", dest="collection_time", required=False,
help="The time at which a metric should be recorded")
parser.add_argument("--value", dest="value", required=False, type=float,
help="The value for the metric")
parser.add_argument("--notes", dest="notes", required=False, type=str,
help="A note for the metric")
parser.add_argument("--clear-notes", dest="clear_notes", default=False,
action="store_const", const=True, help="Clear the notes")
def run(self, args: Namespace) -> None:
"""Callback to execute when the command is invoked."""
ref_id = EntityId.from_raw(args.ref_id)
collection_time = UpdateAction.change_to(ADate.from_str(args.collection_time)) \
if args.collection_time is not None else UpdateAction.do_nothing()
value = UpdateAction.change_to(args.value) if args.value is not None else UpdateAction.do_nothing()
notes: UpdateAction[Optional[str]]
if args.clear_notes:
notes = UpdateAction.change_to(None)
elif args.notes is not None:
notes = UpdateAction.change_to(args.notes)
else:
notes = UpdateAction.do_nothing()
self._command.execute(MetricEntryUpdateUseCase.Args(
ref_id=ref_id, collection_time=collection_time, value=value, notes=notes)) | 0.93878 | 0.111096 |
import textwrap
import unittest
from mock import patch
class TestParseVagrantMachineReadableBoxList(unittest.TestCase):
def test_machine_readable_box_list(self):
with patch('fabtools.vagrant.local') as mock_local:
mock_local.return_value = textwrap.dedent(r"""
1391708688,,box-name,lucid32
1391708688,,box-provider,virtualbox
1391708688,,box-name,precise64
1391708688,,box-provider,virtualbox
1391708688,,box-name,precise64
1391708688,,box-provider,vmware_fusion
""")
from fabtools.vagrant import _box_list_machine_readable
res = _box_list_machine_readable()
self.assertEqual(res, [
('lucid32', 'virtualbox'),
('precise64', 'virtualbox'),
('precise64', 'vmware_fusion'),
])
class TestParseVagrantBoxListWithProvider(unittest.TestCase):
def test_parse_box_list(self):
with patch('fabtools.vagrant.local') as mock_local:
mock_local.return_value = textwrap.dedent("""\
lucid32 (virtualbox)
precise64 (virtualbox)
precise64 (vmware_fusion)
""")
from fabtools.vagrant import _box_list_human_readable
res = _box_list_human_readable()
self.assertEqual(res, [
('lucid32', 'virtualbox'),
('precise64', 'virtualbox'),
('precise64', 'vmware_fusion'),
])
class TestParseVagrantBoxListWithoutProvider(unittest.TestCase):
def test_parse_box_list(self):
with patch('fabtools.vagrant.local') as mock_local:
mock_local.return_value = textwrap.dedent("""\
lucid32
precise64
""")
from fabtools.vagrant import _box_list_human_readable
res = _box_list_human_readable()
self.assertEqual(res, [
('lucid32', 'virtualbox'),
('precise64', 'virtualbox'),
])
class TestVagrantBaseBoxes(unittest.TestCase):
def test_vagrant_base_boxes(self):
with patch('fabtools.vagrant._box_list') as mock_list:
mock_list.return_value = [
('lucid32', 'virtualbox'),
('precise64', 'virtualbox'),
('precise64', 'vmware_fusion'),
]
from fabtools.vagrant import base_boxes
self.assertEqual(base_boxes(), ['lucid32', 'precise64']) | fabtools/tests/test_vagrant_base_boxes.py | import textwrap
import unittest
from mock import patch
class TestParseVagrantMachineReadableBoxList(unittest.TestCase):
def test_machine_readable_box_list(self):
with patch('fabtools.vagrant.local') as mock_local:
mock_local.return_value = textwrap.dedent(r"""
1391708688,,box-name,lucid32
1391708688,,box-provider,virtualbox
1391708688,,box-name,precise64
1391708688,,box-provider,virtualbox
1391708688,,box-name,precise64
1391708688,,box-provider,vmware_fusion
""")
from fabtools.vagrant import _box_list_machine_readable
res = _box_list_machine_readable()
self.assertEqual(res, [
('lucid32', 'virtualbox'),
('precise64', 'virtualbox'),
('precise64', 'vmware_fusion'),
])
class TestParseVagrantBoxListWithProvider(unittest.TestCase):
def test_parse_box_list(self):
with patch('fabtools.vagrant.local') as mock_local:
mock_local.return_value = textwrap.dedent("""\
lucid32 (virtualbox)
precise64 (virtualbox)
precise64 (vmware_fusion)
""")
from fabtools.vagrant import _box_list_human_readable
res = _box_list_human_readable()
self.assertEqual(res, [
('lucid32', 'virtualbox'),
('precise64', 'virtualbox'),
('precise64', 'vmware_fusion'),
])
class TestParseVagrantBoxListWithoutProvider(unittest.TestCase):
def test_parse_box_list(self):
with patch('fabtools.vagrant.local') as mock_local:
mock_local.return_value = textwrap.dedent("""\
lucid32
precise64
""")
from fabtools.vagrant import _box_list_human_readable
res = _box_list_human_readable()
self.assertEqual(res, [
('lucid32', 'virtualbox'),
('precise64', 'virtualbox'),
])
class TestVagrantBaseBoxes(unittest.TestCase):
def test_vagrant_base_boxes(self):
with patch('fabtools.vagrant._box_list') as mock_list:
mock_list.return_value = [
('lucid32', 'virtualbox'),
('precise64', 'virtualbox'),
('precise64', 'vmware_fusion'),
]
from fabtools.vagrant import base_boxes
self.assertEqual(base_boxes(), ['lucid32', 'precise64']) | 0.453746 | 0.174024 |
from PIL import Image
from django.conf import settings
from django.core.exceptions import ValidationError
def _is_allowed_extension(image, valid_extensions):
img = Image.open(image)
if img.format.lower() not in valid_extensions:
return False
return True
def _is_image_width_less_than_or_equal(im, width):
image_width, _ = im.size
return image_width <= width
def _is_image_width_more_than_or_equal(im, width):
image_width, _ = im.size
return image_width >= width
def _is_image_height_less_than_or_equal(im, height):
_, image_height = im.size
return image_height <= height
def _is_image_height_more_than_or_equal(im, height):
_, image_height = im.size
return image_height >= height
def _is_image_size_is_less_than_or_equal(image, size_in_megabyte):
return image.size / (1024 ** 2) <= size_in_megabyte
def _check_min_max_width_length_size_image_extensions(
image,
width_max,
height_max,
width_min,
height_min,
size_max,
allowed_extensions,
):
errors = {
'width_max': False,
'height_max': False,
'width_min': False,
'height_min': False,
'size_max': False,
'image_extensions': False,
'size_min': False,
}
if not _is_allowed_extension(image, allowed_extensions):
errors['allowed_extensions'] = True
return errors
im = Image.open(image)
if not _is_image_width_less_than_or_equal(im, width_max):
errors['width_max'] = True
elif not _is_image_width_more_than_or_equal(im, width_min):
errors['width_min'] = True
if not _is_image_height_less_than_or_equal(im, height_max):
errors['height_max'] = True
elif not _is_image_height_more_than_or_equal(im, height_min):
errors['height_min'] = True
if not _is_image_size_is_less_than_or_equal(image, size_max):
errors['size_max'] = True
return errors
def profile_image_validate(image):
errors = _check_min_max_width_length_size_image_extensions(
image=image,
allowed_extensions=settings.PROFILE_IMAGE_ALLOWED_EXTENSIONS,
width_max=settings.PROFILE_IMAGE_WIDTH_MAX,
width_min=settings.PROFILE_IMAGE_WIDTH_MIN,
height_max=settings.PROFILE_IMAGE_HEIGHT_MAX,
height_min=settings.PROFILE_IMAGE_HEIGHT_MIN,
size_max=settings.PROFILE_IMAGE_SIZE_MAX,
)
error_messages = []
for key in errors:
if errors[key]:
error_messages.append(settings.ERROR_MESSAGES[f'PROFILE_IMAGE_{key.upper()}'])
if error_messages:
raise ValidationError(error_messages)
def tag_image_validate(image):
errors = _check_min_max_width_length_size_image_extensions(
image=image,
allowed_extensions=settings.TAG_IMAGE_ALLOWED_EXTENSIONS,
width_max=settings.TAG_IMAGE_WIDTH_MAX,
width_min=settings.TAG_IMAGE_WIDTH_MIN,
height_max=settings.TAG_IMAGE_HEIGHT_MAX,
height_min=settings.TAG_IMAGE_HEIGHT_MIN,
size_max=settings.TAG_IMAGE_SIZE_MAX,
)
error_messages = []
for key in errors:
if errors[key]:
error_messages.append(settings.ERROR_MESSAGES[f'TAG_IMAGE_{key.upper()}'])
if error_messages:
raise ValidationError(error_messages) | Validators/image_validators.py | from PIL import Image
from django.conf import settings
from django.core.exceptions import ValidationError
def _is_allowed_extension(image, valid_extensions):
img = Image.open(image)
if img.format.lower() not in valid_extensions:
return False
return True
def _is_image_width_less_than_or_equal(im, width):
image_width, _ = im.size
return image_width <= width
def _is_image_width_more_than_or_equal(im, width):
image_width, _ = im.size
return image_width >= width
def _is_image_height_less_than_or_equal(im, height):
_, image_height = im.size
return image_height <= height
def _is_image_height_more_than_or_equal(im, height):
_, image_height = im.size
return image_height >= height
def _is_image_size_is_less_than_or_equal(image, size_in_megabyte):
return image.size / (1024 ** 2) <= size_in_megabyte
def _check_min_max_width_length_size_image_extensions(
image,
width_max,
height_max,
width_min,
height_min,
size_max,
allowed_extensions,
):
errors = {
'width_max': False,
'height_max': False,
'width_min': False,
'height_min': False,
'size_max': False,
'image_extensions': False,
'size_min': False,
}
if not _is_allowed_extension(image, allowed_extensions):
errors['allowed_extensions'] = True
return errors
im = Image.open(image)
if not _is_image_width_less_than_or_equal(im, width_max):
errors['width_max'] = True
elif not _is_image_width_more_than_or_equal(im, width_min):
errors['width_min'] = True
if not _is_image_height_less_than_or_equal(im, height_max):
errors['height_max'] = True
elif not _is_image_height_more_than_or_equal(im, height_min):
errors['height_min'] = True
if not _is_image_size_is_less_than_or_equal(image, size_max):
errors['size_max'] = True
return errors
def profile_image_validate(image):
errors = _check_min_max_width_length_size_image_extensions(
image=image,
allowed_extensions=settings.PROFILE_IMAGE_ALLOWED_EXTENSIONS,
width_max=settings.PROFILE_IMAGE_WIDTH_MAX,
width_min=settings.PROFILE_IMAGE_WIDTH_MIN,
height_max=settings.PROFILE_IMAGE_HEIGHT_MAX,
height_min=settings.PROFILE_IMAGE_HEIGHT_MIN,
size_max=settings.PROFILE_IMAGE_SIZE_MAX,
)
error_messages = []
for key in errors:
if errors[key]:
error_messages.append(settings.ERROR_MESSAGES[f'PROFILE_IMAGE_{key.upper()}'])
if error_messages:
raise ValidationError(error_messages)
def tag_image_validate(image):
errors = _check_min_max_width_length_size_image_extensions(
image=image,
allowed_extensions=settings.TAG_IMAGE_ALLOWED_EXTENSIONS,
width_max=settings.TAG_IMAGE_WIDTH_MAX,
width_min=settings.TAG_IMAGE_WIDTH_MIN,
height_max=settings.TAG_IMAGE_HEIGHT_MAX,
height_min=settings.TAG_IMAGE_HEIGHT_MIN,
size_max=settings.TAG_IMAGE_SIZE_MAX,
)
error_messages = []
for key in errors:
if errors[key]:
error_messages.append(settings.ERROR_MESSAGES[f'TAG_IMAGE_{key.upper()}'])
if error_messages:
raise ValidationError(error_messages) | 0.538255 | 0.170128 |
import requests
from lxml import etree
from bs4 import BeautifulSoup
import os
import re
class RSpider:
def __init__(self):
self.url = 'http://www.kekenet.com/Article/media/economist/'
def get_html(self, url):
try:
# print(url)
html = requests.get(url=url)
html.encoding = 'utf-8'
if html.status_code == 200:
# print(html.text)
return html.text
else:
return None
except Exception as e:
print(e.args)
def get_list_text(self, url):
html = self.get_html(url)
tree = etree.HTML(html)
xp = '//*[@id="menu-list"]/li'
ul = tree.xpath(xp)
ans = []
for li in ul:
li = li.xpath('h2/a[2]')[0]
url = li.get('href')
title = li.get('title')
ans.append({"url": url, "title": title})
return ans
def get_mp3(self, url):
url = re.sub('/\w+/', '/mp3/', url)
# print(url)
html = requests.get(url)
html.encoding = 'utf-8'
if html.status_code == 200:
soup = BeautifulSoup(html.text, 'lxml')
ans = soup.find_all('a', href = re.compile('http://k6.kekenet.com/Sound/'))
if len(ans) != 0:
return ans[0].get('href')
else:
return None
else:
return None
def get_text(self, url):
html = self.get_html(url)
tree = etree.HTML(html)
xp = '//*[@id="article_eng"]/div//text()'
div = tree.xpath(xp)
eng = ""
chinese = ""
if len(div) != 0:
for value in div:
if value == '' or value == '\n':
continue
if isChinese(value):
chinese += value + '\n'
else:
eng += value + '\n'
return (chinese, eng)
def get_all(self, url):
return (self.get_text(url), self.get_mp3(url))
def isChinese(s):
flag = 0
for i in s:
if '\u4e00' <= i <= '\u9fff':
flag = 1
break
return flag
if __name__ == "__main__":
spider = RSpider()
ans = spider.get_list_text(spider.url)
for i in ans:
print(spider.get_all(i['url'])[1]) | EngLearner/mainsys/readSpider.py | import requests
from lxml import etree
from bs4 import BeautifulSoup
import os
import re
class RSpider:
def __init__(self):
self.url = 'http://www.kekenet.com/Article/media/economist/'
def get_html(self, url):
try:
# print(url)
html = requests.get(url=url)
html.encoding = 'utf-8'
if html.status_code == 200:
# print(html.text)
return html.text
else:
return None
except Exception as e:
print(e.args)
def get_list_text(self, url):
html = self.get_html(url)
tree = etree.HTML(html)
xp = '//*[@id="menu-list"]/li'
ul = tree.xpath(xp)
ans = []
for li in ul:
li = li.xpath('h2/a[2]')[0]
url = li.get('href')
title = li.get('title')
ans.append({"url": url, "title": title})
return ans
def get_mp3(self, url):
url = re.sub('/\w+/', '/mp3/', url)
# print(url)
html = requests.get(url)
html.encoding = 'utf-8'
if html.status_code == 200:
soup = BeautifulSoup(html.text, 'lxml')
ans = soup.find_all('a', href = re.compile('http://k6.kekenet.com/Sound/'))
if len(ans) != 0:
return ans[0].get('href')
else:
return None
else:
return None
def get_text(self, url):
html = self.get_html(url)
tree = etree.HTML(html)
xp = '//*[@id="article_eng"]/div//text()'
div = tree.xpath(xp)
eng = ""
chinese = ""
if len(div) != 0:
for value in div:
if value == '' or value == '\n':
continue
if isChinese(value):
chinese += value + '\n'
else:
eng += value + '\n'
return (chinese, eng)
def get_all(self, url):
return (self.get_text(url), self.get_mp3(url))
def isChinese(s):
flag = 0
for i in s:
if '\u4e00' <= i <= '\u9fff':
flag = 1
break
return flag
if __name__ == "__main__":
spider = RSpider()
ans = spider.get_list_text(spider.url)
for i in ans:
print(spider.get_all(i['url'])[1]) | 0.067651 | 0.061819 |
import unittest
from qiskit.test import QiskitTestCase
from qiskit.quantum_info.operators.symplectic import PauliTable, pauli_basis
class TestPauliBasis(QiskitTestCase):
"""Test pauli_basis function"""
def test_standard_order_1q(self):
"""Test 1-qubit pauli_basis function."""
labels = ["I", "X", "Y", "Z"]
target = PauliTable.from_labels(labels)
self.assertEqual(pauli_basis(1), target)
def test_weight_order_1q(self):
"""Test 1-qubit pauli_basis function with weight=True."""
labels = ["I", "X", "Y", "Z"]
target = PauliTable.from_labels(labels)
self.assertEqual(pauli_basis(1, weight=True), target)
def test_standard_order_2q(self):
"""Test 2-qubit pauli_basis function."""
labels = [
"II",
"IX",
"IY",
"IZ",
"XI",
"XX",
"XY",
"XZ",
"YI",
"YX",
"YY",
"YZ",
"ZI",
"ZX",
"ZY",
"ZZ",
]
target = PauliTable.from_labels(labels)
self.assertEqual(pauli_basis(2), target)
def test_weight_order_2q(self):
"""Test 2-qubit pauli_basis function with weight=True."""
labels = [
"II",
"IX",
"IY",
"IZ",
"XI",
"YI",
"ZI",
"XX",
"XY",
"XZ",
"YX",
"YY",
"YZ",
"ZX",
"ZY",
"ZZ",
]
target = PauliTable.from_labels(labels)
self.assertEqual(pauli_basis(2, weight=True), target)
def test_standard_order_3q(self):
"""Test 3-qubit pauli_basis function."""
labels = [
"III",
"IIX",
"IIY",
"IIZ",
"IXI",
"IXX",
"IXY",
"IXZ",
"IYI",
"IYX",
"IYY",
"IYZ",
"IZI",
"IZX",
"IZY",
"IZZ",
"XII",
"XIX",
"XIY",
"XIZ",
"XXI",
"XXX",
"XXY",
"XXZ",
"XYI",
"XYX",
"XYY",
"XYZ",
"XZI",
"XZX",
"XZY",
"XZZ",
"YII",
"YIX",
"YIY",
"YIZ",
"YXI",
"YXX",
"YXY",
"YXZ",
"YYI",
"YYX",
"YYY",
"YYZ",
"YZI",
"YZX",
"YZY",
"YZZ",
"ZII",
"ZIX",
"ZIY",
"ZIZ",
"ZXI",
"ZXX",
"ZXY",
"ZXZ",
"ZYI",
"ZYX",
"ZYY",
"ZYZ",
"ZZI",
"ZZX",
"ZZY",
"ZZZ",
]
target = PauliTable.from_labels(labels)
self.assertEqual(pauli_basis(3), target)
def test_weight_order_3q(self):
"""Test 3-qubit pauli_basis function with weight=True."""
labels = [
"III",
"IIX",
"IIY",
"IIZ",
"IXI",
"IYI",
"IZI",
"XII",
"YII",
"ZII",
"IXX",
"IXY",
"IXZ",
"IYX",
"IYY",
"IYZ",
"IZX",
"IZY",
"IZZ",
"XIX",
"XIY",
"XIZ",
"XXI",
"XYI",
"XZI",
"YIX",
"YIY",
"YIZ",
"YXI",
"YYI",
"YZI",
"ZIX",
"ZIY",
"ZIZ",
"ZXI",
"ZYI",
"ZZI",
"XXX",
"XXY",
"XXZ",
"XYX",
"XYY",
"XYZ",
"XZX",
"XZY",
"XZZ",
"YXX",
"YXY",
"YXZ",
"YYX",
"YYY",
"YYZ",
"YZX",
"YZY",
"YZZ",
"ZXX",
"ZXY",
"ZXZ",
"ZYX",
"ZYY",
"ZYZ",
"ZZX",
"ZZY",
"ZZZ",
]
target = PauliTable.from_labels(labels)
self.assertEqual(pauli_basis(3, weight=True), target)
if __name__ == "__main__":
unittest.main() | test/python/quantum_info/operators/symplectic/test_pauli_utils.py | import unittest
from qiskit.test import QiskitTestCase
from qiskit.quantum_info.operators.symplectic import PauliTable, pauli_basis
class TestPauliBasis(QiskitTestCase):
"""Test pauli_basis function"""
def test_standard_order_1q(self):
"""Test 1-qubit pauli_basis function."""
labels = ["I", "X", "Y", "Z"]
target = PauliTable.from_labels(labels)
self.assertEqual(pauli_basis(1), target)
def test_weight_order_1q(self):
"""Test 1-qubit pauli_basis function with weight=True."""
labels = ["I", "X", "Y", "Z"]
target = PauliTable.from_labels(labels)
self.assertEqual(pauli_basis(1, weight=True), target)
def test_standard_order_2q(self):
"""Test 2-qubit pauli_basis function."""
labels = [
"II",
"IX",
"IY",
"IZ",
"XI",
"XX",
"XY",
"XZ",
"YI",
"YX",
"YY",
"YZ",
"ZI",
"ZX",
"ZY",
"ZZ",
]
target = PauliTable.from_labels(labels)
self.assertEqual(pauli_basis(2), target)
def test_weight_order_2q(self):
"""Test 2-qubit pauli_basis function with weight=True."""
labels = [
"II",
"IX",
"IY",
"IZ",
"XI",
"YI",
"ZI",
"XX",
"XY",
"XZ",
"YX",
"YY",
"YZ",
"ZX",
"ZY",
"ZZ",
]
target = PauliTable.from_labels(labels)
self.assertEqual(pauli_basis(2, weight=True), target)
def test_standard_order_3q(self):
"""Test 3-qubit pauli_basis function."""
labels = [
"III",
"IIX",
"IIY",
"IIZ",
"IXI",
"IXX",
"IXY",
"IXZ",
"IYI",
"IYX",
"IYY",
"IYZ",
"IZI",
"IZX",
"IZY",
"IZZ",
"XII",
"XIX",
"XIY",
"XIZ",
"XXI",
"XXX",
"XXY",
"XXZ",
"XYI",
"XYX",
"XYY",
"XYZ",
"XZI",
"XZX",
"XZY",
"XZZ",
"YII",
"YIX",
"YIY",
"YIZ",
"YXI",
"YXX",
"YXY",
"YXZ",
"YYI",
"YYX",
"YYY",
"YYZ",
"YZI",
"YZX",
"YZY",
"YZZ",
"ZII",
"ZIX",
"ZIY",
"ZIZ",
"ZXI",
"ZXX",
"ZXY",
"ZXZ",
"ZYI",
"ZYX",
"ZYY",
"ZYZ",
"ZZI",
"ZZX",
"ZZY",
"ZZZ",
]
target = PauliTable.from_labels(labels)
self.assertEqual(pauli_basis(3), target)
def test_weight_order_3q(self):
"""Test 3-qubit pauli_basis function with weight=True."""
labels = [
"III",
"IIX",
"IIY",
"IIZ",
"IXI",
"IYI",
"IZI",
"XII",
"YII",
"ZII",
"IXX",
"IXY",
"IXZ",
"IYX",
"IYY",
"IYZ",
"IZX",
"IZY",
"IZZ",
"XIX",
"XIY",
"XIZ",
"XXI",
"XYI",
"XZI",
"YIX",
"YIY",
"YIZ",
"YXI",
"YYI",
"YZI",
"ZIX",
"ZIY",
"ZIZ",
"ZXI",
"ZYI",
"ZZI",
"XXX",
"XXY",
"XXZ",
"XYX",
"XYY",
"XYZ",
"XZX",
"XZY",
"XZZ",
"YXX",
"YXY",
"YXZ",
"YYX",
"YYY",
"YYZ",
"YZX",
"YZY",
"YZZ",
"ZXX",
"ZXY",
"ZXZ",
"ZYX",
"ZYY",
"ZYZ",
"ZZX",
"ZZY",
"ZZZ",
]
target = PauliTable.from_labels(labels)
self.assertEqual(pauli_basis(3, weight=True), target)
if __name__ == "__main__":
unittest.main() | 0.757256 | 0.759359 |
import math
from PySide import QtCore, QtGui
class View(QtGui.QGraphicsView):
'''A View supporting smooth panning and zooming. Use Alt+Left Mouse to
pan and Alt+Middle or Right Mouse to zoom. Dragging without Alt drags out
a selection marquee.
.. seealso::
Documentation for :class:`QtGui.QGraphicsView`'''
def __init__(self, *args, **kwargs):
super(View, self).__init__(*args, **kwargs)
self.setTransformationAnchor(QtGui.QGraphicsView.NoAnchor)
self.setResizeAnchor(QtGui.QGraphicsView.NoAnchor)
self.setRubberBandSelectionMode(QtCore.Qt.IntersectsItemShape)
self.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
self.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
self.setRenderHint(QtGui.QPainter.Antialiasing)
# Set a massive canvas for seemingly unlimited pan and zoom
self.setSceneRect(0, 0, 32000, 32000)
self.centerOn(16000, 16000)
self._last_pos = QtCore.QPoint(0, 0)
self._drag_mod = QtCore.Qt.AltModifier
self._drag_buttons = [QtCore.Qt.LeftButton]
self._pan_buttons = [QtCore.Qt.LeftButton]
self._zoom_buttons = [QtCore.Qt.MiddleButton, QtCore.Qt.RightButton]
self._rel_scale = 1
def mousePressEvent(self, event):
'''Overloaded to support both marquee dragging and pan/zoom. Here we
setup the dragging mode and store the anchor position.'''
m = event.modifiers()
b = event.buttons()
if m == self._drag_mod or not b in self._drag_buttons:
self.setDragMode(QtGui.QGraphicsView.NoDrag)
else:
self.setDragMode(QtGui.QGraphicsView.RubberBandDrag)
self._last_pos = self._anchor_pos = event.pos()
super(View, self).mousePressEvent(event)
def zoom(self, factor):
'''Zoom the view.
:param factor: Amount to scale'''
rel_scale = self._rel_scale * factor
if rel_scale < 0.2 or rel_scale > 8:
return
self._rel_scale = rel_scale
transform = self.transform()
transform.scale(factor, factor)
self.setTransform(transform)
def pan(self, x, y):
'''Pan the view.
:param x: Number of pixels in x
:param y: Number of pixels in y'''
self.translate(-x, -y)
def mouseMoveEvent(self, event):
if not event.modifiers() == QtCore.Qt.AltModifier:
super(View, self).mouseMoveEvent(event)
return
b = event.buttons()
pos = event.pos()
delta = pos - self._last_pos
if b in self._pan_buttons:
delta /= self.transform().m11()
self.pan(-delta.x(), -delta.y())
elif b in self._zoom_buttons:
old_pos = self.mapToScene(self._anchor_pos)
step = 0.02 * max(math.sqrt(delta.x() ** 2 + delta.y() ** 2), 1.0)
if delta.x() < 0 or -delta.y() < 0:
step *= -1
factor = 1 + step
self.zoom(factor) # Zoom
delta = self.mapToScene(self._anchor_pos) - old_pos
self.pan(-delta.x(), -delta.y()) # Pan to center on mouse pivot
self._last_pos = pos
def mouseReleaseEvent(self, event):
if event.modifiers() == self._drag_mod:
self.setDragMode(QtGui.QGraphicsView.NoDrag)
else:
self.setDragMode(QtGui.QGraphicsView.RubberBandDrag)
super(View, self).mouseReleaseEvent(event) | nodify/view.py | import math
from PySide import QtCore, QtGui
class View(QtGui.QGraphicsView):
'''A View supporting smooth panning and zooming. Use Alt+Left Mouse to
pan and Alt+Middle or Right Mouse to zoom. Dragging without Alt drags out
a selection marquee.
.. seealso::
Documentation for :class:`QtGui.QGraphicsView`'''
def __init__(self, *args, **kwargs):
super(View, self).__init__(*args, **kwargs)
self.setTransformationAnchor(QtGui.QGraphicsView.NoAnchor)
self.setResizeAnchor(QtGui.QGraphicsView.NoAnchor)
self.setRubberBandSelectionMode(QtCore.Qt.IntersectsItemShape)
self.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
self.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
self.setRenderHint(QtGui.QPainter.Antialiasing)
# Set a massive canvas for seemingly unlimited pan and zoom
self.setSceneRect(0, 0, 32000, 32000)
self.centerOn(16000, 16000)
self._last_pos = QtCore.QPoint(0, 0)
self._drag_mod = QtCore.Qt.AltModifier
self._drag_buttons = [QtCore.Qt.LeftButton]
self._pan_buttons = [QtCore.Qt.LeftButton]
self._zoom_buttons = [QtCore.Qt.MiddleButton, QtCore.Qt.RightButton]
self._rel_scale = 1
def mousePressEvent(self, event):
'''Overloaded to support both marquee dragging and pan/zoom. Here we
setup the dragging mode and store the anchor position.'''
m = event.modifiers()
b = event.buttons()
if m == self._drag_mod or not b in self._drag_buttons:
self.setDragMode(QtGui.QGraphicsView.NoDrag)
else:
self.setDragMode(QtGui.QGraphicsView.RubberBandDrag)
self._last_pos = self._anchor_pos = event.pos()
super(View, self).mousePressEvent(event)
def zoom(self, factor):
'''Zoom the view.
:param factor: Amount to scale'''
rel_scale = self._rel_scale * factor
if rel_scale < 0.2 or rel_scale > 8:
return
self._rel_scale = rel_scale
transform = self.transform()
transform.scale(factor, factor)
self.setTransform(transform)
def pan(self, x, y):
'''Pan the view.
:param x: Number of pixels in x
:param y: Number of pixels in y'''
self.translate(-x, -y)
def mouseMoveEvent(self, event):
if not event.modifiers() == QtCore.Qt.AltModifier:
super(View, self).mouseMoveEvent(event)
return
b = event.buttons()
pos = event.pos()
delta = pos - self._last_pos
if b in self._pan_buttons:
delta /= self.transform().m11()
self.pan(-delta.x(), -delta.y())
elif b in self._zoom_buttons:
old_pos = self.mapToScene(self._anchor_pos)
step = 0.02 * max(math.sqrt(delta.x() ** 2 + delta.y() ** 2), 1.0)
if delta.x() < 0 or -delta.y() < 0:
step *= -1
factor = 1 + step
self.zoom(factor) # Zoom
delta = self.mapToScene(self._anchor_pos) - old_pos
self.pan(-delta.x(), -delta.y()) # Pan to center on mouse pivot
self._last_pos = pos
def mouseReleaseEvent(self, event):
if event.modifiers() == self._drag_mod:
self.setDragMode(QtGui.QGraphicsView.NoDrag)
else:
self.setDragMode(QtGui.QGraphicsView.RubberBandDrag)
super(View, self).mouseReleaseEvent(event) | 0.615435 | 0.244758 |
import logging
logger = logging.getLogger(__name__)
class ConstIterations:
"""Stopping Criterion: After certain iterations
Args:
num_iters (:obj:`int`): Number of iterations
Attributes:
num_iters (:obj:`int`): Number of iterations
cur_iter (:obj:`int`): Current number of iterations
"""
def __init__(self, num_iters):
self.num_iters = num_iters
self.cur_iter = 0
def reset(self):
"""Reset internal iteration counter
"""
self.cur_iter = 0
def continue_learning(self):
"""Determine whether learning should continue
If so, return True, otherwise, return False.
"""
if self.cur_iter < self.num_iters:
self.cur_iter += 1
return True
else:
return False
class MonitorBased:
    """Stop training based on the return of a monitoring function.

    Learning continues as long as the monitored value has improved within the
    past ``n_steps`` calls; otherwise it stops.  Whenever the monitored value
    is the best seen so far, the parameter save function is invoked, so once
    training ends the last-saved parameters are the best ones.

    Args:
        n_steps (:obj:`int`): Number of consecutive non-improving steps tolerated.
        monitor_fn: Parameter monitor function.
        monitor_fn_args (:obj:`tuple`): Argument tuple (arg1, arg2, ...) for monitor function.
        save_fn: Parameter save function.
        save_fn_args (:obj:`tuple`): Argument tuple (arg1, arg2, ...) for save function.

    Attributes:
        step_count (:obj:`int`): Consecutive steps without improvement.
        best_value: Best monitored value seen so far (``None`` before the first call).
    """
    def __init__(self, n_steps, monitor_fn, monitor_fn_args, save_fn, save_fn_args):
        self.n_steps = n_steps
        self.monitor_fn = monitor_fn
        self.monitor_fn_args = monitor_fn_args
        self.save_fn = save_fn
        self.save_fn_args = save_fn_args
        self.step_count = 0
        self.best_value = None

    def reset(self):
        """Forget the best value and restart the non-improvement counter."""
        self.step_count = 0
        self.best_value = None

    def continue_learning(self):
        """Poll the monitor and decide whether learning should continue.

        Returns:
            bool: True to keep training, False to stop.
        """
        param = self.monitor_fn(*self.monitor_fn_args)
        if self.best_value is None or param > self.best_value:
            # New best (or very first) observation: record it and save.
            # BUGFIX: previously the first observation set best_value and then
            # fell through to the non-strict comparison below, so it was
            # wrongly counted as a non-improving step (step_count became 1).
            self.step_count = 0
            self.best_value = param
            self.save_fn(*self.save_fn_args)
            logging.getLogger(__name__).info('New Best: %g', self.best_value)
        else:
            self.step_count += 1
            if self.step_count > self.n_steps:
                return False
        return True
logger = logging.getLogger(__name__)
class ConstIterations:
"""Stopping Criterion: After certain iterations
Args:
num_iters (:obj:`int`): Number of iterations
Attributes:
num_iters (:obj:`int`): Number of iterations
cur_iter (:obj:`int`): Current number of iterations
"""
def __init__(self, num_iters):
self.num_iters = num_iters
self.cur_iter = 0
def reset(self):
"""Reset internal iteration counter
"""
self.cur_iter = 0
def continue_learning(self):
"""Determine whether learning should continue
If so, return True, otherwise, return False.
"""
if self.cur_iter < self.num_iters:
self.cur_iter += 1
return True
else:
return False
class MonitorBased:
"""Stop training based on the return of a monitoring function.
If the monitoring result keep improving within past n_steps, keep learning.
Otherwise, stop.
If the monitoring result is the best at the moment, call the parameter save
function.
Once it is done, the parameters saved last is the training results.
Args:
n_steps (:obj:`int`): The amount of steps to look for improvement
monitor_fn: Parameter monitor function.
monitor_fn_args (:obj:`tuple`): Argument tuple (arg1, arg2, ...) for monitor function.
save_fn: Parameter save function.
save_fn_args (:obj:`tuple`): Argument tuple (arg1, arg2, ...) for save function.
Attributes:
n_steps (:obj:`int`): The amount of steps to look for improvement
monitor_fn: Parameter monitor function.
monitor_fn_args (:obj:`tuple`): Argument tuple (arg1, arg2, ...) for monitor function.
save_fn: Parameter save function.
save_fn_args (:obj:`tuple`): Argument tuple (arg1, arg2, ...) for save function.
step_count (:obj:`int`): Number of steps that the parameter monitored is worse than the best value.
best_value: Best value seen so far.
"""
def __init__(self, n_steps, monitor_fn, monitor_fn_args, save_fn, save_fn_args):
self.n_steps = n_steps
self.monitor_fn = monitor_fn
self.monitor_fn_args = monitor_fn_args
self.save_fn = save_fn
self.save_fn_args = save_fn_args
self.step_count = 0
self.best_value = None
def reset(self):
"""Reset internal step count
"""
self.step_count = 0
self.best_value = None
def continue_learning(self):
"""Determine whether learning should continue
If so, return True, otherwise, return False.
"""
param = self.monitor_fn(*self.monitor_fn_args)
if self.best_value is None:
self.best_value = param
self.save_fn(*self.save_fn_args)
if param > self.best_value:
self.step_count = 0
self.best_value = param
self.save_fn(*self.save_fn_args)
logger.info('New Best: %g' % self.best_value)
else:
self.step_count += 1
if self.step_count > self.n_steps:
return False
return True | 0.865352 | 0.367327 |
from flask_app import flask_app,db
from datetime import datetime
enable_search = True
import flask_whooshalchemy as whooshalchemy
class Message(db.Model):
    """A user-submitted message (contact/feedback), with sender details.

    Stored in a separate database via the 'message' bind key.
    """
    # Note the __bind_key__ below --> this table goes to a different database.
    __bind_key__ = 'message'
    id = db.Column(db.Integer, primary_key=True)
    body = db.Column(db.String(500), index=True, nullable=False)
    email = db.Column(db.String(100), index=True, nullable=False)
    # Free-form sender category string — presumably set by the submitting form;
    # TODO(review): confirm the allowed values against the caller.
    sender_type = db.Column(db.String(100), nullable=False)
    subscribed = db.Column(db.Boolean)
    timestamp = db.Column(db.DateTime)
    def __init__(self,body,email,sender_type,subscribed):
        self.body = body
        self.email = email
        self.sender_type = sender_type
        self.subscribed = subscribed
        # Timestamp is assigned automatically at creation (UTC); callers never
        # supply it.
        self.timestamp = datetime.utcnow()
class Zip_to_latlong(db.Model):
    """Lookup table mapping a 5-digit ZIP code to latitude/longitude."""
    __tablename__ = 'zip_to_latlong'
    id = db.Column(db.Integer, index=True, primary_key=True)
    zip_code = db.Column(db.String(5), index=True, unique=True)
    # Trailing underscores presumably avoid clashing with other names; the
    # fields hold latitude and longitude respectively.
    lat_ = db.Column(db.Float(10))
    long_ = db.Column(db.Float(10))
    def __init__(self,zip_code, lat_,long_):
        self.zip_code = zip_code
        self.lat_ = lat_
        self.long_ = long_
class Wiki_summary(db.Model):
    """Wikipedia summary and social-media handles for an institution.

    Backed by the 'wiki_social' table; ``wiki_summary`` is full-text
    searchable (see ``__searchable__``).
    """
    __tablename__ = 'wiki_social'
    __searchable__ = ['wiki_summary']
    id = db.Column(db.Integer, index=True, primary_key=True)
    # uid is unique and required — presumably the cross-table school key;
    # TODO(review): confirm it matches School_details.uid.
    uid = db.Column(db.String(150), index=True, unique=True, nullable=False)
    inst_nm = db.Column(db.String(100))
    UNITID = db.Column(db.String(10))
    OPEID = db.Column(db.String(10))
    wiki_summary = db.Column(db.String(10000))
    date_extracted = db.Column(db.String(10))
    wiki_url = db.Column(db.String(150))
    # Facebook / Twitter handles.
    FB_HANDL = db.Column(db.String(100))
    TW_HANDL = db.Column(db.String(100))
    def __init__(self, uid, inst_nm, UNITID, OPEID, wiki_summary, date_extracted, wiki_url, FB_HANDL, TW_HANDL):
        self.uid = uid
        self.inst_nm = inst_nm
        self.UNITID = UNITID
        self.OPEID = OPEID
        self.wiki_summary = wiki_summary
        self.date_extracted = date_extracted
        self.wiki_url = wiki_url
        self.FB_HANDL = FB_HANDL
        self.TW_HANDL = TW_HANDL
class Nat_avg(db.Model):
    """National averages of scorecard metrics, one row per Carnegie class.

    Column names mirror the raw scorecard metric codes used in
    School_details — presumably kept so rows can be compared field-for-field;
    TODO(review): confirm rows are keyed by CCBASIC.
    """
    __tablename__ = 'national_average'
    id = db.Column(db.Integer, index=True, primary_key=True)
    CCBASIC = db.Column(db.Integer)
    MN_EARN_WNE_P6 = db.Column(db.Float(5))
    DEBT_MDN = db.Column(db.Float(5))
    C150_4_COMB = db.Column(db.Float(3))
    COSTT4_COMB = db.Column(db.Float(5))
    WDRAW_ORIG_YR6_RT = db.Column(db.Float(3))
    NPT4_COMB = db.Column(db.Float(5))
    PCTPELL = db.Column(db.Float(3))
    RET_FT4_COMB = db.Column(db.Float(3))
    RET_PT4_COMB = db.Column(db.Float(3))
    ADJ_AVGFACSAL = db.Column(db.Float(5))
    ADJ_INEXPFTE = db.Column(db.Float(5))
    PFTFTUG1_EF = db.Column(db.Float(3))
    PFTFAC = db.Column(db.Float(3))
    COMB_RET_RATE = db.Column(db.Float(3))
    def __init__(self,CCBASIC,MN_EARN_WNE_P6,DEBT_MDN,C150_4_COMB,COSTT4_COMB,WDRAW_ORIG_YR6_RT,
                 NPT4_COMB,PCTPELL,RET_FT4_COMB,RET_PT4_COMB,ADJ_AVGFACSAL,ADJ_INEXPFTE,
                 PFTFTUG1_EF,PFTFAC,COMB_RET_RATE):
        self.CCBASIC = CCBASIC
        self.MN_EARN_WNE_P6 = MN_EARN_WNE_P6
        self.DEBT_MDN = DEBT_MDN
        self.C150_4_COMB = C150_4_COMB
        self.COSTT4_COMB = COSTT4_COMB
        self.WDRAW_ORIG_YR6_RT = WDRAW_ORIG_YR6_RT
        self.NPT4_COMB = NPT4_COMB
        self.PCTPELL = PCTPELL
        self.RET_FT4_COMB = RET_FT4_COMB
        self.RET_PT4_COMB = RET_PT4_COMB
        self.ADJ_AVGFACSAL = ADJ_AVGFACSAL
        self.ADJ_INEXPFTE = ADJ_INEXPFTE
        self.PFTFTUG1_EF = PFTFTUG1_EF
        self.PFTFAC = PFTFAC
        self.COMB_RET_RATE = COMB_RET_RATE
class School_details(db.Model):
    """Per-school record combining identity, location, test-score ranges,
    cost/outcome metrics, derived scores and presence flags.

    ``INSTNM`` (institution name) is full-text searchable. Upper-case column
    names mirror raw scorecard metric codes; ``r_fin_*`` / ``rankp_*`` /
    ``*_PRESENT`` are presumably derived values and availability flags
    computed upstream — TODO(review): confirm against the ETL that fills
    this table.
    """
    __tablename__ = 'school_details'
    __searchable__ = ['INSTNM']
    # --- identity & location ---
    id = db.Column(db.Integer, index=True, primary_key=True)
    uid = db.Column(db.String(150), index=True, unique=True)
    INSTNM = db.Column(db.String(100))
    UNITID = db.Column(db.String(10))
    OPEID = db.Column(db.String(10))
    OPEID6 = db.Column(db.String(10))
    CITY = db.Column(db.String(50))
    STABBR = db.Column(db.String(2))
    ZIP5 = db.Column(db.String(5))
    PREDDEG = db.Column(db.Integer)
    HTTPS_INSTURL = db.Column(db.String(100))
    HTTPS_NPCURL = db.Column(db.String(300))
    HIGHDEG = db.Column(db.Integer)
    CONTROL = db.Column(db.Integer)
    REGION = db.Column(db.Integer)
    LOCALE = db.Column(db.Integer)
    LATITUDE = db.Column(db.Float(10))
    LONGITUDE = db.Column(db.Float(10))
    CCBASIC = db.Column(db.Integer)
    CCUGPROF = db.Column(db.Integer)
    CCSIZSET = db.Column(db.Integer)
    # --- SAT/ACT midpoints and 25th/75th percentiles ---
    SATVRMID = db.Column(db.Integer)
    SATMTMID = db.Column(db.Integer)
    SATWRMID = db.Column(db.Integer)
    ACTCMMID = db.Column(db.Integer)
    ACTENMID = db.Column(db.Integer)
    ACTMTMID = db.Column(db.Integer)
    ACTWRMID = db.Column(db.Integer)
    SATVR25 = db.Column(db.Integer)
    SATVR75 = db.Column(db.Integer)
    SATMT25 = db.Column(db.Integer)
    SATMT75 = db.Column(db.Integer)
    SATWR25 = db.Column(db.Integer)
    SATWR75 = db.Column(db.Integer)
    ACTCM25 = db.Column(db.Integer)
    ACTCM75 = db.Column(db.Integer)
    ACTEN25 = db.Column(db.Integer)
    ACTEN75 = db.Column(db.Integer)
    ACTMT25 = db.Column(db.Integer)
    ACTMT75 = db.Column(db.Integer)
    ACTWR25 = db.Column(db.Integer)
    ACTWR75 = db.Column(db.Integer)
    # --- enrollment, cost and affiliation ---
    POP_SUBS = db.Column(db.String(400))
    UGDS = db.Column(db.Float(5))
    TUITIONFEE_IN = db.Column(db.Float(5))
    TUITIONFEE_OUT = db.Column(db.Float(5))
    ADJ_ADM_RATE = db.Column(db.Float(5))
    OTHER_AFFIL = db.Column(db.String(300))
    REL_AFFIL = db.Column(db.String(100), nullable=True)
    COUNT_MISSING = db.Column(db.Integer)
    # --- derived indices / scores ---
    VALUE_INDEX = db.Column(db.Float(5))
    CARE_INDEX = db.Column(db.Float(5))
    Value_score = db.Column(db.Float(5))
    Care_score = db.Column(db.Float(5))
    r_fin_MN_EARN_WNE_P6 = db.Column(db.Float(5))
    r_fin_DEBT_MDN = db.Column(db.Float(5))
    r_fin_C150_4_COMB = db.Column(db.Float(5))
    r_fin_COSTT4_COMB = db.Column(db.Float(5))
    r_fin_WDRAW_ORIG_YR6_RT = db.Column(db.Float(5))
    r_fin_NPT4_COMB = db.Column(db.Float(5))
    r_fin_PCTPELL = db.Column(db.Float(5))
    r_fin_RET_FT4_COMB = db.Column(db.Float(5))
    r_fin_RET_PT4_COMB = db.Column(db.Float(5))
    r_fin_ADJ_AVGFACSAL = db.Column(db.Float(5))
    r_fin_ADJ_INEXPFTE = db.Column(db.Float(5))
    r_fin_PFTFTUG1_EF = db.Column(db.Float(5))
    r_fin_PFTFAC = db.Column(db.Float(5))
    r_fin_COMB_RET_RATE = db.Column(db.Float(5))
    # --- per-metric availability flags ---
    MN_EARN_WNE_P6_PRESENT = db.Column(db.Integer)
    DEBT_MDN_PRESENT = db.Column(db.Float(5))
    C150_4_COMB_PRESENT = db.Column(db.Integer)
    COSTT4_COMB_PRESENT = db.Column(db.Integer)
    WDRAW_ORIG_YR6_RT_PRESENT = db.Column(db.Integer)
    NPT4_COMB_PRESENT = db.Column(db.Integer)
    PCTPELL_PRESENT = db.Column(db.Integer)
    RET_FT4_COMB_PRESENT = db.Column(db.Integer)
    RET_PT4_COMB_PRESENT = db.Column(db.Integer)
    ADJ_AVGFACSAL_PRESENT = db.Column(db.Integer)
    ADJ_INEXPFTE_PRESENT = db.Column(db.Integer)
    PFTFTUG1_EF_PRESENT = db.Column(db.Integer)
    PFTFAC_PRESENT = db.Column(db.Integer)
    fin_COMB_RET_RATE_PRESENT = db.Column(db.Integer)
    # --- per-metric rank percentiles ---
    rankp_MN_EARN_WNE_P6 = db.Column(db.Float(5))
    rankp_DEBT_MDN = db.Column(db.Float(5))
    rankp_C150_4_COMB = db.Column(db.Float(5))
    rankp_COSTT4_COMB = db.Column(db.Float(5))
    rankp_WDRAW_ORIG_YR6_RT = db.Column(db.Float(5))
    rankp_NPT4_COMB = db.Column(db.Float(5))
    rankp_PCTPELL = db.Column(db.Float(5))
    rankp_ADJ_AVGFACSAL = db.Column(db.Float(5))
    rankp_ADJ_INEXPFTE = db.Column(db.Float(5))
    rankp_PFTFAC = db.Column(db.Float(5))
    rankp_COMB_RET_RATE = db.Column(db.Float(5))
    adm_pct = db.Column(db.Float(3))
    IF_SAT_PRESENT = db.Column(db.Boolean)
    IF_ACT_PRESENT = db.Column(db.Boolean)
    def __init__(self,uid, UNITID, OPEID,OPEID6,INSTNM,CITY,STABBR,ZIP5,PREDDEG,HTTPS_INSTURL,HTTPS_NPCURL,
                 HIGHDEG,CONTROL,REGION,LOCALE,LATITUDE,LONGITUDE,CCBASIC,CCUGPROF,CCSIZSET,SATVRMID,
                 SATMTMID,SATWRMID,ACTCMMID,ACTENMID,ACTMTMID,ACTWRMID,SATVR25, SATVR75, SATMT25, SATMT75, SATWR25, SATWR75, ACTCM25,ACTCM75, ACTEN25,ACTEN75,ACTMT25, ACTMT75,ACTWR25, ACTWR75,POP_SUBS,UGDS,
                 TUITIONFEE_IN,TUITIONFEE_OUT,ADJ_ADM_RATE,OTHER_AFFIL,REL_AFFIL,COUNT_MISSING,
                 VALUE_INDEX,CARE_INDEX,Value_score, Care_score, r_fin_MN_EARN_WNE_P6,r_fin_DEBT_MDN,r_fin_C150_4_COMB,r_fin_COSTT4_COMB,
                 r_fin_WDRAW_ORIG_YR6_RT,r_fin_NPT4_COMB,r_fin_PCTPELL,r_fin_RET_FT4_COMB,r_fin_RET_PT4_COMB,
                 r_fin_ADJ_AVGFACSAL,r_fin_ADJ_INEXPFTE,r_fin_PFTFTUG1_EF,r_fin_PFTFAC,r_fin_COMB_RET_RATE,
                 MN_EARN_WNE_P6_PRESENT,DEBT_MDN_PRESENT,C150_4_COMB_PRESENT,COSTT4_COMB_PRESENT,
                 WDRAW_ORIG_YR6_RT_PRESENT,NPT4_COMB_PRESENT,PCTPELL_PRESENT,RET_FT4_COMB_PRESENT,
                 RET_PT4_COMB_PRESENT,ADJ_AVGFACSAL_PRESENT,ADJ_INEXPFTE_PRESENT,PFTFTUG1_EF_PRESENT,
                 PFTFAC_PRESENT,fin_COMB_RET_RATE_PRESENT,rankp_MN_EARN_WNE_P6,rankp_DEBT_MDN,
                 rankp_C150_4_COMB,rankp_COSTT4_COMB,rankp_WDRAW_ORIG_YR6_RT,rankp_NPT4_COMB,rankp_PCTPELL,
                 rankp_ADJ_AVGFACSAL,rankp_ADJ_INEXPFTE,rankp_PFTFAC,rankp_COMB_RET_RATE,adm_pct,IF_SAT_PRESENT,
                 IF_ACT_PRESENT):
        # Direct field-for-field assignment; argument order must match the
        # upstream loader exactly.
        self.uid = uid
        self.UNITID = UNITID
        self.OPEID = OPEID
        self.OPEID6 = OPEID6
        self.INSTNM = INSTNM
        self.CITY = CITY
        self.STABBR = STABBR
        self.ZIP5 = ZIP5
        self.PREDDEG = PREDDEG
        self.HTTPS_INSTURL = HTTPS_INSTURL
        self.HTTPS_NPCURL = HTTPS_NPCURL
        self.HIGHDEG = HIGHDEG
        self.CONTROL = CONTROL
        self.REGION = REGION
        self.LOCALE = LOCALE
        self.LATITUDE = LATITUDE
        self.LONGITUDE = LONGITUDE
        self.CCBASIC = CCBASIC
        self.CCUGPROF = CCUGPROF
        self.CCSIZSET = CCSIZSET
        self.SATVRMID = SATVRMID
        self.SATMTMID = SATMTMID
        self.SATWRMID = SATWRMID
        self.ACTCMMID = ACTCMMID
        self.ACTENMID = ACTENMID
        self.ACTMTMID = ACTMTMID
        self.ACTWRMID = ACTWRMID
        self.SATVR25 = SATVR25
        self.SATVR75 = SATVR75
        self.SATMT25 = SATMT25
        self.SATMT75 = SATMT75
        self.SATWR25 = SATWR25
        self.SATWR75 = SATWR75
        self.ACTCM25 = ACTCM25
        self.ACTCM75 = ACTCM75
        self.ACTEN25 = ACTEN25
        self.ACTEN75 = ACTEN75
        self.ACTMT25 = ACTMT25
        self.ACTMT75 = ACTMT75
        self.ACTWR25 = ACTWR25
        self.ACTWR75 = ACTWR75
        self.POP_SUBS = POP_SUBS
        self.UGDS = UGDS
        self.TUITIONFEE_IN = TUITIONFEE_IN
        self.TUITIONFEE_OUT = TUITIONFEE_OUT
        self.ADJ_ADM_RATE = ADJ_ADM_RATE
        self.OTHER_AFFIL = OTHER_AFFIL
        self.REL_AFFIL = REL_AFFIL
        self.COUNT_MISSING = COUNT_MISSING
        self.VALUE_INDEX = VALUE_INDEX
        self.CARE_INDEX = CARE_INDEX
        self.Value_score = Value_score
        self.Care_score = Care_score
        self.r_fin_MN_EARN_WNE_P6 = r_fin_MN_EARN_WNE_P6
        self.r_fin_DEBT_MDN = r_fin_DEBT_MDN
        self.r_fin_C150_4_COMB = r_fin_C150_4_COMB
        self.r_fin_COSTT4_COMB = r_fin_COSTT4_COMB
        self.r_fin_WDRAW_ORIG_YR6_RT = r_fin_WDRAW_ORIG_YR6_RT
        self.r_fin_NPT4_COMB = r_fin_NPT4_COMB
        self.r_fin_PCTPELL = r_fin_PCTPELL
        self.r_fin_RET_FT4_COMB = r_fin_RET_FT4_COMB
        self.r_fin_RET_PT4_COMB = r_fin_RET_PT4_COMB
        self.r_fin_ADJ_AVGFACSAL = r_fin_ADJ_AVGFACSAL
        self.r_fin_ADJ_INEXPFTE = r_fin_ADJ_INEXPFTE
        self.r_fin_PFTFTUG1_EF = r_fin_PFTFTUG1_EF
        self.r_fin_PFTFAC = r_fin_PFTFAC
        self.r_fin_COMB_RET_RATE = r_fin_COMB_RET_RATE
        self.MN_EARN_WNE_P6_PRESENT = MN_EARN_WNE_P6_PRESENT
        self.DEBT_MDN_PRESENT = DEBT_MDN_PRESENT
        self.C150_4_COMB_PRESENT = C150_4_COMB_PRESENT
        self.COSTT4_COMB_PRESENT = COSTT4_COMB_PRESENT
        self.WDRAW_ORIG_YR6_RT_PRESENT = WDRAW_ORIG_YR6_RT_PRESENT
        self.NPT4_COMB_PRESENT = NPT4_COMB_PRESENT
        self.PCTPELL_PRESENT = PCTPELL_PRESENT
        self.RET_FT4_COMB_PRESENT = RET_FT4_COMB_PRESENT
        self.RET_PT4_COMB_PRESENT = RET_PT4_COMB_PRESENT
        self.ADJ_AVGFACSAL_PRESENT = ADJ_AVGFACSAL_PRESENT
        self.ADJ_INEXPFTE_PRESENT = ADJ_INEXPFTE_PRESENT
        self.PFTFTUG1_EF_PRESENT = PFTFTUG1_EF_PRESENT
        self.PFTFAC_PRESENT = PFTFAC_PRESENT
        self.fin_COMB_RET_RATE_PRESENT = fin_COMB_RET_RATE_PRESENT
        self.rankp_MN_EARN_WNE_P6 = rankp_MN_EARN_WNE_P6
        self.rankp_DEBT_MDN = rankp_DEBT_MDN
        self.rankp_C150_4_COMB = rankp_C150_4_COMB
        self.rankp_COSTT4_COMB = rankp_COSTT4_COMB
        self.rankp_WDRAW_ORIG_YR6_RT = rankp_WDRAW_ORIG_YR6_RT
        self.rankp_NPT4_COMB = rankp_NPT4_COMB
        self.rankp_PCTPELL = rankp_PCTPELL
        self.rankp_ADJ_AVGFACSAL = rankp_ADJ_AVGFACSAL
        self.rankp_ADJ_INEXPFTE = rankp_ADJ_INEXPFTE
        self.rankp_PFTFAC = rankp_PFTFAC
        self.rankp_COMB_RET_RATE = rankp_COMB_RET_RATE
        self.adm_pct = adm_pct
        self.IF_SAT_PRESENT = IF_SAT_PRESENT
        self.IF_ACT_PRESENT = IF_ACT_PRESENT
if enable_search:
whooshalchemy.whoosh_index(flask_app, School_details)
whooshalchemy.whoosh_index(flask_app, Wiki_summary) | flask_app/models.py | from flask_app import flask_app,db
from datetime import datetime
enable_search = True
import flask_whooshalchemy as whooshalchemy
class Message(db.Model):
#Note the __bind_key__ below --> as this table is going to a different database
__bind_key__ = 'message'
id = db.Column(db.Integer, primary_key=True)
body = db.Column(db.String(500), index=True, nullable=False)
email = db.Column(db.String(100), index=True, nullable=False)
sender_type = db.Column(db.String(100), nullable=False)
subscribed = db.Column(db.Boolean)
timestamp = db.Column(db.DateTime)
def __init__(self,body,email,sender_type,subscribed):
self.body = body
self.email = email
self.sender_type = sender_type
self.subscribed = subscribed
self.timestamp = datetime.utcnow()
class Zip_to_latlong(db.Model):
__tablename__ = 'zip_to_latlong'
id = db.Column(db.Integer, index=True, primary_key=True)
zip_code = db.Column(db.String(5), index=True, unique=True)
lat_ = db.Column(db.Float(10))
long_ = db.Column(db.Float(10))
def __init__(self,zip_code, lat_,long_):
self.zip_code = zip_code
self.lat_ = lat_
self.long_ = long_
class Wiki_summary(db.Model):
__tablename__ = 'wiki_social'
__searchable__ = ['wiki_summary']
id = db.Column(db.Integer, index=True, primary_key=True)
uid = db.Column(db.String(150), index=True, unique=True, nullable=False)
inst_nm = db.Column(db.String(100))
UNITID = db.Column(db.String(10))
OPEID = db.Column(db.String(10))
wiki_summary = db.Column(db.String(10000))
date_extracted = db.Column(db.String(10))
wiki_url = db.Column(db.String(150))
FB_HANDL = db.Column(db.String(100))
TW_HANDL = db.Column(db.String(100))
def __init__(self, uid, inst_nm, UNITID, OPEID, wiki_summary, date_extracted, wiki_url, FB_HANDL, TW_HANDL):
self.uid = uid
self.inst_nm = inst_nm
self.UNITID = UNITID
self.OPEID = OPEID
self.wiki_summary = wiki_summary
self.date_extracted = date_extracted
self.wiki_url = wiki_url
self.FB_HANDL = FB_HANDL
self.TW_HANDL = TW_HANDL
class Nat_avg(db.Model):
__tablename__ = 'national_average'
id = db.Column(db.Integer, index=True, primary_key=True)
CCBASIC = db.Column(db.Integer)
MN_EARN_WNE_P6 = db.Column(db.Float(5))
DEBT_MDN = db.Column(db.Float(5))
C150_4_COMB = db.Column(db.Float(3))
COSTT4_COMB = db.Column(db.Float(5))
WDRAW_ORIG_YR6_RT = db.Column(db.Float(3))
NPT4_COMB = db.Column(db.Float(5))
PCTPELL = db.Column(db.Float(3))
RET_FT4_COMB = db.Column(db.Float(3))
RET_PT4_COMB = db.Column(db.Float(3))
ADJ_AVGFACSAL = db.Column(db.Float(5))
ADJ_INEXPFTE = db.Column(db.Float(5))
PFTFTUG1_EF = db.Column(db.Float(3))
PFTFAC = db.Column(db.Float(3))
COMB_RET_RATE = db.Column(db.Float(3))
def __init__(self,CCBASIC,MN_EARN_WNE_P6,DEBT_MDN,C150_4_COMB,COSTT4_COMB,WDRAW_ORIG_YR6_RT,
NPT4_COMB,PCTPELL,RET_FT4_COMB,RET_PT4_COMB,ADJ_AVGFACSAL,ADJ_INEXPFTE,
PFTFTUG1_EF,PFTFAC,COMB_RET_RATE):
self.CCBASIC = CCBASIC
self.MN_EARN_WNE_P6 = MN_EARN_WNE_P6
self.DEBT_MDN = DEBT_MDN
self.C150_4_COMB = C150_4_COMB
self.COSTT4_COMB = COSTT4_COMB
self.WDRAW_ORIG_YR6_RT = WDRAW_ORIG_YR6_RT
self.NPT4_COMB = NPT4_COMB
self.PCTPELL = PCTPELL
self.RET_FT4_COMB = RET_FT4_COMB
self.RET_PT4_COMB = RET_PT4_COMB
self.ADJ_AVGFACSAL = ADJ_AVGFACSAL
self.ADJ_INEXPFTE = ADJ_INEXPFTE
self.PFTFTUG1_EF = PFTFTUG1_EF
self.PFTFAC = PFTFAC
self.COMB_RET_RATE = COMB_RET_RATE
class School_details(db.Model):
__tablename__ = 'school_details'
__searchable__ = ['INSTNM']
id = db.Column(db.Integer, index=True, primary_key=True)
uid = db.Column(db.String(150), index=True, unique=True)
INSTNM = db.Column(db.String(100))
UNITID = db.Column(db.String(10))
OPEID = db.Column(db.String(10))
OPEID6 = db.Column(db.String(10))
CITY = db.Column(db.String(50))
STABBR = db.Column(db.String(2))
ZIP5 = db.Column(db.String(5))
PREDDEG = db.Column(db.Integer)
HTTPS_INSTURL = db.Column(db.String(100))
HTTPS_NPCURL = db.Column(db.String(300))
HIGHDEG = db.Column(db.Integer)
CONTROL = db.Column(db.Integer)
REGION = db.Column(db.Integer)
LOCALE = db.Column(db.Integer)
LATITUDE = db.Column(db.Float(10))
LONGITUDE = db.Column(db.Float(10))
CCBASIC = db.Column(db.Integer)
CCUGPROF = db.Column(db.Integer)
CCSIZSET = db.Column(db.Integer)
SATVRMID = db.Column(db.Integer)
SATMTMID = db.Column(db.Integer)
SATWRMID = db.Column(db.Integer)
ACTCMMID = db.Column(db.Integer)
ACTENMID = db.Column(db.Integer)
ACTMTMID = db.Column(db.Integer)
ACTWRMID = db.Column(db.Integer)
SATVR25 = db.Column(db.Integer)
SATVR75 = db.Column(db.Integer)
SATMT25 = db.Column(db.Integer)
SATMT75 = db.Column(db.Integer)
SATWR25 = db.Column(db.Integer)
SATWR75 = db.Column(db.Integer)
ACTCM25 = db.Column(db.Integer)
ACTCM75 = db.Column(db.Integer)
ACTEN25 = db.Column(db.Integer)
ACTEN75 = db.Column(db.Integer)
ACTMT25 = db.Column(db.Integer)
ACTMT75 = db.Column(db.Integer)
ACTWR25 = db.Column(db.Integer)
ACTWR75 = db.Column(db.Integer)
POP_SUBS = db.Column(db.String(400))
UGDS = db.Column(db.Float(5))
TUITIONFEE_IN = db.Column(db.Float(5))
TUITIONFEE_OUT = db.Column(db.Float(5))
ADJ_ADM_RATE = db.Column(db.Float(5))
OTHER_AFFIL = db.Column(db.String(300))
REL_AFFIL = db.Column(db.String(100), nullable=True)
COUNT_MISSING = db.Column(db.Integer)
VALUE_INDEX = db.Column(db.Float(5))
CARE_INDEX = db.Column(db.Float(5))
Value_score = db.Column(db.Float(5))
Care_score = db.Column(db.Float(5))
r_fin_MN_EARN_WNE_P6 = db.Column(db.Float(5))
r_fin_DEBT_MDN = db.Column(db.Float(5))
r_fin_C150_4_COMB = db.Column(db.Float(5))
r_fin_COSTT4_COMB = db.Column(db.Float(5))
r_fin_WDRAW_ORIG_YR6_RT = db.Column(db.Float(5))
r_fin_NPT4_COMB = db.Column(db.Float(5))
r_fin_PCTPELL = db.Column(db.Float(5))
r_fin_RET_FT4_COMB = db.Column(db.Float(5))
r_fin_RET_PT4_COMB = db.Column(db.Float(5))
r_fin_ADJ_AVGFACSAL = db.Column(db.Float(5))
r_fin_ADJ_INEXPFTE = db.Column(db.Float(5))
r_fin_PFTFTUG1_EF = db.Column(db.Float(5))
r_fin_PFTFAC = db.Column(db.Float(5))
r_fin_COMB_RET_RATE = db.Column(db.Float(5))
MN_EARN_WNE_P6_PRESENT = db.Column(db.Integer)
DEBT_MDN_PRESENT = db.Column(db.Float(5))
C150_4_COMB_PRESENT = db.Column(db.Integer)
COSTT4_COMB_PRESENT = db.Column(db.Integer)
WDRAW_ORIG_YR6_RT_PRESENT = db.Column(db.Integer)
NPT4_COMB_PRESENT = db.Column(db.Integer)
PCTPELL_PRESENT = db.Column(db.Integer)
RET_FT4_COMB_PRESENT = db.Column(db.Integer)
RET_PT4_COMB_PRESENT = db.Column(db.Integer)
ADJ_AVGFACSAL_PRESENT = db.Column(db.Integer)
ADJ_INEXPFTE_PRESENT = db.Column(db.Integer)
PFTFTUG1_EF_PRESENT = db.Column(db.Integer)
PFTFAC_PRESENT = db.Column(db.Integer)
fin_COMB_RET_RATE_PRESENT = db.Column(db.Integer)
rankp_MN_EARN_WNE_P6 = db.Column(db.Float(5))
rankp_DEBT_MDN = db.Column(db.Float(5))
rankp_C150_4_COMB = db.Column(db.Float(5))
rankp_COSTT4_COMB = db.Column(db.Float(5))
rankp_WDRAW_ORIG_YR6_RT = db.Column(db.Float(5))
rankp_NPT4_COMB = db.Column(db.Float(5))
rankp_PCTPELL = db.Column(db.Float(5))
rankp_ADJ_AVGFACSAL = db.Column(db.Float(5))
rankp_ADJ_INEXPFTE = db.Column(db.Float(5))
rankp_PFTFAC = db.Column(db.Float(5))
rankp_COMB_RET_RATE = db.Column(db.Float(5))
adm_pct = db.Column(db.Float(3))
IF_SAT_PRESENT = db.Column(db.Boolean)
IF_ACT_PRESENT = db.Column(db.Boolean)
def __init__(self,uid, UNITID, OPEID,OPEID6,INSTNM,CITY,STABBR,ZIP5,PREDDEG,HTTPS_INSTURL,HTTPS_NPCURL,
HIGHDEG,CONTROL,REGION,LOCALE,LATITUDE,LONGITUDE,CCBASIC,CCUGPROF,CCSIZSET,SATVRMID,
SATMTMID,SATWRMID,ACTCMMID,ACTENMID,ACTMTMID,ACTWRMID,SATVR25, SATVR75, SATMT25, SATMT75, SATWR25, SATWR75, ACTCM25,ACTCM75, ACTEN25,ACTEN75,ACTMT25, ACTMT75,ACTWR25, ACTWR75,POP_SUBS,UGDS,
TUITIONFEE_IN,TUITIONFEE_OUT,ADJ_ADM_RATE,OTHER_AFFIL,REL_AFFIL,COUNT_MISSING,
VALUE_INDEX,CARE_INDEX,Value_score, Care_score, r_fin_MN_EARN_WNE_P6,r_fin_DEBT_MDN,r_fin_C150_4_COMB,r_fin_COSTT4_COMB,
r_fin_WDRAW_ORIG_YR6_RT,r_fin_NPT4_COMB,r_fin_PCTPELL,r_fin_RET_FT4_COMB,r_fin_RET_PT4_COMB,
r_fin_ADJ_AVGFACSAL,r_fin_ADJ_INEXPFTE,r_fin_PFTFTUG1_EF,r_fin_PFTFAC,r_fin_COMB_RET_RATE,
MN_EARN_WNE_P6_PRESENT,DEBT_MDN_PRESENT,C150_4_COMB_PRESENT,COSTT4_COMB_PRESENT,
WDRAW_ORIG_YR6_RT_PRESENT,NPT4_COMB_PRESENT,PCTPELL_PRESENT,RET_FT4_COMB_PRESENT,
RET_PT4_COMB_PRESENT,ADJ_AVGFACSAL_PRESENT,ADJ_INEXPFTE_PRESENT,PFTFTUG1_EF_PRESENT,
PFTFAC_PRESENT,fin_COMB_RET_RATE_PRESENT,rankp_MN_EARN_WNE_P6,rankp_DEBT_MDN,
rankp_C150_4_COMB,rankp_COSTT4_COMB,rankp_WDRAW_ORIG_YR6_RT,rankp_NPT4_COMB,rankp_PCTPELL,
rankp_ADJ_AVGFACSAL,rankp_ADJ_INEXPFTE,rankp_PFTFAC,rankp_COMB_RET_RATE,adm_pct,IF_SAT_PRESENT,
IF_ACT_PRESENT):
self.uid = uid
self.UNITID = UNITID
self.OPEID = OPEID
self.OPEID6 = OPEID6
self.INSTNM = INSTNM
self.CITY = CITY
self.STABBR = STABBR
self.ZIP5 = ZIP5
self.PREDDEG = PREDDEG
self.HTTPS_INSTURL = HTTPS_INSTURL
self.HTTPS_NPCURL = HTTPS_NPCURL
self.HIGHDEG = HIGHDEG
self.CONTROL = CONTROL
self.REGION = REGION
self.LOCALE = LOCALE
self.LATITUDE = LATITUDE
self.LONGITUDE = LONGITUDE
self.CCBASIC = CCBASIC
self.CCUGPROF = CCUGPROF
self.CCSIZSET = CCSIZSET
self.SATVRMID = SATVRMID
self.SATMTMID = SATMTMID
self.SATWRMID = SATWRMID
self.ACTCMMID = ACTCMMID
self.ACTENMID = ACTENMID
self.ACTMTMID = ACTMTMID
self.ACTWRMID = ACTWRMID
self.SATVR25 = SATVR25
self.SATVR75 = SATVR75
self.SATMT25 = SATMT25
self.SATMT75 = SATMT75
self.SATWR25 = SATWR25
self.SATWR75 = SATWR75
self.ACTCM25 = ACTCM25
self.ACTCM75 = ACTCM75
self.ACTEN25 = ACTEN25
self.ACTEN75 = ACTEN75
self.ACTMT25 = ACTMT25
self.ACTMT75 = ACTMT75
self.ACTWR25 = ACTWR25
self.ACTWR75 = ACTWR75
self.POP_SUBS = POP_SUBS
self.UGDS = UGDS
self.TUITIONFEE_IN = TUITIONFEE_IN
self.TUITIONFEE_OUT = TUITIONFEE_OUT
self.ADJ_ADM_RATE = ADJ_ADM_RATE
self.OTHER_AFFIL = OTHER_AFFIL
self.REL_AFFIL = REL_AFFIL
self.COUNT_MISSING = COUNT_MISSING
self.VALUE_INDEX = VALUE_INDEX
self.CARE_INDEX = CARE_INDEX
self.Value_score = Value_score
self.Care_score = Care_score
self.r_fin_MN_EARN_WNE_P6 = r_fin_MN_EARN_WNE_P6
self.r_fin_DEBT_MDN = r_fin_DEBT_MDN
self.r_fin_C150_4_COMB = r_fin_C150_4_COMB
self.r_fin_COSTT4_COMB = r_fin_COSTT4_COMB
self.r_fin_WDRAW_ORIG_YR6_RT = r_fin_WDRAW_ORIG_YR6_RT
self.r_fin_NPT4_COMB = r_fin_NPT4_COMB
self.r_fin_PCTPELL = r_fin_PCTPELL
self.r_fin_RET_FT4_COMB = r_fin_RET_FT4_COMB
self.r_fin_RET_PT4_COMB = r_fin_RET_PT4_COMB
self.r_fin_ADJ_AVGFACSAL = r_fin_ADJ_AVGFACSAL
self.r_fin_ADJ_INEXPFTE = r_fin_ADJ_INEXPFTE
self.r_fin_PFTFTUG1_EF = r_fin_PFTFTUG1_EF
self.r_fin_PFTFAC = r_fin_PFTFAC
self.r_fin_COMB_RET_RATE = r_fin_COMB_RET_RATE
self.MN_EARN_WNE_P6_PRESENT = MN_EARN_WNE_P6_PRESENT
self.DEBT_MDN_PRESENT = DEBT_MDN_PRESENT
self.C150_4_COMB_PRESENT = C150_4_COMB_PRESENT
self.COSTT4_COMB_PRESENT = COSTT4_COMB_PRESENT
self.WDRAW_ORIG_YR6_RT_PRESENT = WDRAW_ORIG_YR6_RT_PRESENT
self.NPT4_COMB_PRESENT = NPT4_COMB_PRESENT
self.PCTPELL_PRESENT = PCTPELL_PRESENT
self.RET_FT4_COMB_PRESENT = RET_FT4_COMB_PRESENT
self.RET_PT4_COMB_PRESENT = RET_PT4_COMB_PRESENT
self.ADJ_AVGFACSAL_PRESENT = ADJ_AVGFACSAL_PRESENT
self.ADJ_INEXPFTE_PRESENT = ADJ_INEXPFTE_PRESENT
self.PFTFTUG1_EF_PRESENT = PFTFTUG1_EF_PRESENT
self.PFTFAC_PRESENT = PFTFAC_PRESENT
self.fin_COMB_RET_RATE_PRESENT = fin_COMB_RET_RATE_PRESENT
self.rankp_MN_EARN_WNE_P6 = rankp_MN_EARN_WNE_P6
self.rankp_DEBT_MDN = rankp_DEBT_MDN
self.rankp_C150_4_COMB = rankp_C150_4_COMB
self.rankp_COSTT4_COMB = rankp_COSTT4_COMB
self.rankp_WDRAW_ORIG_YR6_RT = rankp_WDRAW_ORIG_YR6_RT
self.rankp_NPT4_COMB = rankp_NPT4_COMB
self.rankp_PCTPELL = rankp_PCTPELL
self.rankp_ADJ_AVGFACSAL = rankp_ADJ_AVGFACSAL
self.rankp_ADJ_INEXPFTE = rankp_ADJ_INEXPFTE
self.rankp_PFTFAC = rankp_PFTFAC
self.rankp_COMB_RET_RATE = rankp_COMB_RET_RATE
self.adm_pct = adm_pct
self.IF_SAT_PRESENT = IF_SAT_PRESENT
self.IF_ACT_PRESENT = IF_ACT_PRESENT
if enable_search:
whooshalchemy.whoosh_index(flask_app, School_details)
whooshalchemy.whoosh_index(flask_app, Wiki_summary) | 0.425844 | 0.080177 |
import numpy as np
import scipy as sp
import scipy.spatial.distance
from .. import kernels
import itertools
import numpy.random as npr
import collections
# Per-axis tile: 'look' = slice to read from the input, 'grab' = sub-slice of
# the read block to keep, 'put' = where the kept data lands in the output
# (see the tile_up docstring example below).
Tile=collections.namedtuple('Tile',['look','grab','put'])
# Same fields, but each entry is a tuple of per-dimension slices.
MultiTile=collections.namedtuple('MultiTile',['look','grab','put'])
def downsample_multitile(mt, ds):
    '''Return a copy of MultiTile *mt* with every slice endpoint floor-divided
    by the per-axis factors *ds*.'''
    def shrink(slices):
        return tuple(slice(s.start // d, s.stop // d)
                     for s, d in zip(slices, ds))
    return MultiTile(shrink(mt.look), shrink(mt.grab), shrink(mt.put))
def tiles2multitiles(*tiles):
    '''Combine one Tile per axis into a single MultiTile whose fields are
    tuples of the per-axis slices.'''
    return MultiTile(
        tuple(t.look for t in tiles),
        tuple(t.grab for t in tiles),
        tuple(t.put for t in tiles),
    )
def tile_up(length,inner_sz,border_sz):
    '''
    batches a stretch of indices up into overlapping tiles.
    Input:
    - length
    - inner_sz
    - border_sz
    Output is a list of Tile objects
    For example, with
    - length=24
    - inner_sz=5
    - border_sz=2, the output should be 5 tiles with:
    lookblocks grabblocks putblocks
    [0,7] [0,5] [0,5]
    [3,12] [2,7] [5,10]
    [8,17] [2,7] [10,15]
    [13,22] [2,7] [15,20]
    [18,24] [2,6] [20,24]
    1 1 1 1 1 1 1 1 1 1 2 2 2 2
    0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3
    . . . . . . . . . . . . . . . . . . . . . . . .|
    0 1 2 3 4 5 6 |
    0 1 2 3 4 5 6 7 8 |
    0 1 2 3 4 5 6 7 8 |
    0 1 2 3 4 5 6 7 8 |
    0 1 2 3 4 5|
    '''
    # ib: span of an edge tile (inner region + one border);
    # ib2: span of an interior tile (inner region + a border on each side).
    ib = inner_sz+border_sz
    ib2 = inner_sz + border_sz*2
    if ib >= length:
        # only one tile!
        return [Tile(slice(0,length),slice(0,length),slice(0,length))]
    else:
        lookblocks=[]
        grabblocks=[]
        putblocks=[]
        # First tile has no left border: look/grab/put all start at 0.
        lookblocks.append(slice(0,ib))
        grabblocks.append(slice(0,inner_sz))
        putblocks.append(slice(0,inner_sz))
        def get_next_block(st):
            '''
            creates another block, with lookblock starting
            at st
            '''
            en = st+ib2
            if en>length:
                # uh oh. this is our last tile!  Grab everything after the
                # leading border up to the end of the stretch.
                lookblocks.append(slice(st,length))
                grabblocks.append(slice(border_sz,length-st))
                putblocks.append(slice(st+border_sz,length))
                return False
            else:
                # regular old tile: keep only the inner region, skipping the
                # border on both sides.
                lookblocks.append(slice(st,en))
                grabblocks.append(slice(border_sz,ib))
                putblocks.append(slice(st+border_sz,en-border_sz))
                return True
        # Each new look block starts one border before where the previous
        # put block ended, so consecutive tiles overlap by the border.
        while get_next_block(putblocks[-1].stop-border_sz):
            pass
        return [Tile(*x) for x in zip(lookblocks,grabblocks,putblocks)]
def tile_up_nd(shp, inner_szs, border_szs):
    '''
    Tile an n-dimensional shape into overlapping blocks.
    Input:
    - shp: shape to tile
    - inner_szs (one for each dim in shape)
    - border_szs (one for each dim in shape)
    Output
    - a list of MultiTile objects covering the whole shape
    '''
    per_axis = [tile_up(n, inner, border)
                for n, inner, border in zip(shp, inner_szs, border_szs)]
    # Cartesian product of the per-axis tilings gives every n-d block.
    return [tiles2multitiles(*combo)
            for combo in itertools.product(*per_axis)]
import scipy as sp
import scipy.spatial.distance
from .. import kernels
import itertools
import numpy.random as npr
import collections
Tile=collections.namedtuple('Tile',['look','grab','put'])
MultiTile=collections.namedtuple('MultiTile',['look','grab','put'])
def downsample_multitile(mt,ds):
return MultiTile(
tuple([slice(x.start//d,x.stop//d) for x,d in zip(mt.look,ds)]),
tuple([slice(x.start//d,x.stop//d) for x,d in zip(mt.grab,ds)]),
tuple([slice(x.start//d,x.stop//d) for x,d in zip(mt.put,ds)]),
)
def tiles2multitiles(*tiles):
return MultiTile(
tuple([x.look for x in tiles]),
tuple([x.grab for x in tiles]),
tuple([x.put for x in tiles])
)
def tile_up(length, inner_sz, border_sz):
    '''
    Batch a stretch of indices up into overlapping tiles.

    Input:
    - length: total extent of the 1-d index range [0, length)
    - inner_sz: size of the non-overlapping "payload" of each tile
    - border_sz: extra context read on each side of the payload

    Output is a list of Tile objects whose ``put`` slices partition
    [0, length); each ``look`` slice overlaps its neighbors by the border,
    and ``grab`` selects the payload out of the looked-at window.

    For example, length=24, inner_sz=5, border_sz=2 yields 5 tiles:
       look      grab    put
       [0,7]     [0,5]   [0,5]
       [3,12]    [2,7]   [5,10]
       [8,17]    [2,7]   [10,15]
       [13,22]   [2,7]   [15,20]
       [18,24]   [2,6]   [20,24]
    '''
    ib = inner_sz + border_sz        # first/last tiles see one border only
    ib2 = inner_sz + border_sz * 2   # interior tiles see a border on each side
    if ib >= length:
        # Everything fits in a single tile.
        return [Tile(slice(0, length), slice(0, length), slice(0, length))]
    # First tile: starts at the edge, so no leading border.
    looks = [slice(0, ib)]
    grabs = [slice(0, inner_sz)]
    puts = [slice(0, inner_sz)]
    while True:
        # Each subsequent look window begins one border before the previous
        # tile's put region ended.
        start = puts[-1].stop - border_sz
        stop = start + ib2
        if stop > length:
            # Final, truncated tile: runs to the end of the range.
            looks.append(slice(start, length))
            grabs.append(slice(border_sz, length - start))
            puts.append(slice(start + border_sz, length))
            break
        # Regular interior tile.
        looks.append(slice(start, stop))
        grabs.append(slice(border_sz, ib))
        puts.append(slice(start + border_sz, stop - border_sz))
    return [Tile(*t) for t in zip(looks, grabs, puts)]
def tile_up_nd(shp,inner_szs,border_szs):
    '''
    Tile an n-dimensional shape into overlapping MultiTiles.

    Input:
    - shape
    - inner_szs (one for each dim in shape)
    - border_szs(one for each dim in shape)
    Output
    - a list of MultiTile objects
    '''
    # Tile each dimension independently with the 1-d algorithm...
    lgps = [tile_up(sh,i,b) for (sh,i,b) in zip(shp,inner_szs,border_szs)]
    # ...then take the Cartesian product of the per-dimension tilings
    # and zip each combination into one MultiTile.
    tiles=list(itertools.product(*lgps))
    mt=[tiles2multitiles(*x) for x in tiles]
    return mt
import sys
import json
import socket
import spotipy
import asyncio
import webbrowser
from time import time
from spotipy import oauth2
from config import *
def listen_for_callback_code():
    """Accept one HTTP request on the OAuth redirect port and extract the
    ``code`` query parameter Spotify appends to the callback URL.

    Blocks until the browser hits the redirect URI; returns the
    authorization code as a string.
    """
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        # The redirect URI is assumed to end in ":<port>", so the last
        # colon-separated field is the port to listen on.
        s.bind(('localhost', int(redirect_uri.split(":")[-1])))
        s.listen(1)
        while True:
            connection, address = s.accept()
            try:
                # NOTE: str() on bytes yields a "b'...'" repr; the find()
                # offsets below operate on that repr, which works, though
                # decode() would be cleaner.
                buf = str(connection.recv(1024))
            finally:
                connection.close()  # fix: accepted socket was never closed
            if len(buf) > 0:
                break
    finally:
        s.close()  # fix: listening socket was never closed
    start_code = buf.find("?code=") + 6
    end_code = buf.find(" ", start_code)
    if "&" in buf[start_code:end_code]:
        # fix: search for "&" from start_code, not from the beginning of the
        # buffer, so an earlier "&" in the request cannot truncate the code.
        end_code = buf.find("&", start_code)
    return buf[start_code:end_code]
async def get_spotify_auth_code():
    """Open the user's browser at Spotify's authorization page."""
    webbrowser.open(sp_oauth.get_authorize_url())
async def async_get_auth_code():
    """Kick off the browser-based authorization flow, then block on the local
    socket until Spotify redirects back with an authorization code.

    NOTE(review): the listener only starts after the browser is opened, so a
    very fast redirect could theoretically race the bind -- confirm.
    """
    await asyncio.create_task(get_spotify_auth_code())
    return listen_for_callback_code()
def do_spotify_oauth():
    """Return a Spotify token dict, refreshing or re-authorizing as needed.

    Reads a cached token from token.json and refreshes it when expired.
    When no cached token exists, runs the interactive browser OAuth flow.
    The resulting token is written back to token.json (best effort).
    Exits the process when authorization fails.
    """
    try:
        with open("token.json", "r") as fh:
            token = json.loads(fh.read())
    # fix: narrowed from a bare ``except:`` -- only a missing or unparsable
    # cache file means "no token"; anything else should surface.
    except (OSError, ValueError):
        token = None
    if token:
        if int(time()) > token["expires_at"]:
            token = sp_oauth.refresh_access_token(token["refresh_token"])
    else:
        authorization_code = asyncio.run(async_get_auth_code())
        print(authorization_code)
        if not authorization_code:
            print("\n[!] Unable to authenticate to Spotify. Couldn't get authorization code")
            sys.exit(-1)
        token = sp_oauth.get_access_token(authorization_code)
    if not token:
        print("\n[!] Unable to authenticate to Spotify. Couldn't get access token.")
        sys.exit(-1)
    try:
        with open("token.json", "w+") as fh:
            fh.write(json.dumps(token))
    except OSError:
        # Deliberately best-effort: a read-only disk should not kill the run.
        print("\n[!] Unable to to write token object to disk. This is non-fatal.")
    return token
def get_all_playlists():
    """Return every playlist of the configured user, walking all pages of the
    paginated API response."""
    pager = spotify.user_playlists(username)
    collected = list(pager["items"])
    while pager["next"]:
        pager = spotify.next(pager)
        collected.extend(pager["items"])
    return collected
def create_playlist(playlist_name):
    """Create a new playlist for the configured user and return its id."""
    return spotify.user_playlist_create(username, playlist_name)["id"]
def get_playlist_id(playlist_name):
    """Return the id of the user's playlist named *playlist_name*, or None
    when no such playlist exists (first match wins)."""
    matches = (p["id"] for p in get_all_playlists() if p["name"] == playlist_name)
    return next(matches, None)
def do_durations_match(source_track_duration, found_track_duration):
    """Compare two track durations (ms); log the outcome and return whether
    they are exactly equal."""
    matched = source_track_duration == found_track_duration
    if matched:
        print("\t\t\t\t[+] Durations match")
    else:
        print("\t\t\t\t[!] Durations do not match")
    return matched
def most_popular_track(tracks):
    """Return the id of the most popular track in *tracks*.

    Ties are broken in favor of the earliest track in the list; an empty
    list yields None.  (Popularity does not always yield the correct
    result, but it is the heuristic used here.)

    fix: the original initialized its running maximum to 0 and compared
    with ``>``, so a candidate list in which every track had popularity 0
    returned None even though matches existed.
    """
    if not tracks:
        return None
    return max(tracks, key=lambda t: t["popularity"])["id"]
def best_of_multiple_matches(source_track, found_tracks):
    """Pick the best candidate among several exact search matches.

    Prefers the single candidate whose duration matches the source track;
    otherwise falls back to the most popular candidate overall.
    """
    duration_hits = []
    for index, candidate in enumerate(found_tracks, start=1):
        print("\t\t\t[+] Match {}: {}".format(index, candidate["id"]))
        if do_durations_match(source_track["duration_ms"], candidate["duration_ms"]):
            duration_hits.append(candidate)
    if len(duration_hits) == 1:
        best_track = duration_hits[0]["id"]
        print("\t\t\t[+] Only one exact match with matching duration, going with that one: {}".format(best_track))
        return best_track
    # TODO: Popularity does not always yield the correct result
    best_track = most_popular_track(found_tracks)
    print("\t\t\t[+] Multiple exact matches with matching durations, going with the most popular one: {}".format(best_track))
    return best_track
def _run_track_search(track, query, match_fields):
    """Run one Spotify search for *track* with *query*.

    Returns a (found, track_id) pair: found is False when the search gave
    zero results (the caller should relax the query); when found is True,
    track_id is the chosen id (possibly None from the popularity fallback).
    """
    print("\t[+] Search Query: {}".format(query))
    items = spotify.search(query)["tracks"]["items"]
    if len(items) == 1:
        track_id = items[0]["id"]
        print("\t\t[+] Found an exact match on {}: {}".format(match_fields, track_id))
        # Duration check is informational only -- a single match is accepted.
        do_durations_match(track["duration_ms"], items[0]["duration_ms"])
        return True, track_id
    if len(items) > 1:
        print("\t\t[+] Found multiple exact matches ({}) on {}.".format(len(items), match_fields))
        return True, best_of_multiple_matches(track, items)
    return False, None


def search_for_track(track):
    """Find *track* on Spotify, progressively relaxing the query.

    Tries name+mix+artists+release, then drops the release, then drops the
    mix (keeping the release), then searches on name+artists alone.
    Returns the Spotify track id, or None when nothing matched.

    (Refactors the four copy-pasted search stages noted in the original
    TODO into a single helper driven by an attempt table.)
    """
    mix_display = " " if not track["mix"] else " ({}) ".format(track["mix"])
    mix_query = " " if not track["mix"] else " {} ".format(track["mix"])
    artists = " ".join(track["artists"])
    print("\n[+] Searching for track: {}{}by {} on {}".format(track["name"], mix_display, ", ".join(track["artists"]), track["release"]))
    # (query, fields described in log messages, hint printed before relaxing)
    attempts = [
        ("{}{}{} {}".format(track["name"], mix_query, artists, track["release"]),
         "name, mix, artists, and release", "Trying without release."),
        ("{}{}{}".format(track["name"], mix_query, artists),
         "name, mix, and artists", "Trying without mix, but with release."),
        ("{} {} {}".format(track["name"], artists, track["release"]),
         "name, artists, and release", "Trying with just name and artists."),
        ("{} {}".format(track["name"], artists),
         "name and artists", None),
    ]
    for query, match_fields, relax_hint in attempts:
        found, track_id = _run_track_search(track, query, match_fields)
        if found:
            return track_id
        if relax_hint:
            print("\t\t[+] No exact matches on {}. {}".format(match_fields, relax_hint))
        else:
            print("\t\t[+] No exact matches on {}.".format(match_fields))
    print("\t[!] Could not find this song on Spotify!")
    return None
def track_in_playlist(playlist_id, track_id):
    """Return True when *track_id* already appears in the playlist."""
    return any(entry["track"]["id"] == track_id
               for entry in get_all_tracks_in_playlist(playlist_id))
def add_tracks_to_playlist(playlist_id, track_ids):
    """Append *track_ids* to the playlist; a no-op for an empty list."""
    if not track_ids:
        return
    spotify.user_playlist_add_tracks(username, playlist_id, track_ids)
def get_all_tracks_in_playlist(playlist_id):
    """Return all track entries of a playlist, paging through the API.

    Results are memoized per playlist in the module-level
    ``playlist_track_cache`` to avoid refetching.
    """
    if playlist_id not in playlist_track_cache:
        pager = spotify.playlist(playlist_id, fields="tracks")["tracks"]
        tracks = list(pager["items"])
        while pager["next"]:
            pager = spotify.next(pager)
            tracks.extend(pager["items"])
        playlist_track_cache[playlist_id] = tracks
    return playlist_track_cache[playlist_id]
def clear_playlist(playlist_id):
    """Remove every track from the playlist.

    fix: the original issued one API request per track; the endpoint
    accepts up to 100 track ids per call, so remove them in batches.
    Also invalidates the module-level cache entry, which the original
    left stale after clearing.
    """
    track_ids = [entry["track"]["id"] for entry in get_all_tracks_in_playlist(playlist_id)]
    for start in range(0, len(track_ids), 100):
        spotify.user_playlist_remove_all_occurrences_of_tracks(
            username, playlist_id, track_ids[start:start + 100])
    playlist_track_cache.pop(playlist_id, None)
def add_new_tracks_to_playlist(genre, tracks_dict):
    """Sync charted tracks into the genre's persistent "Top 100" playlist and
    its daily "Top 10" playlist (rebuilt from scratch each run), creating
    either playlist on demand."""
    top_100_name = "Beatporter: {} - Top 100".format(genre)
    daily_10_name = "Beatporter: {} - Daily Top 10".format(genre)
    print("[+] Identifying new tracks for playlist: \"{}\"".format(top_100_name))
    targets = []
    for name in (top_100_name, daily_10_name):
        playlist_id = get_playlist_id(name)
        if not playlist_id:
            print("\t[!] Playlist \"{}\" does not exist, creating it.".format(name))
            playlist_id = create_playlist(name)
        targets.append({"name": name, "id": playlist_id})
    top_100, daily_10 = targets
    # The daily playlist is rebuilt from scratch on every run.
    clear_playlist(daily_10["id"])
    top_100_additions = []
    daily_10_additions = []
    for position, track in enumerate(tracks_dict):
        track_id = search_for_track(track)
        if track_id and not track_in_playlist(top_100["id"], track_id):
            top_100_additions.append(track_id)
        # The first ten chart entries (found or not) define the daily window.
        if track_id and position < 10:
            daily_10_additions.append(track_id)
    print("\n[+] Adding {} new tracks to the playlist: \"{}\"".format(len(top_100_additions), top_100_name))
    add_tracks_to_playlist(top_100["id"], top_100_additions)
    print("\n[+] Adding {} new tracks to the playlist: \"{}\"".format(len(daily_10_additions), daily_10_name))
    add_tracks_to_playlist(daily_10["id"], daily_10_additions)
# Cache of playlist id -> full track listing, filled lazily by
# get_all_tracks_in_playlist() to avoid refetching pages.
playlist_track_cache = dict()
# Get authenticated to Spotify on import
# NOTE(review): importing this module triggers the OAuth flow (may open a
# browser and block on a local socket) -- an import-time side effect.
sp_oauth = oauth2.SpotifyOAuth(client_id, client_secret, redirect_uri, username=username, scope=scope)
token_info = do_spotify_oauth()
spotify = spotipy.Spotify(auth=token_info["access_token"], requests_timeout=120)
import json
import socket
import spotipy
import asyncio
import webbrowser
from time import time
from spotipy import oauth2
from config import *
def listen_for_callback_code():
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind(('localhost', int(redirect_uri.split(":")[-1])))
s.listen(1)
while True:
connection, address = s.accept()
buf = str(connection.recv(1024))
if len(buf) > 0:
break
start_code = buf.find("?code=") + 6
end_code = buf.find(" ", start_code)
if "&" in buf[start_code:end_code]:
end_code = buf.find("&")
return buf[start_code:end_code]
async def get_spotify_auth_code():
auth_url = sp_oauth.get_authorize_url()
webbrowser.open(auth_url)
async def async_get_auth_code():
task = asyncio.create_task(get_spotify_auth_code())
await task
return listen_for_callback_code()
def do_spotify_oauth():
try:
with open("token.json", "r") as fh:
token = fh.read()
token = json.loads(token)
except:
token = None
if token:
if int(time()) > token["expires_at"]:
token = sp_oauth.refresh_access_token(token["refresh_token"])
else:
authorization_code = asyncio.run(async_get_auth_code())
print(authorization_code)
if not authorization_code:
print("\n[!] Unable to authenticate to Spotify. Couldn't get authorization code")
sys.exit(-1)
token = sp_oauth.get_access_token(authorization_code)
if not token:
print("\n[!] Unable to authenticate to Spotify. Couldn't get access token.")
sys.exit(-1)
try:
with open("token.json", "w+") as fh:
fh.write(json.dumps(token))
except:
print("\n[!] Unable to to write token object to disk. This is non-fatal.")
return token
def get_all_playlists():
playlists_pager = spotify.user_playlists(username)
playlists = playlists_pager["items"]
while playlists_pager["next"]:
playlists_pager = spotify.next(playlists_pager)
playlists.extend(playlists_pager["items"])
return playlists
def create_playlist(playlist_name):
playlist = spotify.user_playlist_create(username, playlist_name)
return playlist["id"]
def get_playlist_id(playlist_name):
playlists = get_all_playlists()
for playlist in playlists:
if playlist["name"] == playlist_name:
return playlist["id"]
return None
def do_durations_match(source_track_duration, found_track_duration):
if source_track_duration == found_track_duration:
print("\t\t\t\t[+] Durations match")
return True
else:
print("\t\t\t\t[!] Durations do not match")
return False
def most_popular_track(tracks):
# Popularity does not always yield the correct result
high_score = 0
winner = None
for track in tracks:
if track["popularity"] > high_score:
winner = track["id"]
high_score = track["popularity"]
return winner
def best_of_multiple_matches(source_track, found_tracks):
counter = 1
duration_matches = [0, ]
for track in found_tracks:
print("\t\t\t[+] Match {}: {}".format(counter, track["id"]))
if do_durations_match(source_track["duration_ms"], track["duration_ms"]):
duration_matches[0] += 1
duration_matches.append(track)
counter += 1
if duration_matches[0] == 1:
best_track = duration_matches.pop()["id"]
print("\t\t\t[+] Only one exact match with matching duration, going with that one: {}".format(best_track))
return best_track
# TODO: Popularity does not always yield the correct result
best_track = most_popular_track(found_tracks)
print("\t\t\t[+] Multiple exact matches with matching durations, going with the most popular one: {}".format(best_track))
return best_track
def search_for_track(track):
# TODO: This is repetitive, can probably refactor but works for now
print("\n[+] Searching for track: {}{}by {} on {}".format(track["name"], " " if not track["mix"] else " ({}) ".format(track["mix"]), ", ".join(track["artists"]), track["release"]))
# Search with Title, Mix, Artists, and Release / Album
query = "{}{}{} {}".format(track["name"], " " if not track["mix"] else " {} ".format(track["mix"]), " ".join(track["artists"]), track["release"])
print("\t[+] Search Query: {}".format(query))
search_results = spotify.search(query)
if len(search_results["tracks"]["items"]) == 1:
track_id = search_results["tracks"]["items"][0]["id"]
print("\t\t[+] Found an exact match on name, mix, artists, and release: {}".format(track_id))
do_durations_match(track["duration_ms"], search_results["tracks"]["items"][0]["duration_ms"])
return track_id
if len(search_results["tracks"]["items"]) > 1:
print("\t\t[+] Found multiple exact matches ({}) on name, mix, artists, and release.".format(len(search_results["tracks"]["items"])))
return best_of_multiple_matches(track, search_results["tracks"]["items"])
# Not enough results, search w/o release
print("\t\t[+] No exact matches on name, mix, artists, and release. Trying without release.")
# Search with Title, Mix, and Artists
query = "{}{}{}".format(track["name"], " " if not track["mix"] else " {} ".format(track["mix"]), " ".join(track["artists"]))
print("\t[+] Search Query: {}".format(query))
search_results = spotify.search(query)
if len(search_results["tracks"]["items"]) == 1:
track_id = search_results["tracks"]["items"][0]["id"]
print("\t\t[+] Found an exact match on name, mix, and artists: {}".format(track_id))
do_durations_match(track["duration_ms"], search_results["tracks"]["items"][0]["duration_ms"])
return track_id
if len(search_results["tracks"]["items"]) > 1:
print("\t\t[+] Found multiple exact matches ({}) on name, mix, and artists.".format(len(search_results["tracks"]["items"])))
return best_of_multiple_matches(track, search_results["tracks"]["items"])
# Not enough results, search w/o mix, but with release
print("\t\t[+] No exact matches on name, mix, and artists. Trying without mix, but with release.")
query = "{} {} {}".format(track["name"], " ".join(track["artists"]), track["release"])
print("\t[+] Search Query: {}".format(query))
search_results = spotify.search(query)
if len(search_results["tracks"]["items"]) == 1:
track_id = search_results["tracks"]["items"][0]["id"]
print("\t\t[+] Found an exact match on name, artists, and release: {}".format(track_id))
do_durations_match(track["duration_ms"], search_results["tracks"]["items"][0]["duration_ms"])
return track_id
if len(search_results["tracks"]["items"]) > 1:
print("\t\t[+] Found multiple exact matches ({}) on name, artists, and release.".format(len(search_results["tracks"]["items"])))
return best_of_multiple_matches(track, search_results["tracks"]["items"])
# Not enough results, search w/o mix or release
print("\t\t[+] No exact matches on name, artists, and release. Trying with just name and artists.")
query = "{} {}".format(track["name"], " ".join(track["artists"]))
print("\t[+] Search Query: {}".format(query))
search_results = spotify.search(query)
if len(search_results["tracks"]["items"]) == 1:
track_id = search_results["tracks"]["items"][0]["id"]
print("\t\t[+] Found an exact match on name and artists: {}".format(track_id))
do_durations_match(track["duration_ms"], search_results["tracks"]["items"][0]["duration_ms"])
return track_id
if len(search_results["tracks"]["items"]) > 1:
print("\t\t[+] Found multiple exact matches ({}) on name and artists.".format(len(search_results["tracks"]["items"])))
return best_of_multiple_matches(track, search_results["tracks"]["items"])
print("\t\t[+] No exact matches on name and artists.")
print("\t[!] Could not find this song on Spotify!")
return None
def track_in_playlist(playlist_id, track_id):
for track in get_all_tracks_in_playlist(playlist_id):
if track["track"]["id"] == track_id:
return True
return False
def add_tracks_to_playlist(playlist_id, track_ids):
if track_ids:
spotify.user_playlist_add_tracks(username, playlist_id, track_ids)
def get_all_tracks_in_playlist(playlist_id):
if playlist_id in playlist_track_cache:
return playlist_track_cache[playlist_id]
playlist_tracks_results = spotify.playlist(playlist_id, fields="tracks")
playlist_tracks_pager = playlist_tracks_results["tracks"]
playlist_tracks = playlist_tracks_pager["items"]
while playlist_tracks_pager["next"]:
playlist_tracks_pager = spotify.next(playlist_tracks_pager)
playlist_tracks.extend(playlist_tracks_pager["items"])
playlist_track_cache[playlist_id] = playlist_tracks
return playlist_track_cache[playlist_id]
def clear_playlist(playlist_id):
for track in get_all_tracks_in_playlist(playlist_id):
spotify.user_playlist_remove_all_occurrences_of_tracks(username, playlist_id, [track["track"]["id"],])
def add_new_tracks_to_playlist(genre, tracks_dict):
persistent_top_100_playlist_name = "Beatporter: {} - Top 100".format(genre)
daily_top_10_playlist_name = "Beatporter: {} - Daily Top 10".format(genre)
print("[+] Identifying new tracks for playlist: \"{}\"".format(persistent_top_100_playlist_name))
playlists = [{"name": persistent_top_100_playlist_name, "id": get_playlist_id(persistent_top_100_playlist_name)},
{"name": daily_top_10_playlist_name, "id": get_playlist_id(daily_top_10_playlist_name)}]
for playlist in playlists:
if not playlist["id"]:
print("\t[!] Playlist \"{}\" does not exist, creating it.".format(playlist["name"]))
playlist["id"] = create_playlist(playlist["name"])
# Clear daily playlist
clear_playlist(playlists[1]["id"])
persistent_top_100_track_ids = list()
daily_top_10_track_ids = list()
track_count = 0
for track in tracks_dict:
track_id = search_for_track(track)
if track_id and not track_in_playlist(playlists[0]["id"], track_id):
persistent_top_100_track_ids.append(track_id)
if track_id and track_count < 10:
daily_top_10_track_ids.append(track_id)
track_count += 1
print("\n[+] Adding {} new tracks to the playlist: \"{}\"".format(len(persistent_top_100_track_ids), persistent_top_100_playlist_name))
add_tracks_to_playlist(playlists[0]["id"], persistent_top_100_track_ids)
print("\n[+] Adding {} new tracks to the playlist: \"{}\"".format(len(daily_top_10_track_ids), daily_top_10_playlist_name))
add_tracks_to_playlist(playlists[1]["id"], daily_top_10_track_ids)
playlist_track_cache = dict()
# Get authenticated to Spotify on import
sp_oauth = oauth2.SpotifyOAuth(client_id, client_secret, redirect_uri, username=username, scope=scope)
token_info = do_spotify_oauth()
spotify = spotipy.Spotify(auth=token_info["access_token"], requests_timeout=120) | 0.152221 | 0.120155 |
import csv
import xml.etree.ElementTree as ET
from json import load
from typing import Dict, List, Tuple, Text, Set
def webprocessing(a: Dict, b: List, c: Text, d: Text = "") -> Tuple[Dict, Text]:
    """
    Incrementally build a nested export structure from Sticky Password XML.

    :param a: temporary account storage (mutated in place and returned)
    :param b: ref to xml object (groups, accounts, logins)
    :param c: switcher -- one of "group", "account", "login"
    :param d: group id or GID
    :return: temporary storage and GID

    fix: restores the redacted ``j.get("Password")`` lookup in the "login"
    pass (the source contained a ``<PASSWORD>`` placeholder).

    NOTE(review): when ``c`` is "group" and no element matches ``d``, the
    function falls through and implicitly returns None, which callers
    unpack -- assumed not to happen for well-formed exports; verify.
    """
    if c == "group" and d != "":
        # "-5" is the virtual root group (accounts without a folder).
        if d == "-5":
            a["-5"] = {"Name": None}
            return a, "-5"
        else:
            for i in b:
                for j in i:
                    if a.get(f"{j.get('ID')}") is None and j.get('ID') == d:
                        a[f"{j.get('ID')}"] = {"Name": j.get("Name")}
                        return a, j.get("ID")
    elif c == "account" and d != "":
        for i in b:
            for j in i:
                GID_: Text = j.get("ParentID")
                if a.get(GID_) is not None and GID_ == d:
                    # Record layout: [name, id, link, (comments?), login-id];
                    # the trailing login-id is replaced by a credentials dict
                    # in the "login" pass below.
                    for login_ in j.findall("LoginLinks/Login"):
                        if login_.get("ParentID") != j.get("ID"):
                            continue
                        fields = [j.get("Name"), j.get("ID"), j.get("Link")]
                        if j.get("Comments") is not None:
                            fields.append(j.get("Comments"))
                        fields.append(login_.get("SourceLoginID"))
                        a[GID_].setdefault("Account", []).append(fields)
        return a, d
    elif c == "login" and d != "":
        # NOTE(review): assumes group ``d`` gathered at least one account
        # ("Account" key exists); KeyError otherwise -- confirm upstream.
        for i in b:
            for j in i:
                record = a[d]["Account"]
                for n in range(0, len(record)):
                    last = record[n][-1]
                    # Still an unresolved source-login id (a str) until the
                    # matching Login element is found.
                    if type(last) is not dict and type(last) is str:
                        if last == j.get("ID"):
                            record[n][-1] = {"Login": j.get("Name"), "Password": j.get("Password")}
        return a, d
if __name__ == '__main__':
    """
    """
    # Convert a Sticky Password XML export (web format) into a NordPass
    # import CSV: groups become "folder" rows, each account row carries its
    # resolved login/password and optional note.
    with open("config.json") as config:
        cfg = load(config)
    tree: List = ET.parse(cfg["stickypasswordWebPath"])  # NOTE(review): actually an ElementTree, not a List
    root = tree.getroot()
    group: List = root.findall("./Database/Groups")
    account: List = root.findall("./Database/Accounts")
    login: List = root.findall("./Database/Logins")
    storage: Dict = {}
    control: Set = set()  # ParentIDs already processed, so each group is handled once
    for accounts in account:
        for elem in accounts:
            if elem.get("ParentID") not in control:
                control.add(elem.get("ParentID"))
                # Three passes per group: register the group, attach its
                # accounts, then resolve login ids into credentials.
                storage, GID = webprocessing(storage, group, "group", elem.get("ParentID"))
                storage, GID = webprocessing(storage, account, "account", d=GID)
                storage, GID = webprocessing(storage, login, "login", d=GID)
    # Column layout expected by the NordPass CSV importer.
    nordpasstemplate: List[Text] = ["name", "url", "username", "password", "note",
                                    "cardholdername", "cardnumber", "cvc", "expirydate",
                                    "zipcode", "folder", "full_name", "phone_number", "email",
                                    "address1", "address2", "city", "country", "state"]
    with open('nordpass.csv', "w", newline="", encoding="utf8") as csvfile:
        csvwriter = csv.writer(csvfile, delimiter=',')
        csvwriter.writerow(nordpasstemplate)
        # One "folder" declaration row per real group (the virtual root
        # group "-5" has no folder).
        for g in storage:
            if g != "-5":
                nordfolder: List = [storage[g]["Name"]]
                for i in range(0, len(nordpasstemplate) - 1):
                    nordfolder.append("")
                csvwriter.writerow(nordfolder)
        for gids in storage:
            # NOTE(review): assumes every group gathered at least one
            # account; a group with no "Account" key raises KeyError -- confirm.
            for account in storage[gids]["Account"]:
                # account[3] is the comment string when the record has five
                # fields, or the credentials dict when it has four (no comment).
                if type(account[3]) is str:
                    nordrecord: List = [account[0], account[2], account[4]["Login"], account[4]["Password"], account[3]]
                    for i in range(len(nordpasstemplate) - 5):
                        nordrecord.append("")
                    if gids != "-5":
                        nordrecord[10] = storage[gids]["Name"]  # "folder" column
                elif type(account[3]) is dict:
                    nordrecord: List = [account[0], account[2], account[3]["Login"], account[3]["Password"]]
                    for i in range(len(nordpasstemplate) - 4):
                        nordrecord.append("")
                    if gids != "-5":
                        nordrecord[10] = storage[gids]["Name"]  # "folder" column
                csvwriter.writerow(nordrecord)
import xml.etree.ElementTree as ET
from json import load
from typing import Dict, List, Tuple, Text, Set
def webprocessing(a: Dict, b: List, c: Text, d: Text = "") -> Tuple[Dict, Text]:
"""
:param a: temporary account storage
:param b: ref to xml object (groups, accounts, logins)
:param c: switcher
:param d: group id or GID
:return: temporary storage and GID
"""
if c == "group" and d != "":
if d == "-5":
a["-5"] = {"Name": None}
return a, "-5"
else:
for i in b:
for j in i:
if a.get(f"{j.get('ID')}") is None and j.get('ID') == d:
a[f"{j.get('ID')}"] = {"Name": j.get("Name")}
return a, j.get("ID")
elif c == "account" and d != "":
for i in b:
for j in i:
GID_: Text = j.get("ParentID")
if a.get(GID_) is not None and GID_ == d:
loginlink: List = j.findall("LoginLinks/Login")
for login_ in loginlink:
if a[GID_].get("Account") is None:
if j.get("Comments") is None:
if login_.get("ParentID") == j.get("ID"):
a[GID_]["Account"] = [[j.get("Name"), j.get("ID"), j.get("Link"),
login_.get("SourceLoginID")]]
else:
if login_.get("ParentID") == j.get("ID"):
a[GID_]["Account"] = [[j.get("Name"), j.get("ID"), j.get("Link"), j.get("Comments"),
login_.get("SourceLoginID")]]
elif a[GID_].get("Account") is not None:
if j.get("Comments") is None:
if login_.get("ParentID") == j.get("ID"):
a[GID_]["Account"].append([j.get("Name"), j.get("ID"), j.get("Link"),
login_.get("SourceLoginID")])
else:
if login_.get("ParentID") == j.get("ID"):
a[GID_]["Account"].append([j.get("Name"), j.get("ID"), j.get("Link"), j.get("Comments"),
login_.get("SourceLoginID")])
return a, d
elif c == "login" and d != "":
for i in b:
for j in i:
record = a[d]["Account"]
for n in range(0, len(a[d]["Account"])):
if type(record[n][len(record[n]) - 1]) is not dict and type(record[n][len(record[n]) - 1]) is str:
if record[n][len(record[n]) - 1] == j.get("ID"):
a[d]["Account"][n][len(record[n]) - 1] = {"Login": j.get("Name"), "Password": <PASSWORD>("Password")}
return a, d
if __name__ == '__main__':
"""
"""
with open("config.json") as config:
cfg = load(config)
tree: List = ET.parse(cfg["stickypasswordWebPath"])
root = tree.getroot()
group: List = root.findall("./Database/Groups")
account: List = root.findall("./Database/Accounts")
login: List = root.findall("./Database/Logins")
storage: Dict = {}
control: Set = set()
for accounts in account:
for elem in accounts:
if elem.get("ParentID") not in control:
control.add(elem.get("ParentID"))
storage, GID = webprocessing(storage, group, "group", elem.get("ParentID"))
storage, GID = webprocessing(storage, account, "account", d=GID)
storage, GID = webprocessing(storage, login, "login", d=GID)
nordpasstemplate: List[Text] = ["name", "url", "username", "password", "note",
"cardholdername", "cardnumber", "cvc", "expirydate",
"zipcode", "folder", "full_name", "phone_number", "email",
"address1", "address2", "city", "country", "state"]
with open('nordpass.csv', "w", newline="", encoding="utf8") as csvfile:
csvwriter = csv.writer(csvfile, delimiter=',')
csvwriter.writerow(nordpasstemplate)
for g in storage:
if g != "-5":
nordfolder: List = [storage[g]["Name"]]
for i in range(0, len(nordpasstemplate) - 1):
nordfolder.append("")
csvwriter.writerow(nordfolder)
for gids in storage:
for account in storage[gids]["Account"]:
if type(account[3]) is str:
nordrecord: List = [account[0], account[2], account[4]["Login"], account[4]["Password"], account[3]]
for i in range(len(nordpasstemplate) - 5):
nordrecord.append("")
if gids != "-5":
nordrecord[10] = storage[gids]["Name"]
elif type(account[3]) is dict:
nordrecord: List = [account[0], account[2], account[3]["Login"], account[3]["Password"]]
for i in range(len(nordpasstemplate) - 4):
nordrecord.append("")
if gids != "-5":
nordrecord[10] = storage[gids]["Name"]
csvwriter.writerow(nordrecord) | 0.466359 | 0.280687 |
import json
from django.db import models
from django.forms import model_to_dict
import six
class DefaultContextSerializer(object):
    """
    Default class for serializing context data.

    Recursively converts Django model instances (including their cached
    foreign keys) into dicts, decodes embedded JSON strings, and walks
    lists and dicts so the whole context is plain serializable data.
    """

    def __init__(self, context):
        super(DefaultContextSerializer, self).__init__()
        self.context = context

    @property
    def data(self):
        """
        Data property that will return the serialized data
        :return:
        """
        return self.serialize_value(self.context)

    def serialize_value(self, value):
        """
        Given a value, ensure that it is serialized properly
        :param value:
        :return:
        """
        # Each serializer either transforms the value or returns it
        # unchanged; order matters (models first, then nested containers).
        serialize_methods = [
            self.serialize_model,
            self.serialize_json_string,
            self.serialize_list,
            self.serialize_dict
        ]
        for serialize_method in serialize_methods:
            value = serialize_method(value)
        return value

    def serialize_model(self, value):
        """
        Serializes a model and all of its prefetched foreign keys
        :param value:
        :return:
        """
        if not isinstance(value, models.Model):
            return value
        serialized_model = model_to_dict(value)
        # Replace plain FK ids with the related objects when they are
        # already cached on the instance (avoids extra queries).
        for model_field, model_value in serialized_model.items():
            model_state = value._state
            # Django >= 2
            if hasattr(model_state, 'fields_cache'):  # pragma: no cover
                if model_state.fields_cache.get(model_field):
                    serialized_model[model_field] = model_state.fields_cache.get(model_field)
            else:  # pragma: no cover
                # Django < 2
                cache_field = '_{0}_cache'.format(model_field)
                if hasattr(value, cache_field):
                    serialized_model[model_field] = getattr(value, cache_field)
        # Recurse so related model instances get serialized too
        return self.serialize_value(serialized_model)

    def serialize_json_string(self, value):
        """
        Tries to load an encoded json string back into an object
        :param value:
        :return:
        """
        if not isinstance(value, six.string_types):
            return value
        # fix: the original test ``not value.startswith('{') or
        # value.startswith('[')`` short-circuited so strings starting with
        # '[' (JSON arrays) were never decoded; accept either bracket.
        if not value.startswith(('{', '[')):
            return value
        try:
            return json.loads(value)
        # fix: narrowed from a bare ``except:`` -- only a decode failure
        # means "not JSON after all".
        except ValueError:
            return value

    def serialize_list(self, value):
        """
        Ensure that all values of a list or tuple are serialized
        :return:
        """
        if not isinstance(value, (list, tuple)):
            return value
        return [
            self.serialize_value(list_value)
            for list_value in value
        ]

    def serialize_dict(self, value):
        """
        Ensure that all values of a dictionary are properly serialized
        :param value:
        :return:
        """
        if not isinstance(value, dict):
            return value
        return {
            dict_key: self.serialize_value(dict_value)
            for dict_key, dict_value in value.items()
        }
from django.db import models
from django.forms import model_to_dict
import six
class DefaultContextSerializer(object):
    """
    Default implementation for serializing event context data.
    """

    def __init__(self, context):
        """
        :param context: the raw context object to serialize
        """
        super(DefaultContextSerializer, self).__init__()
        self.context = context

    @property
    def data(self):
        """
        The fully serialized form of the wrapped context.

        :return: the context with models, json strings, lists and dicts
            recursively serialized
        """
        return self.serialize_value(self.context)
def serialize_value(self, value):
    """
    Run a value through every serializer until it settles.

    :param value: any context value
    :return: the serialized value
    """
    # Each serializer is a no-op for values it does not understand, so the
    # value can simply be piped through all of them in order.
    for serialize in (
        self.serialize_model,
        self.serialize_json_string,
        self.serialize_list,
        self.serialize_dict,
    ):
        value = serialize(value)
    return value
def serialize_model(self, value):
    """
    Serializes a model and all of its prefetched foreign keys.

    :param value: any context value
    :return: a recursively-serialized dict of the model's fields, or
        ``value`` unchanged when it is not a Django model
    """
    # Check if the context value is a model
    if not isinstance(value, models.Model):
        return value
    # Serialize the model's concrete fields into a plain dict
    serialized_model = model_to_dict(value)
    # Replace fields whose related objects were already fetched with the
    # cached instance, so prefetched relations survive serialization
    for model_field, model_value in serialized_model.items():
        model_state = value._state
        # Django >= 2 keeps cached relations in _state.fields_cache
        if hasattr(model_state, 'fields_cache'):  # pragma: no cover
            # NOTE(review): truthiness check — a cached falsy relation
            # (e.g. None) is skipped here; confirm that is intended
            if model_state.fields_cache.get(model_field):
                serialized_model[model_field] = model_state.fields_cache.get(model_field)
        else:  # pragma: no cover
            # Django < 2 caches relations on the instance as '_<field>_cache'
            cache_field = '_{0}_cache'.format(model_field)
            if hasattr(value, cache_field):
                serialized_model[model_field] = getattr(value, cache_field)
    # Recurse so nested models / json strings inside the dict get serialized
    return self.serialize_value(serialized_model)
def serialize_json_string(self, value):
    """
    Try to decode a JSON-encoded string back into an object.

    :param value: any context value; only strings are considered
    :return: the decoded object, or ``value`` unchanged when it is not a
        string, does not look like JSON, or fails to parse
    """
    # Only strings can hold encoded json
    if not isinstance(value, six.string_types):
        return value
    # Only attempt decoding for strings that look like a json object or array.
    # The previous check `not value.startswith('{') or value.startswith('[')`
    # returned early for '['-prefixed strings due to operator precedence, so
    # json arrays were never decoded.
    if not value.startswith(('{', '[')):
        return value
    # Attempt the decode; invalid json falls through to the raw value
    # (json.loads raises ValueError / its subclass JSONDecodeError)
    try:
        return json.loads(value)
    except ValueError:
        return value
def serialize_list(self, value):
    """
    Recursively serialize every element of a list or tuple.

    :param value: any context value
    :return: a list with each element serialized, or ``value`` unchanged
        when it is not a list or tuple
    """
    # Non-sequence values pass through untouched
    if not isinstance(value, (list, tuple)):
        return value
    # Map each element through the full serializer pipeline
    return list(map(self.serialize_value, value))
def serialize_dict(self, value):
    """
    Ensure that all values of a dictionary are properly serialized.

    :param value: any context value
    :return: a dict with each value recursively serialized, or ``value``
        unchanged when it is not a dict
    """
    # Non-dict values pass through untouched
    if not isinstance(value, dict):
        return value
    # Rebuild the dict, serializing each value (keys are left as-is)
    return {
        dict_key: self.serialize_value(dict_value)
        for dict_key, dict_value in value.items()
} | 0.786869 | 0.3027 |
import os
import json
import pickle
import pandas as pd
import tensorflow as tf
from NCF import NCF
from dataset.dataset import Dataset
from dataset.python_splitters import python_chrono_split
from evaluate import evaluate_model_spark
from grid_search import GridSearch
# TF1-style command-line flags; values are read everywhere via flags.FLAGS
flags = tf.app.flags
flags.DEFINE_string("data", ".", "Path to data file")
flags.DEFINE_integer("epoch", 100, "Epoch to train [100]")
flags.DEFINE_integer("batch_size", 128, "The size of batch [128]")
flags.DEFINE_integer("factors", 8, "The number of latent factors [8]")
flags.DEFINE_float("learning_rate", 5e-3, "The learning rate [5e-3]")
flags.DEFINE_boolean(
    "hpo", False, "Enable hyperparameter optimization [False]")
flags.DEFINE_string("delimiter", ",", "")
# Expected column order of the ratings file
header = ("userID", "itemID", "rating", "timestamp")
def create_dataset(data_path, split=0.0):
    """
    Load the ratings CSV and wrap it in a Dataset.

    :param data_path: path to the ratings file
    :param split: train fraction; 0.0 means no train/test split
    :return: a Dataset over the full frame, or over a chronological
        train/test split when ``split`` > 0
    """
    df = pd.read_csv(
        data_path,
        engine="python",
        names=header,
        # NOTE(review): header=1 treats the file's second row as a header and
        # discards the rows up to it even though explicit names are given —
        # confirm the input format; header=0 or header=None may be intended
        header=1,
        sep=flags.FLAGS.delimiter
    )
    if split == 0.0:
        return Dataset(df)
    else:
        # Chronological split preserves temporal ordering per user
        train, test = python_chrono_split(df, split)
        return Dataset(train, test)
def train_model(data, checkpoint_path, model_type="NeuMF", n_factors=flags.FLAGS.factors, layer_sizes=[16, 8, 4],
                n_epochs=flags.FLAGS.epoch, batch_size=flags.FLAGS.batch_size, learning_rate=flags.FLAGS.learning_rate,):
    """
    Train an NCF model on ``data`` and persist it under ``checkpoint_path``.

    Alongside the model weights this saves the user/item ID mappings and a
    full flag/parameter snapshot (pickle + json) needed to reload the model.

    NOTE(review): the flag-based defaults are evaluated once at import time;
    confirm flags are parsed before this module is imported.

    :param data: Dataset exposing n_users / n_items and training examples
    :param checkpoint_path: directory to write the model and metadata into
    :param model_type: NCF variant to build (default "NeuMF")
    :param n_factors: number of latent factors
    :param layer_sizes: MLP tower layer sizes
    :param n_epochs: number of training epochs
    :param batch_size: minibatch size
    :param learning_rate: optimizer learning rate
    :return: the fitted NCF model
    """
    # Snapshot every flag plus the dataset dimensions for reproducibility
    parameters = flags.FLAGS.flag_values_dict()
    parameters["n_users"] = data.n_users
    parameters["n_items"] = data.n_items
    model = NCF(
        n_users=data.n_users,
        n_items=data.n_items,
        model_type=model_type,
        n_factors=n_factors,
        layer_sizes=layer_sizes,
        n_epochs=n_epochs,
        batch_size=batch_size,
        learning_rate=learning_rate
    )
    model.fit(data)
    model.save(dir_name=checkpoint_path)
    # Save ID mappings; os.path.join replaces '/'-string concatenation so the
    # paths are portable
    with open(os.path.join(checkpoint_path, 'user_mapping.p'), 'wb') as fp:
        pickle.dump(model.user2id, fp, protocol=pickle.HIGHEST_PROTOCOL)
    with open(os.path.join(checkpoint_path, 'item_mapping.p'), 'wb') as fp:
        pickle.dump(model.item2id, fp, protocol=pickle.HIGHEST_PROTOCOL)
    # Save parameters in both pickle (exact) and json (human-readable) form
    with open(os.path.join(checkpoint_path, 'parameters.p'), 'wb') as fp:
        pickle.dump(parameters, fp, protocol=pickle.HIGHEST_PROTOCOL)
    with open(os.path.join(checkpoint_path, 'parameters.json'), 'w') as fp:
        json.dump(parameters, fp)
    return model
def main():
    """
    Entry point: resolve data/model paths from the environment, then either
    run a grid search (--hpo) or train directly with the flag values.
    """
    model_path = ""
    data_path = ""
    # Watson ML-style environment overrides; default to the working directory
    if "RESULT_DIR" in os.environ:
        model_path = os.environ["RESULT_DIR"]
    if "DATA_DIR" in os.environ:
        data_path = os.environ["DATA_DIR"]
    checkpoint_path = os.path.join(model_path, "model", "checkpoint")
    data_path = os.path.join(data_path, flags.FLAGS.data)
    # 80/20 chronological split for evaluation during HPO / training
    data = create_dataset(data_path, split=0.8)
    if flags.FLAGS.hpo:
        # Check if HPO flags set
        print("Running hyperparameter optimization")
        params = {"learning_rate": [1e-3, 5e-3, 1e-2],
                  "n_factors": [8, 16, 32], "n_epochs": [50, 100]}
        grid = GridSearch(model_fn=NCF, param_grid=params,
                          scoring_fn=evaluate_model_spark)
        optimized_params = grid.run(data)
        # Retrain on the full (unsplit) dataset with the best parameters
        full_data = create_dataset(data_path)
        train_model(full_data, checkpoint_path, **optimized_params)
    else:
        train_model(data, checkpoint_path)
if __name__ == "__main__":
main() | training/training_code/train_ncf.py | import os
import json
import pickle
import pandas as pd
import tensorflow as tf
from NCF import NCF
from dataset.dataset import Dataset
from dataset.python_splitters import python_chrono_split
from evaluate import evaluate_model_spark
from grid_search import GridSearch
# TF1-style command-line flags; values are read everywhere via flags.FLAGS
flags = tf.app.flags
flags.DEFINE_string("data", ".", "Path to data file")
flags.DEFINE_integer("epoch", 100, "Epoch to train [100]")
flags.DEFINE_integer("batch_size", 128, "The size of batch [128]")
flags.DEFINE_integer("factors", 8, "The number of latent factors [8]")
flags.DEFINE_float("learning_rate", 5e-3, "The learning rate [5e-3]")
flags.DEFINE_boolean(
    "hpo", False, "Enable hyperparameter optimization [False]")
flags.DEFINE_string("delimiter", ",", "")
# Expected column order of the ratings file
header = ("userID", "itemID", "rating", "timestamp")
def create_dataset(data_path, split=0.0):
    """
    Load the ratings CSV and wrap it in a Dataset.

    :param data_path: path to the ratings file
    :param split: train fraction; 0.0 means no train/test split
    :return: a Dataset over the full frame, or over a chronological
        train/test split when ``split`` > 0
    """
    df = pd.read_csv(
        data_path,
        engine="python",
        names=header,
        # NOTE(review): header=1 treats the file's second row as a header and
        # discards the rows up to it even though explicit names are given —
        # confirm the input format; header=0 or header=None may be intended
        header=1,
        sep=flags.FLAGS.delimiter
    )
    if split == 0.0:
        return Dataset(df)
    else:
        # Chronological split preserves temporal ordering per user
        train, test = python_chrono_split(df, split)
        return Dataset(train, test)
def train_model(data, checkpoint_path, model_type="NeuMF", n_factors=flags.FLAGS.factors, layer_sizes=[16, 8, 4],
                n_epochs=flags.FLAGS.epoch, batch_size=flags.FLAGS.batch_size, learning_rate=flags.FLAGS.learning_rate,):
    """
    Train an NCF model on ``data`` and persist it under ``checkpoint_path``.

    Alongside the model weights this saves the user/item ID mappings and a
    full flag/parameter snapshot (pickle + json) needed to reload the model.

    NOTE(review): the flag-based defaults are evaluated once at import time;
    confirm flags are parsed before this module is imported.

    :param data: Dataset exposing n_users / n_items and training examples
    :param checkpoint_path: directory to write the model and metadata into
    :param model_type: NCF variant to build (default "NeuMF")
    :param n_factors: number of latent factors
    :param layer_sizes: MLP tower layer sizes
    :param n_epochs: number of training epochs
    :param batch_size: minibatch size
    :param learning_rate: optimizer learning rate
    :return: the fitted NCF model
    """
    # Snapshot every flag plus the dataset dimensions for reproducibility
    parameters = flags.FLAGS.flag_values_dict()
    parameters["n_users"] = data.n_users
    parameters["n_items"] = data.n_items
    model = NCF(
        n_users=data.n_users,
        n_items=data.n_items,
        model_type=model_type,
        n_factors=n_factors,
        layer_sizes=layer_sizes,
        n_epochs=n_epochs,
        batch_size=batch_size,
        learning_rate=learning_rate
    )
    model.fit(data)
    model.save(dir_name=checkpoint_path)
    # Save ID mappings; os.path.join replaces '/'-string concatenation so the
    # paths are portable
    with open(os.path.join(checkpoint_path, 'user_mapping.p'), 'wb') as fp:
        pickle.dump(model.user2id, fp, protocol=pickle.HIGHEST_PROTOCOL)
    with open(os.path.join(checkpoint_path, 'item_mapping.p'), 'wb') as fp:
        pickle.dump(model.item2id, fp, protocol=pickle.HIGHEST_PROTOCOL)
    # Save parameters in both pickle (exact) and json (human-readable) form
    with open(os.path.join(checkpoint_path, 'parameters.p'), 'wb') as fp:
        pickle.dump(parameters, fp, protocol=pickle.HIGHEST_PROTOCOL)
    with open(os.path.join(checkpoint_path, 'parameters.json'), 'w') as fp:
        json.dump(parameters, fp)
    return model
def main():
    """
    Entry point: resolve data/model paths from the environment, then either
    run a grid search (--hpo) or train directly with the flag values.
    """
    model_path = ""
    data_path = ""
    # Watson ML-style environment overrides; default to the working directory
    if "RESULT_DIR" in os.environ:
        model_path = os.environ["RESULT_DIR"]
    if "DATA_DIR" in os.environ:
        data_path = os.environ["DATA_DIR"]
    checkpoint_path = os.path.join(model_path, "model", "checkpoint")
    data_path = os.path.join(data_path, flags.FLAGS.data)
    # 80/20 chronological split for evaluation during HPO / training
    data = create_dataset(data_path, split=0.8)
    if flags.FLAGS.hpo:
        # Check if HPO flags set
        print("Running hyperparameter optimization")
        params = {"learning_rate": [1e-3, 5e-3, 1e-2],
                  "n_factors": [8, 16, 32], "n_epochs": [50, 100]}
        grid = GridSearch(model_fn=NCF, param_grid=params,
                          scoring_fn=evaluate_model_spark)
        optimized_params = grid.run(data)
        # Retrain on the full (unsplit) dataset with the best parameters
        full_data = create_dataset(data_path)
        train_model(full_data, checkpoint_path, **optimized_params)
    else:
        train_model(data, checkpoint_path)
if __name__ == "__main__":
main() | 0.617167 | 0.204362 |
import argparse
import logging
import math
import random
from argparse import ArgumentParser, ArgumentTypeError, FileType
import ignite
import torch
from ignite.engine import Engine, Events, State, create_supervised_evaluator, create_supervised_trainer
from ignite.handlers import EarlyStopping, Timer
from ignite.metrics import BinaryAccuracy, Loss, Precision, Recall
from torch.nn import Module
from torch.optim import Adam, Optimizer
from torch.utils.data import ConcatDataset, DataLoader, Dataset
from torch.utils.data.dataset import Subset, random_split
from sock.model.data import WordEmbeddings, tokenize
from sock.model.data.batching import sentence_label_pad, sentence_pad
from sock.model.dataset import (CresciTweetDataset, Five38TweetDataset, LabelDataset, NbcTweetDataset,
SingleLabelDataset, TweetTensorDataset)
from sock.model.nn import ContextualLSTM
from sock.model.serial import load, save
from sock.utils import BOT, NOT_BOT, Metrics, Splits, expand_binary_class, split_integers, to_singleton_row
def positive_int(arg: str) -> int:
    """Parse *arg* as a strictly positive integer (argparse type function)."""
    value = int(arg)
    if value > 0:
        return value
    raise ArgumentTypeError(f"{value} is not a positive integer")
def positive_finite_float(arg: str) -> float:
    """Parse *arg* as a strictly positive, finite float (argparse type function)."""
    value = float(arg)
    # value > 0 already excludes NaN and -inf; isfinite rejects +inf
    if value > 0 and math.isfinite(value):
        return value
    raise ArgumentTypeError(f"{value} is not a positive and finite number")
def nonzero_finite_float(arg: str) -> float:
    """Parse *arg* as a finite float (argparse type function).

    NOTE: despite the name, zero and negative values are accepted — only NaN
    and infinities are rejected (e.g. --weight-decay defaults to 0.0).
    """
    value = float(arg)
    if math.isfinite(value):
        return value
    raise ArgumentTypeError(f"{value} is not a finite nonzero number")
def nonzero_fraction(arg: str) -> float:
    """Parse *arg* as a float strictly between 0 and 1 (argparse type function)."""
    value = float(arg)
    if value <= 0.0:
        raise ArgumentTypeError(f"{value} is not between 0 and 1 (exclusive)")
    if value >= 1.0:
        raise ArgumentTypeError(f"{value} is not between 0 and 1 (exclusive)")
    return value
def build_parser() -> ArgumentParser:
    """
    Build the CLI parser: data locations, Adam optimizer hyperparameters,
    LR-scheduler patience, and training hyperparameters.

    :return: the configured ArgumentParser
    """
    parser = ArgumentParser(
        description="Train a model"
    )
    # Input/output locations
    data_args = parser.add_argument_group("Data")
    data_args.add_argument(
        "--glove",
        help="The word vector embeddings to use",
        metavar="path",
        type=FileType('r', encoding="utf8"),
        required=True
    )
    data_args.add_argument(
        "--bots",
        help="One or more files containing tweets known to be from bots",
        metavar="path",
        type=FileType('r', encoding="utf8"),
        nargs="+",
        required=True
    )
    data_args.add_argument(
        "--humans",
        help="One or more files containing tweets known to be from humans",
        metavar="path",
        type=FileType('r', encoding="utf8"),
        nargs="+",
        required=True
    )
    data_args.add_argument(
        "--max-tweets",
        help="The maximum number of the given tweets to use in training the model. Default: all tweets.",
        metavar="max",
        type=positive_int
    )
    data_args.add_argument(
        "--output",
        help="Location to save the trained model",
        metavar="out",
        type=FileType("wb"),
        required=True
    )
    # Adam optimizer knobs
    optimizer_hyperparams = parser.add_argument_group("Optimizer Hyperparameters")
    optimizer_hyperparams.add_argument(
        "--lr",
        help="Learning rate (default: %(default)s)",
        type=positive_finite_float,
        default=1e-3,
        metavar="lr"
    )
    optimizer_hyperparams.add_argument(
        "--eps",
        help="Term added to the denominator to improve numerical stability (default: %(default)s)",
        type=positive_finite_float,
        default=1e-8,
        metavar="e"
    )
    optimizer_hyperparams.add_argument(
        "--beta0",
        help="First coefficient used for computing running averages of gradient and its square (default: %(default)s)",
        type=positive_finite_float,
        default=0.9,
        metavar="b0"
    )
    optimizer_hyperparams.add_argument(
        "--beta1",
        help="Second coefficient used for computing running averages of gradient and its square (default: %(default)s)",
        type=positive_finite_float,
        default=0.999,
        metavar="b1"
    )
    optimizer_hyperparams.add_argument(
        "--weight-decay",
        help="Weight decay (L2 penalty) (default: %(default)s)",
        type=nonzero_finite_float,
        default=0.0,
        metavar="wd"
    )
    optimizer_hyperparams.add_argument(
        "--amsgrad",
        help="Whether to use the AMSGrad variant of this algorithm from the paper On the Convergence of Adam and Beyond (default: %(default)s)",
        action="store_true"
    )
    # ReduceLROnPlateau patience
    lr_hyperparams = parser.add_argument_group("LR Scheduler Hyperparameters")
    lr_hyperparams.add_argument(
        "--lr-patience",
        help="If no improvement after this many epochs, reduce the learning rate (default: %(default)s)",
        type=positive_int,
        default=3,
        metavar="patience"
    )
    # Training loop / split knobs
    training_hyperparams = parser.add_argument_group("Training Hyperparameters")
    training_hyperparams.add_argument(
        "--max-epochs",
        help="The maximum number of passes to make over the input data (default: %(default)s)",
        type=positive_int,
        default=50,
        metavar="epochs"
    )
    training_hyperparams.add_argument(
        "--trainer-patience",
        help="If no improvement after this many epochs, end the training (default: %(default)s)",
        type=positive_int,
        default=10,
        metavar="patience"
    )
    training_hyperparams.add_argument(
        "--batch-size",
        help="The number of tweets to process at once (default: %(default)s)",
        metavar="size",
        type=positive_int,
        default=500
    )
    training_hyperparams.add_argument(
        "--train-split",
        help="Fraction of input data set aside for training the model (default: %(default)s)",
        type=nonzero_fraction,
        default=0.5
    )
    training_hyperparams.add_argument(
        "--valid-split",
        help="Fraction of input data set aside for tuning hyperparameters (default: %(default)s)",
        type=nonzero_fraction,
        default=0.2
    )
    training_hyperparams.add_argument(
        "--test-split",
        help="Fraction of input data set aside for evaluating model performance (default: %(default)s)",
        type=nonzero_fraction,
        default=0.3
    )
    return parser
def validate_args(args):
    """
    Validate cross-field constraints on the parsed arguments.

    :param args: parsed argparse namespace
    :raises ArgumentTypeError: if beta0 >= beta1, or if the three dataset
        split fractions do not sum to 1
    """
    if args.beta0 >= args.beta1:
        raise ArgumentTypeError(f"{args.beta0} is not less than {args.beta1}")
    # Compare with a tolerance: exact float equality rejects valid inputs such
    # as the defaults 0.5 + 0.2 + 0.3, which sum to 0.9999999999999999.
    total = args.train_split + args.valid_split + args.test_split
    if not math.isclose(total, 1.0, rel_tol=1e-9, abs_tol=1e-9):
        raise ArgumentTypeError(f"{args.train_split}, {args.valid_split}, and {args.test_split} do not add to 1")
def load_tweets(file, embeddings: WordEmbeddings) -> Dataset:
    """
    Load a tweet file, trying each supported format in turn.

    Formats are attempted in order (Cresci, NBC, 538); the first parser that
    succeeds wins.

    :param file: an open file object (only ``file.name`` is used here)
    :param embeddings: word embeddings used to tensorize the tweets
    :return: a TweetTensorDataset over the parsed tweets
    :raises ValueError: if no known format can parse the file
    """
    try:
        logging.debug("Loading %s as a Cresci-format dataset", file.name)
        tweets = CresciTweetDataset(file.name)
        logging.info("Loaded %s as a Cresci-format dataset (len=%d)", file.name, len(tweets))
        return TweetTensorDataset(tweets, tokenize, embeddings)
    except Exception as e:
        # Broad catch is intentional: any parse failure means "try next format"
        logging.debug("Couldn't load %s as a Cresci-format dataset: %s", file.name, e)
    try:
        logging.debug("Loading %s as a NBC-format dataset", file.name)
        tweets = NbcTweetDataset(file.name)
        logging.info("Loaded %s as a NBC-format dataset (len=%d)", file.name, len(tweets))
        return TweetTensorDataset(tweets, tokenize, embeddings)
    except Exception as e:
        logging.debug("Couldn't load %s as a NBC-format dataset: %s", file.name, e)
    try:
        logging.debug("Loading %s as a 538-format dataset", file.name)
        tweets = Five38TweetDataset(file.name)
        logging.info("Loaded %s as a 538-format dataset (len=%d)", file.name, len(tweets))
        return TweetTensorDataset(tweets, tokenize, embeddings)
    except Exception as e:
        logging.debug("Couldn't load %s as a 538-format dataset: %s", file.name, e)
    logging.error("Could not load %s as a tweet dataset!", file.name)
    raise ValueError(f"Could not load {file.name} as a tweet dataset")
def load_glove(args) -> WordEmbeddings:
    """Load the GloVe word embeddings named by --glove onto the GPU."""
    logging.info("Loading GloVe embeddings from %s", args.glove.name)
    vectors = WordEmbeddings(args.glove, device="cuda")
    logging.info(
        "Loaded GloVe embeddings from %s (dim=%d, device=%s, len=%d)",
        args.glove.name, vectors.dim, vectors.device, len(vectors)
    )
    return vectors
def create_model(args, glove: WordEmbeddings) -> ContextualLSTM:
    """Construct a CUDA-resident ContextualLSTM over the given embeddings.

    ``args`` is unused; it is kept for a uniform create_* signature.
    """
    lstm = ContextualLSTM(glove, device="cuda")
    lstm.to(device="cuda")
    logging.info("Created ContextualLSTM to train (device=%s)", lstm.device)
    return lstm
def create_optimizer(args, model: ContextualLSTM) -> Optimizer:
    """Build the Adam optimizer for *model* from the CLI hyperparameters."""
    # TODO: Exclude embedding weights from Adam
    betas = (args.beta0, args.beta1)
    adam = Adam(
        model.parameters(),
        lr=args.lr,
        betas=betas,
        eps=args.eps,
        weight_decay=args.weight_decay,
        amsgrad=args.amsgrad,
    )
    logging.info(
        "Created Adam optimizer (lr=%g, betas=(%g, %g), eps=%g, weight_decay=%g, amsgrad=%s)",
        args.lr, args.beta0, args.beta1, args.eps, args.weight_decay, args.amsgrad
    )
    return adam
def load_tweet_datasets(args, datasets, type: str, glove: WordEmbeddings) -> Dataset:
    """Load and concatenate several tweet files, optionally subsampling.

    When --max-tweets is set, a random half-share of the cap is kept (this
    function is called once for bots and once for humans).
    """
    parts = []
    for source in datasets:
        logging.info("Loading known %ss from %s", type, source.name)
        parts.append(load_tweets(source, glove))
    dataset = parts[0] if len(parts) == 1 else ConcatDataset(parts)
    if args.max_tweets is not None:
        chosen = random.sample(range(len(dataset)), args.max_tweets // 2)
        dataset = Subset(dataset, chosen)
    logging.info("Loaded %d %s datasets with %d tweets", len(parts), type, len(dataset))
    return dataset
def create_splits(args, type: str, data: Dataset) -> Splits:
    """Randomly split *data* into train/valid/test by the CLI fractions."""
    total = len(data)
    fractions = (args.train_split, args.valid_split, args.test_split)
    # Convert the fractions into integer lengths that sum to len(data)
    lengths = split_integers(total, fractions)
    logging.info(
        "Splitting %d %s tweets (train=%g, valid=%g, test=%g)",
        total, type, *fractions
    )
    parts = random_split(data, lengths)
    logging.info("Split %d %s tweets (train=%d, valid=%d, test=%d)", total, type, *lengths)
    return Splits(data, *parts)
def create_loader(args, human: DataLoader, bot: DataLoader, subset: str) -> DataLoader:
    """Label the human/bot datasets, merge them, and wrap in a shuffling DataLoader."""
    labeled = ConcatDataset([
        SingleLabelDataset(human, NOT_BOT),
        SingleLabelDataset(bot, BOT),
    ])
    loader = DataLoader(
        dataset=labeled,
        shuffle=True,
        batch_size=args.batch_size,
        collate_fn=sentence_label_pad,
    )
    logging.info("Created a %s DataLoader (len=%d, batch_size=%d)", subset, len(labeled), args.batch_size)
    return loader
def create_lr_scheduler(args, optimizer: Optimizer):
    """Reduce the LR when validation loss plateaus for --lr-patience epochs."""
    return torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, patience=args.lr_patience)
def create_evaluator(model: ContextualLSTM, cost: Module):
    """
    Build an ignite evaluator reporting loss, accuracy, recall and precision.

    :param model: the model to evaluate
    :param cost: loss module backing the "loss" metric
    :return: a supervised ignite evaluation Engine
    """
    evaluator = create_supervised_evaluator(
        model,
        metrics={
            # Outputs are reshaped to single-column form for the binary metrics
            "loss": Loss(cost, output_transform=to_singleton_row),
            "accuracy": BinaryAccuracy(output_transform=to_singleton_row),
            # Recall/precision need the explicit two-class expansion
            "recall": Recall(average=True, output_transform=expand_binary_class),
            "precision": Precision(average=True, output_transform=expand_binary_class),
        }
    )
    # NOTE(review): reaches into ignite's private _logger to quiet per-run
    # output; may break across ignite versions
    evaluator._logger.setLevel(logging.WARNING)
    return evaluator
def create_trainer(
    args,
    model: ContextualLSTM,
    optimizer: Optimizer,
    cost: Module,
    evaluator: Engine,
    scheduler,
    training_data: DataLoader,
    validation_data: DataLoader
):
    """
    Build the ignite training Engine, wiring up metric recording, LR
    scheduling, early stopping and timing via event handlers.

    :param args: parsed CLI arguments (uses trainer_patience)
    :param model: model to train; set to train mode here, eval mode on completion
    :param optimizer: optimizer stepped by the trainer
    :param cost: loss function
    :param evaluator: evaluation engine run after every epoch
    :param scheduler: LR scheduler stepped with the validation loss
    :param training_data: loader re-evaluated for training metrics
    :param validation_data: loader evaluated for validation metrics
    :return: the configured trainer Engine
    """
    model.train(True)
    trainer = ignite.engine.create_supervised_trainer(model, optimizer, cost, model.device)
    # Pre-create state so handlers can attach metric containers before run()
    trainer.state = ignite.engine.State()

    @trainer.on(Events.COMPLETED)
    def finish_training(trainer: Engine):
        # Switch back to eval mode once training ends
        model.train(False)
        logging.info("Finished training and evaluation")

    @trainer.on(Events.STARTED)
    def init_metrics(trainer: Engine):
        # Metrics(loss, accuracy, recall, precision): one series list each
        trainer.state.training_metrics = Metrics([], [], [], [])
        trainer.state.validation_metrics = Metrics([], [], [], [])
        logging.info("Initialized metrics")

    @trainer.on(Events.EPOCH_COMPLETED)
    def validate(trainer: Engine):
        # Record metrics over the training set
        training_metrics = evaluator.run(training_data).metrics  # type: Dict[str, float]
        trainer.state.training_metrics.loss.append(training_metrics["loss"])
        trainer.state.training_metrics.accuracy.append(training_metrics["accuracy"])
        trainer.state.training_metrics.recall.append(training_metrics["recall"])
        trainer.state.training_metrics.precision.append(training_metrics["precision"])
        logging.info(
            "[%d / %d] Train: (loss=%.4f, accuracy=%.4f, recall=%.4f, precision=%.4f",
            trainer.state.epoch,
            trainer.state.max_epochs,
            training_metrics["loss"],
            training_metrics["accuracy"],
            training_metrics["recall"],
            training_metrics["precision"]
        )
        # Record metrics over the validation set
        validation_metrics = evaluator.run(validation_data).metrics  # type: Dict[str, float]
        trainer.state.validation_metrics.loss.append(validation_metrics["loss"])
        trainer.state.validation_metrics.accuracy.append(validation_metrics["accuracy"])
        trainer.state.validation_metrics.recall.append(validation_metrics["recall"])
        trainer.state.validation_metrics.precision.append(validation_metrics["precision"])
        logging.info(
            "[%d / %d] Valid: (loss=%.4f, accuracy=%.4f, recall=%.4f, precision=%.4f",
            trainer.state.epoch,
            trainer.state.max_epochs,
            validation_metrics["loss"],
            validation_metrics["accuracy"],
            validation_metrics["recall"],
            validation_metrics["precision"]
        )
        # Drive the LR scheduler off the validation loss
        scheduler.step(validation_metrics["loss"])

    timer = Timer(average=True)

    @trainer.on(Events.COMPLETED)
    def record_time(trainer: Engine):
        trainer.state.duration = timer.value()

    def score_function(trainer: Engine) -> float:
        # EarlyStopping maximizes the score, so negate the validation loss
        return -trainer.state.validation_metrics.loss[-1]

    handler = EarlyStopping(patience=args.trainer_patience, score_function=score_function, trainer=trainer)
    trainer.add_event_handler(Events.EPOCH_COMPLETED, handler)
    timer.attach(trainer, start=Events.STARTED, pause=Events.COMPLETED)
    # NOTE(review): private _logger access, as in create_evaluator
    trainer._logger.setLevel(logging.WARNING)
    return trainer
def main():
    """
    Entry point: parse arguments, load embeddings and labeled tweet data,
    train the ContextualLSTM with early stopping, report metrics on the
    held-out test set, and save the trained model.
    """
    logging.basicConfig(
        format='[%(levelname)s %(asctime)s] %(message)s',
        datefmt='%Y-%m-%d %H:%M:%S',
        level=logging.INFO
    )
    parser = build_parser()
    args = parser.parse_args()
    # Training is CUDA-only; fail fast if the GPU stack is missing
    if not torch.cuda.is_available():
        raise RuntimeError("CUDA is required but not available")
    if not torch.backends.cudnn.is_available():
        raise RuntimeError("CUDNN is required but not available")
    validate_args(args)
    cost = torch.nn.BCELoss()
    glove = load_glove(args)
    # Load each class, split it, and build the three loaders
    bots = load_tweet_datasets(args, args.bots, "bot", glove)
    humans = load_tweet_datasets(args, args.humans, "human", glove)
    bot_splits = create_splits(args, "bot", bots)
    human_splits = create_splits(args, "human", humans)
    training_data = create_loader(args, human_splits.training, bot_splits.training, "training")
    validation_data = create_loader(args, human_splits.validation, bot_splits.validation, "validation")
    testing_data = create_loader(args, human_splits.testing, bot_splits.testing, "testing")
    model = create_model(args, glove)
    optimizer = create_optimizer(args, model)
    lr_scheduler = create_lr_scheduler(args, optimizer)
    evaluator = create_evaluator(model, cost)
    trainer = create_trainer(
        args,
        model,
        optimizer,
        cost,
        evaluator,
        lr_scheduler,
        training_data,
        validation_data
    )
    train_result = trainer.run(training_data, max_epochs=args.max_epochs)  # type: State
    logging.info("Running trained model on test set")
    test_metrics = evaluator.run(testing_data).metrics  # type: dict
    logging.info("Finished running trained model on test set")
    # Summarize the final epoch of each series plus the test-set results
    logging.info("Results:")
    logging.info("  Time: %.2fs", train_result.duration)
    logging.info("  Epochs: %d / %d", train_result.epoch, train_result.max_epochs)
    logging.info("  Iterations: %d", train_result.iteration)
    logging.info("  Training:")
    logging.info("    Loss: %.4f", train_result.training_metrics.loss[-1])
    logging.info("    Accuracy: %.4f", train_result.training_metrics.accuracy[-1])
    logging.info("    Recall: %.4f", train_result.training_metrics.recall[-1])
    logging.info("    Precision: %.4f", train_result.training_metrics.precision[-1])
    logging.info("  Validation:")
    logging.info("    Loss: %.4f", train_result.validation_metrics.loss[-1])
    logging.info("    Accuracy: %.4f", train_result.validation_metrics.accuracy[-1])
    logging.info("    Recall: %.4f", train_result.validation_metrics.recall[-1])
    logging.info("    Precision: %.4f", train_result.validation_metrics.precision[-1])
    logging.info("  Testing:")
    logging.info("    Loss: %.4f", test_metrics['loss'])
    logging.info("    Accuracy: %.4f", test_metrics['accuracy'])
    logging.info("    Recall: %.4f", test_metrics['recall'])
    logging.info("    Precision: %.4f", test_metrics['precision'])
    logging.info("Accuracy: %.2f%% of all guesses were correct", test_metrics["accuracy"] * 100)
    logging.info("Recall: %.2f%% of guesses that should have identified bots did", test_metrics["recall"] * 100)
    logging.info("Precision: %.2f%% of 'bot' guesses were correct", test_metrics["precision"] * 100)
    save(model, args.output)
    logging.info("Saved trained model (minus embeddings) to %s", args.output.name)
if __name__ == '__main__':
main() | sock/cli/train/__main__.py | import argparse
import logging
import math
import random
from argparse import ArgumentParser, ArgumentTypeError, FileType
import ignite
import torch
from ignite.engine import Engine, Events, State, create_supervised_evaluator, create_supervised_trainer
from ignite.handlers import EarlyStopping, Timer
from ignite.metrics import BinaryAccuracy, Loss, Precision, Recall
from torch.nn import Module
from torch.optim import Adam, Optimizer
from torch.utils.data import ConcatDataset, DataLoader, Dataset
from torch.utils.data.dataset import Subset, random_split
from sock.model.data import WordEmbeddings, tokenize
from sock.model.data.batching import sentence_label_pad, sentence_pad
from sock.model.dataset import (CresciTweetDataset, Five38TweetDataset, LabelDataset, NbcTweetDataset,
SingleLabelDataset, TweetTensorDataset)
from sock.model.nn import ContextualLSTM
from sock.model.serial import load, save
from sock.utils import BOT, NOT_BOT, Metrics, Splits, expand_binary_class, split_integers, to_singleton_row
def positive_int(arg: str) -> int:
    """Parse *arg* as a strictly positive integer (argparse type function)."""
    value = int(arg)
    if value > 0:
        return value
    raise ArgumentTypeError(f"{value} is not a positive integer")
def positive_finite_float(arg: str) -> float:
    """Parse *arg* as a strictly positive, finite float (argparse type function)."""
    value = float(arg)
    # value > 0 already excludes NaN and -inf; isfinite rejects +inf
    if value > 0 and math.isfinite(value):
        return value
    raise ArgumentTypeError(f"{value} is not a positive and finite number")
def nonzero_finite_float(arg: str) -> float:
    """Parse *arg* as a finite float (argparse type function).

    NOTE: despite the name, zero and negative values are accepted — only NaN
    and infinities are rejected (e.g. --weight-decay defaults to 0.0).
    """
    value = float(arg)
    if math.isfinite(value):
        return value
    raise ArgumentTypeError(f"{value} is not a finite nonzero number")
def nonzero_fraction(arg: str) -> float:
    """Parse *arg* as a float strictly between 0 and 1 (argparse type function)."""
    value = float(arg)
    if value <= 0.0:
        raise ArgumentTypeError(f"{value} is not between 0 and 1 (exclusive)")
    if value >= 1.0:
        raise ArgumentTypeError(f"{value} is not between 0 and 1 (exclusive)")
    return value
def build_parser() -> ArgumentParser:
    """
    Build the CLI parser: data locations, Adam optimizer hyperparameters,
    LR-scheduler patience, and training hyperparameters.

    :return: the configured ArgumentParser
    """
    parser = ArgumentParser(
        description="Train a model"
    )
    # Input/output locations
    data_args = parser.add_argument_group("Data")
    data_args.add_argument(
        "--glove",
        help="The word vector embeddings to use",
        metavar="path",
        type=FileType('r', encoding="utf8"),
        required=True
    )
    data_args.add_argument(
        "--bots",
        help="One or more files containing tweets known to be from bots",
        metavar="path",
        type=FileType('r', encoding="utf8"),
        nargs="+",
        required=True
    )
    data_args.add_argument(
        "--humans",
        help="One or more files containing tweets known to be from humans",
        metavar="path",
        type=FileType('r', encoding="utf8"),
        nargs="+",
        required=True
    )
    data_args.add_argument(
        "--max-tweets",
        help="The maximum number of the given tweets to use in training the model. Default: all tweets.",
        metavar="max",
        type=positive_int
    )
    data_args.add_argument(
        "--output",
        help="Location to save the trained model",
        metavar="out",
        type=FileType("wb"),
        required=True
    )
    # Adam optimizer knobs
    optimizer_hyperparams = parser.add_argument_group("Optimizer Hyperparameters")
    optimizer_hyperparams.add_argument(
        "--lr",
        help="Learning rate (default: %(default)s)",
        type=positive_finite_float,
        default=1e-3,
        metavar="lr"
    )
    optimizer_hyperparams.add_argument(
        "--eps",
        help="Term added to the denominator to improve numerical stability (default: %(default)s)",
        type=positive_finite_float,
        default=1e-8,
        metavar="e"
    )
    optimizer_hyperparams.add_argument(
        "--beta0",
        help="First coefficient used for computing running averages of gradient and its square (default: %(default)s)",
        type=positive_finite_float,
        default=0.9,
        metavar="b0"
    )
    optimizer_hyperparams.add_argument(
        "--beta1",
        help="Second coefficient used for computing running averages of gradient and its square (default: %(default)s)",
        type=positive_finite_float,
        default=0.999,
        metavar="b1"
    )
    optimizer_hyperparams.add_argument(
        "--weight-decay",
        help="Weight decay (L2 penalty) (default: %(default)s)",
        type=nonzero_finite_float,
        default=0.0,
        metavar="wd"
    )
    optimizer_hyperparams.add_argument(
        "--amsgrad",
        help="Whether to use the AMSGrad variant of this algorithm from the paper On the Convergence of Adam and Beyond (default: %(default)s)",
        action="store_true"
    )
    # ReduceLROnPlateau patience
    lr_hyperparams = parser.add_argument_group("LR Scheduler Hyperparameters")
    lr_hyperparams.add_argument(
        "--lr-patience",
        help="If no improvement after this many epochs, reduce the learning rate (default: %(default)s)",
        type=positive_int,
        default=3,
        metavar="patience"
    )
    # Training loop / split knobs
    training_hyperparams = parser.add_argument_group("Training Hyperparameters")
    training_hyperparams.add_argument(
        "--max-epochs",
        help="The maximum number of passes to make over the input data (default: %(default)s)",
        type=positive_int,
        default=50,
        metavar="epochs"
    )
    training_hyperparams.add_argument(
        "--trainer-patience",
        help="If no improvement after this many epochs, end the training (default: %(default)s)",
        type=positive_int,
        default=10,
        metavar="patience"
    )
    training_hyperparams.add_argument(
        "--batch-size",
        help="The number of tweets to process at once (default: %(default)s)",
        metavar="size",
        type=positive_int,
        default=500
    )
    training_hyperparams.add_argument(
        "--train-split",
        help="Fraction of input data set aside for training the model (default: %(default)s)",
        type=nonzero_fraction,
        default=0.5
    )
    training_hyperparams.add_argument(
        "--valid-split",
        help="Fraction of input data set aside for tuning hyperparameters (default: %(default)s)",
        type=nonzero_fraction,
        default=0.2
    )
    training_hyperparams.add_argument(
        "--test-split",
        help="Fraction of input data set aside for evaluating model performance (default: %(default)s)",
        type=nonzero_fraction,
        default=0.3
    )
    return parser
def validate_args(args):
    """
    Validate cross-field constraints on the parsed arguments.

    :param args: parsed argparse namespace
    :raises ArgumentTypeError: if beta0 >= beta1, or if the three dataset
        split fractions do not sum to 1
    """
    if args.beta0 >= args.beta1:
        raise ArgumentTypeError(f"{args.beta0} is not less than {args.beta1}")
    # Compare with a tolerance: exact float equality rejects valid inputs such
    # as the defaults 0.5 + 0.2 + 0.3, which sum to 0.9999999999999999.
    total = args.train_split + args.valid_split + args.test_split
    if not math.isclose(total, 1.0, rel_tol=1e-9, abs_tol=1e-9):
        raise ArgumentTypeError(f"{args.train_split}, {args.valid_split}, and {args.test_split} do not add to 1")
def load_tweets(file, embeddings: WordEmbeddings) -> Dataset:
    """
    Load a tweet file, trying each supported format in turn.

    Formats are attempted in order (Cresci, NBC, 538); the first parser that
    succeeds wins.

    :param file: an open file object (only ``file.name`` is used here)
    :param embeddings: word embeddings used to tensorize the tweets
    :return: a TweetTensorDataset over the parsed tweets
    :raises ValueError: if no known format can parse the file
    """
    try:
        logging.debug("Loading %s as a Cresci-format dataset", file.name)
        tweets = CresciTweetDataset(file.name)
        logging.info("Loaded %s as a Cresci-format dataset (len=%d)", file.name, len(tweets))
        return TweetTensorDataset(tweets, tokenize, embeddings)
    except Exception as e:
        # Broad catch is intentional: any parse failure means "try next format"
        logging.debug("Couldn't load %s as a Cresci-format dataset: %s", file.name, e)
    try:
        logging.debug("Loading %s as a NBC-format dataset", file.name)
        tweets = NbcTweetDataset(file.name)
        logging.info("Loaded %s as a NBC-format dataset (len=%d)", file.name, len(tweets))
        return TweetTensorDataset(tweets, tokenize, embeddings)
    except Exception as e:
        logging.debug("Couldn't load %s as a NBC-format dataset: %s", file.name, e)
    try:
        logging.debug("Loading %s as a 538-format dataset", file.name)
        tweets = Five38TweetDataset(file.name)
        logging.info("Loaded %s as a 538-format dataset (len=%d)", file.name, len(tweets))
        return TweetTensorDataset(tweets, tokenize, embeddings)
    except Exception as e:
        logging.debug("Couldn't load %s as a 538-format dataset: %s", file.name, e)
    logging.error("Could not load %s as a tweet dataset!", file.name)
    raise ValueError(f"Could not load {file.name} as a tweet dataset")
def load_glove(args) -> WordEmbeddings:
    """Load the GloVe word embeddings named by --glove onto the GPU."""
    logging.info("Loading GloVe embeddings from %s", args.glove.name)
    vectors = WordEmbeddings(args.glove, device="cuda")
    logging.info(
        "Loaded GloVe embeddings from %s (dim=%d, device=%s, len=%d)",
        args.glove.name, vectors.dim, vectors.device, len(vectors)
    )
    return vectors
def create_model(args, glove: WordEmbeddings) -> ContextualLSTM:
    """Construct a CUDA-resident ContextualLSTM over the given embeddings.

    ``args`` is unused; it is kept for a uniform create_* signature.
    """
    lstm = ContextualLSTM(glove, device="cuda")
    lstm.to(device="cuda")
    logging.info("Created ContextualLSTM to train (device=%s)", lstm.device)
    return lstm
def create_optimizer(args, model: ContextualLSTM) -> Optimizer:
    """Build the Adam optimizer for *model* from the CLI hyperparameters."""
    # TODO: Exclude embedding weights from Adam
    betas = (args.beta0, args.beta1)
    adam = Adam(
        model.parameters(),
        lr=args.lr,
        betas=betas,
        eps=args.eps,
        weight_decay=args.weight_decay,
        amsgrad=args.amsgrad,
    )
    logging.info(
        "Created Adam optimizer (lr=%g, betas=(%g, %g), eps=%g, weight_decay=%g, amsgrad=%s)",
        args.lr, args.beta0, args.beta1, args.eps, args.weight_decay, args.amsgrad
    )
    return adam
def load_tweet_datasets(args, datasets, type: str, glove: WordEmbeddings) -> Dataset:
    """Load the given tweet dataset files and concatenate them into one Dataset.

    :param args: parsed CLI arguments; ``args.max_tweets`` optionally caps the
        number of tweets kept (half the cap per class, sampled at random).
    :param datasets: open file objects for the dataset files to load.
    :param type: human-readable label used in log messages ("bot"/"human").
        (Name shadows the ``type`` builtin; kept for interface compatibility.)
    :param glove: word embeddings used to tensorize the tweets.
    :raises ValueError: if *datasets* is empty.
    """
    loaded = []
    for d in datasets:
        logging.info("Loading known %ss from %s", type, d.name)
        loaded.append(load_tweets(d, glove))
    # Fail loudly instead of letting ConcatDataset([]) raise something obscure.
    if not loaded:
        raise ValueError(f"No {type} datasets were given")
    dataset = loaded[0] if len(loaded) == 1 else ConcatDataset(loaded)
    if args.max_tweets is not None:
        # Clamp so random.sample never requests more tweets than exist.
        sample_size = min(len(dataset), args.max_tweets // 2)
        indices = random.sample(range(len(dataset)), sample_size)
        dataset = Subset(dataset, indices)
    logging.info("Loaded %d %s datasets with %d tweets", len(loaded), type, len(dataset))
    return dataset
def create_splits(args, type: str, data: Dataset) -> Splits:
    """Randomly partition *data* into train/valid/test subsets per the CLI ratios."""
    total = len(data)
    ratios = (args.train_split, args.valid_split, args.test_split)
    # split_integers converts the fractional ratios into exact subset sizes.
    split_lengths = split_integers(total, ratios)
    logging.info(
        "Splitting %d %s tweets (train=%g, valid=%g, test=%g)",
        total,
        type,
        *ratios
    )
    parts = random_split(data, split_lengths)
    logging.info("Split %d %s tweets (train=%d, valid=%d, test=%d)", total, type, *split_lengths)
    return Splits(data, *parts)
def create_loader(args, human: Dataset, bot: Dataset, subset: str) -> DataLoader:
    """Label the human/bot tweet splits, merge them, and wrap them in a DataLoader.

    :param human: dataset of human tweets (each sample gets the NOT_BOT label).
    :param bot: dataset of bot tweets (each sample gets the BOT label).
    :param subset: name of the split ("training"/"validation"/"testing"), for logs.

    Fixed: the parameters were annotated as ``DataLoader`` but receive Dataset
    splits (they are wrapped in SingleLabelDataset and fed to ConcatDataset);
    the rebinding of the parameters is also removed for clarity.
    """
    labeled_humans = SingleLabelDataset(human, NOT_BOT)
    labeled_bots = SingleLabelDataset(bot, BOT)
    dataset = ConcatDataset([labeled_humans, labeled_bots])
    dataloader = DataLoader(dataset=dataset, shuffle=True, batch_size=args.batch_size, collate_fn=sentence_label_pad)
    logging.info("Created a %s DataLoader (len=%d, batch_size=%d)", subset, len(dataset), args.batch_size)
    return dataloader
def create_lr_scheduler(args, optimizer: Optimizer):
    """Wrap *optimizer* in a ReduceLROnPlateau scheduler (patience from the CLI)."""
    return torch.optim.lr_scheduler.ReduceLROnPlateau(
        optimizer,
        patience=args.lr_patience,
    )
def create_evaluator(model: ContextualLSTM, cost: Module):
    """Build an ignite evaluator reporting loss, accuracy, recall and precision."""
    metrics = {
        "loss": Loss(cost, output_transform=to_singleton_row),
        "accuracy": BinaryAccuracy(output_transform=to_singleton_row),
        "recall": Recall(average=True, output_transform=expand_binary_class),
        "precision": Precision(average=True, output_transform=expand_binary_class),
    }
    evaluator = create_supervised_evaluator(model, metrics=metrics)
    # Quiet ignite's per-run chatter; this module does its own progress logging.
    evaluator._logger.setLevel(logging.WARNING)
    return evaluator
def create_trainer(
    args,
    model: ContextualLSTM,
    optimizer: Optimizer,
    cost: Module,
    evaluator: Engine,
    scheduler,
    training_data: DataLoader,
    validation_data: DataLoader
):
    """Assemble the ignite training Engine for *model*.

    Attaches, as event handlers: per-epoch metric collection for both the
    training and validation sets, LR scheduling on validation loss, early
    stopping, and wall-clock timing. Handler registration order matters:
    `validate` (EPOCH_COMPLETED) is registered before the EarlyStopping
    handler below, because the early-stopping score function reads the
    validation loss that `validate` appends.
    """
    model.train(True)
    trainer = ignite.engine.create_supervised_trainer(model, optimizer, cost, model.device)
    trainer.state = ignite.engine.State()

    @trainer.on(Events.COMPLETED)
    def finish_training(trainer: Engine):
        # Leave the model in eval mode once training ends.
        model.train(False)
        logging.info("Finished training and evaluation")

    @trainer.on(Events.STARTED)
    def init_metrics(trainer: Engine):
        # One growing list per metric (loss/accuracy/recall/precision),
        # for the training and validation sets respectively.
        trainer.state.training_metrics = Metrics([], [], [], [])
        trainer.state.validation_metrics = Metrics([], [], [], [])
        logging.info("Initialized metrics")

    @trainer.on(Events.EPOCH_COMPLETED)
    def validate(trainer: Engine):
        # Re-run the evaluator on the training set so per-epoch metrics are
        # computed the same way as the validation metrics.
        training_metrics = evaluator.run(training_data).metrics  # type: Dict[str, float]
        trainer.state.training_metrics.loss.append(training_metrics["loss"])
        trainer.state.training_metrics.accuracy.append(training_metrics["accuracy"])
        trainer.state.training_metrics.recall.append(training_metrics["recall"])
        trainer.state.training_metrics.precision.append(training_metrics["precision"])
        logging.info(
            "[%d / %d] Train: (loss=%.4f, accuracy=%.4f, recall=%.4f, precision=%.4f",
            trainer.state.epoch,
            trainer.state.max_epochs,
            training_metrics["loss"],
            training_metrics["accuracy"],
            training_metrics["recall"],
            training_metrics["precision"]
        )
        validation_metrics = evaluator.run(validation_data).metrics  # type: Dict[str, float]
        trainer.state.validation_metrics.loss.append(validation_metrics["loss"])
        trainer.state.validation_metrics.accuracy.append(validation_metrics["accuracy"])
        trainer.state.validation_metrics.recall.append(validation_metrics["recall"])
        trainer.state.validation_metrics.precision.append(validation_metrics["precision"])
        logging.info(
            "[%d / %d] Valid: (loss=%.4f, accuracy=%.4f, recall=%.4f, precision=%.4f",
            trainer.state.epoch,
            trainer.state.max_epochs,
            validation_metrics["loss"],
            validation_metrics["accuracy"],
            validation_metrics["recall"],
            validation_metrics["precision"]
        )
        # ReduceLROnPlateau steps on the monitored quantity (validation loss).
        scheduler.step(validation_metrics["loss"])

    timer = Timer(average=True)

    @trainer.on(Events.COMPLETED)
    def record_time(trainer: Engine):
        # Total wall-clock time of the run; read by main() for the summary.
        trainer.state.duration = timer.value()

    def score_function(trainer: Engine) -> float:
        # EarlyStopping maximizes the score, so negate the latest valid loss.
        return -trainer.state.validation_metrics.loss[-1]

    handler = EarlyStopping(patience=args.trainer_patience, score_function=score_function, trainer=trainer)
    trainer.add_event_handler(Events.EPOCH_COMPLETED, handler)
    timer.attach(trainer, start=Events.STARTED, pause=Events.COMPLETED)
    # Quiet ignite's own logging; this module logs progress itself.
    trainer._logger.setLevel(logging.WARNING)
    return trainer
def main():
    """CLI entry point: train the bot-detection LSTM and report metrics.

    Pipeline: parse args -> load GloVe -> load/split/batch the bot and human
    tweet datasets -> build model/optimizer/scheduler/evaluator/trainer ->
    train -> evaluate on the held-out test set -> save the model.
    """
    logging.basicConfig(
        format='[%(levelname)s %(asctime)s] %(message)s',
        datefmt='%Y-%m-%d %H:%M:%S',
        level=logging.INFO
    )
    parser = build_parser()
    args = parser.parse_args()
    # Training is GPU-only: model and embeddings are created on "cuda".
    if not torch.cuda.is_available():
        raise RuntimeError("CUDA is required but not available")
    if not torch.backends.cudnn.is_available():
        raise RuntimeError("CUDNN is required but not available")
    validate_args(args)
    cost = torch.nn.BCELoss()
    glove = load_glove(args)
    bots = load_tweet_datasets(args, args.bots, "bot", glove)
    humans = load_tweet_datasets(args, args.humans, "human", glove)
    bot_splits = create_splits(args, "bot", bots)
    human_splits = create_splits(args, "human", humans)
    training_data = create_loader(args, human_splits.training, bot_splits.training, "training")
    validation_data = create_loader(args, human_splits.validation, bot_splits.validation, "validation")
    testing_data = create_loader(args, human_splits.testing, bot_splits.testing, "testing")
    model = create_model(args, glove)
    optimizer = create_optimizer(args, model)
    lr_scheduler = create_lr_scheduler(args, optimizer)
    evaluator = create_evaluator(model, cost)
    trainer = create_trainer(
        args,
        model,
        optimizer,
        cost,
        evaluator,
        lr_scheduler,
        training_data,
        validation_data
    )
    train_result = trainer.run(training_data, max_epochs=args.max_epochs)  # type: State
    logging.info("Running trained model on test set")
    test_metrics = evaluator.run(testing_data).metrics  # type: dict
    logging.info("Finished running trained model on test set")
    # Summary: last recorded train/validation metrics plus the test run.
    logging.info("Results:")
    logging.info(" Time: %.2fs", train_result.duration)
    logging.info(" Epochs: %d / %d", train_result.epoch, train_result.max_epochs)
    logging.info(" Iterations: %d", train_result.iteration)
    logging.info(" Training:")
    logging.info(" Loss: %.4f", train_result.training_metrics.loss[-1])
    logging.info(" Accuracy: %.4f", train_result.training_metrics.accuracy[-1])
    logging.info(" Recall: %.4f", train_result.training_metrics.recall[-1])
    logging.info(" Precision: %.4f", train_result.training_metrics.precision[-1])
    logging.info(" Validation:")
    logging.info(" Loss: %.4f", train_result.validation_metrics.loss[-1])
    logging.info(" Accuracy: %.4f", train_result.validation_metrics.accuracy[-1])
    logging.info(" Recall: %.4f", train_result.validation_metrics.recall[-1])
    logging.info(" Precision: %.4f", train_result.validation_metrics.precision[-1])
    logging.info(" Testing:")
    logging.info(" Loss: %.4f", test_metrics['loss'])
    logging.info(" Accuracy: %.4f", test_metrics['accuracy'])
    logging.info(" Recall: %.4f", test_metrics['recall'])
    logging.info(" Precision: %.4f", test_metrics['precision'])
    logging.info("Accuracy: %.2f%% of all guesses were correct", test_metrics["accuracy"] * 100)
    logging.info("Recall: %.2f%% of guesses that should have identified bots did", test_metrics["recall"] * 100)
    logging.info("Precision: %.2f%% of 'bot' guesses were correct", test_metrics["precision"] * 100)
    # Embedding weights are excluded from the checkpoint (see log line below).
    save(model, args.output)
    logging.info("Saved trained model (minus embeddings) to %s", args.output.name)
if __name__ == '__main__':
    main()
import json
import pytest
from pathlib import Path
from django.conf import settings
from snoop.data import models, filesystem
from snoop.data.analyzers import archives
from snoop.data.utils import time_from_unix
pytestmark = [pytest.mark.django_db]
STOCK_PHOTO = {
'blob_pk': 'f7281c8a9cc404816f019382bd121c5fff28e0b816f632f43069b1f7c40c3e6e',
'name': 'stock-photo-house-mouse-standing-on-rear-feet-mus-musculus-137911070.jpg',
'type': 'file',
}
IS_THIS = {
'blob_pk': '2228e662341d939650d313b8971984d99b0d50791f7b4c06034b6f254436a3c3',
'name': 'is this?',
'type': 'file',
}
PACKAGE_JSON = {
'blob_pk': 'de16d79543b6aeaf19c025264cf9a368ce6bdd3f7375091835dc40a8559056cd',
'name': 'package.json',
'type': 'file',
}
JERRY_7Z = {
'blob_pk': '84e69da35c4fa4c4a3e7be2a1ff30773aaeb107258618d788897da8e634d3ff0',
'name': 'jerry.7z',
'type': 'file',
}
MOUSE_DIR = {
'children': [STOCK_PHOTO],
'name': 'mouse',
'type': 'directory',
}
WHAT_DIR = {
'children': [IS_THIS],
'name': 'what',
'type': 'directory',
}
ETC_DIR = {
'children': [JERRY_7Z],
'name': 'etc',
'type': 'directory',
}
JERRY_DIR = {
'children': [
PACKAGE_JSON,
],
'name': 'jerry',
'type': 'directory',
}
JERRY_ZIP = Path(settings.SNOOP_TESTDATA) / "data/disk-files/archives/tom/jail/jerry.zip"
ZIP_DOCX = Path(settings.SNOOP_TESTDATA) / "data/disk-files/archives/zip-with-docx-and-doc.zip"
JANE_DOE_PST = Path(settings.SNOOP_TESTDATA) / "data/pst/flags_jane_doe.pst"
SHAPELIB_MBOX = Path(settings.SNOOP_TESTDATA) / "data/mbox/shapelib.mbox"
TAR_GZ = Path(settings.SNOOP_TESTDATA) / "data/disk-files/archives/targz-with-pdf-doc-docx.tar.gz"
RAR = Path(settings.SNOOP_TESTDATA) / "data/disk-files/archives/rar-with-pdf-doc-docx.rar"
def test_unarchive_zip(taskmanager, testdata_current):
    """The jerry.zip listing has one root directory holding the expected entries."""
    blob = models.Blob.create_from_file(JERRY_ZIP)
    with archives.unarchive(blob).open() as f:
        listing = json.load(f)
    root = listing[0]
    assert root['name'] == 'jerry'
    assert root['type'] == 'directory'
    for entry in (ETC_DIR, PACKAGE_JSON, WHAT_DIR, MOUSE_DIR):
        assert entry in root['children']
def test_unarchive_pst(taskmanager, testdata_current):
    """Unarchiving the PST yields one mailbox directory with three children."""
    blob = models.Blob.create_from_file(JANE_DOE_PST)
    with archives.unarchive(blob).open() as f:
        listing = json.load(f)
    EML_NUMBER_5 = {  # noqa: F841
        "type": "file",
        "name": "5.eml",
        "blob_pk": "9c007ccf1720d6279fc64389321fd83e053c9c4abc885a1745e9bc6793d515c9"
    }
    [mailbox] = listing
    assert mailbox['name'] == '<EMAIL>'
    assert mailbox['type'] == 'directory'
    assert len(mailbox['children']) == 3
def test_unarchive_tar_gz(taskmanager, testdata_current):
    """A .tar.gz unarchives in two steps: gunzip to a tar, then the tar's contents."""
    gz_blob = models.Blob.create_from_file(TAR_GZ)
    with archives.unarchive(gz_blob).open() as f:
        [inner_tar] = json.load(f)
    assert inner_tar['type'] == 'file'
    tar_blob = models.Blob.objects.get(pk=inner_tar['blob_pk'])
    with archives.unarchive(tar_blob).open() as f:
        listing = json.load(f)
    names = {entry['name'] for entry in listing}
    assert names == {
        'sample (1).doc',
        'Sample_BulletsAndNumberings.docx',
        'cap33.pdf',
    }
def test_unarchive_rar(taskmanager, testdata_current):
    """Unarchiving a RAR lists the three documents it contains."""
    rar_blob = models.Blob.create_from_file(RAR)
    with archives.unarchive(rar_blob).open() as f:
        listing = json.load(f)
    names = {entry['name'] for entry in listing}
    assert names == {
        'sample (1).doc',
        'Sample_BulletsAndNumberings.docx',
        'cap33.pdf',
    }
def test_create_archive_files(taskmanager, testdata_current):
    """End-to-end: unarchive a zip blob, then materialize its listing as
    Directory/File rows via filesystem.create_archive_files."""
    assert models.Directory.objects.count() == 0  # empty?
    zip_blob = models.Blob.create_from_file(ZIP_DOCX)
    listing_blob = archives.unarchive(zip_blob)
    zip_parent_dir = models.Directory.objects.create()
    # NOTE(review): the File row is named after JERRY_ZIP but its blob is
    # ZIP_DOCX — presumably only the name matters for this test; confirm.
    zip_file = models.File.objects.create(
        name_bytes=JERRY_ZIP.name.encode('utf8'),
        parent_directory=zip_parent_dir,
        ctime=time_from_unix(0),
        mtime=time_from_unix(0),
        size=0,
        original=zip_blob,
        blob=zip_blob,
    )
    filesystem.create_archive_files(zip_file.pk, listing_blob)
    assert models.Directory.objects.count() == 2  # root and fake dir
    assert models.File.objects.count() == 3  # zip, docx and doc
    file_names = set(f.name for f in models.File.objects.all())
    assert file_names == {'jerry.zip', 'AppBody-Sample-English.docx', 'sample.doc'}
    # Exactly one directory is backed by a container file: the zip itself.
    d = models.Directory.objects.get(container_file__isnull=False)
    assert d.container_file == zip_file
def test_unarchive_mbox(taskmanager, testdata_current):
    """The shapelib mbox explodes into 28 hash-prefix directories of .eml files."""
    blob = models.Blob.create_from_file(SHAPELIB_MBOX)
    with archives.unarchive(blob).open() as f:
        listing = json.load(f)
    assert len(listing) == 28
    [dir07] = [entry for entry in listing if entry['name'] == '07']
    assert dir07 == {
        'children': [
            {'blob_pk': '7779d128a2cd425eba06693b56e1fc7351c1d66c2ee78c96dd4bd5649307f636',
             'name': '0716d9708d321ffb6a00818614779e779925365c.eml',
             'type': 'file'}],
        'name': '07',
        'type': 'directory'}
import pytest
from pathlib import Path
from django.conf import settings
from snoop.data import models, filesystem
from snoop.data.analyzers import archives
from snoop.data.utils import time_from_unix
pytestmark = [pytest.mark.django_db]
STOCK_PHOTO = {
'blob_pk': 'f7281c8a9cc404816f019382bd121c5fff28e0b816f632f43069b1f7c40c3e6e',
'name': 'stock-photo-house-mouse-standing-on-rear-feet-mus-musculus-137911070.jpg',
'type': 'file',
}
IS_THIS = {
'blob_pk': '2228e662341d939650d313b8971984d99b0d50791f7b4c06034b6f254436a3c3',
'name': 'is this?',
'type': 'file',
}
PACKAGE_JSON = {
'blob_pk': 'de16d79543b6aeaf19c025264cf9a368ce6bdd3f7375091835dc40a8559056cd',
'name': 'package.json',
'type': 'file',
}
JERRY_7Z = {
'blob_pk': '84e69da35c4fa4c4a3e7be2a1ff30773aaeb107258618d788897da8e634d3ff0',
'name': 'jerry.7z',
'type': 'file',
}
MOUSE_DIR = {
'children': [STOCK_PHOTO],
'name': 'mouse',
'type': 'directory',
}
WHAT_DIR = {
'children': [IS_THIS],
'name': 'what',
'type': 'directory',
}
ETC_DIR = {
'children': [JERRY_7Z],
'name': 'etc',
'type': 'directory',
}
JERRY_DIR = {
'children': [
PACKAGE_JSON,
],
'name': 'jerry',
'type': 'directory',
}
JERRY_ZIP = Path(settings.SNOOP_TESTDATA) / "data/disk-files/archives/tom/jail/jerry.zip"
ZIP_DOCX = Path(settings.SNOOP_TESTDATA) / "data/disk-files/archives/zip-with-docx-and-doc.zip"
JANE_DOE_PST = Path(settings.SNOOP_TESTDATA) / "data/pst/flags_jane_doe.pst"
SHAPELIB_MBOX = Path(settings.SNOOP_TESTDATA) / "data/mbox/shapelib.mbox"
TAR_GZ = Path(settings.SNOOP_TESTDATA) / "data/disk-files/archives/targz-with-pdf-doc-docx.tar.gz"
RAR = Path(settings.SNOOP_TESTDATA) / "data/disk-files/archives/rar-with-pdf-doc-docx.rar"
def test_unarchive_zip(taskmanager, testdata_current):
zip_blob = models.Blob.create_from_file(JERRY_ZIP)
listing_blob = archives.unarchive(zip_blob)
with listing_blob.open() as f:
listing = json.load(f)
assert listing[0]['name'] == 'jerry'
assert listing[0]['type'] == 'directory'
assert ETC_DIR in listing[0]['children']
assert PACKAGE_JSON in listing[0]['children']
assert WHAT_DIR in listing[0]['children']
assert MOUSE_DIR in listing[0]['children']
def test_unarchive_pst(taskmanager, testdata_current):
pst_blob = models.Blob.create_from_file(JANE_DOE_PST)
listing_blob = archives.unarchive(pst_blob)
with listing_blob.open() as f:
listing = json.load(f)
EML_NUMBER_5 = { # noqa: F841
"type": "file",
"name": "5.eml",
"blob_pk": "9c007ccf1720d6279fc64389321fd83e053c9c4abc885a1745e9bc6793d515c9"
}
[root_dir] = listing
assert root_dir['name'] == '<EMAIL>'
assert root_dir['type'] == 'directory'
assert len(root_dir['children']) == 3
def test_unarchive_tar_gz(taskmanager, testdata_current):
tar_gz_blob = models.Blob.create_from_file(TAR_GZ)
listing_blob = archives.unarchive(tar_gz_blob)
with listing_blob.open() as f:
listing = json.load(f)
[tar_file] = listing
assert tar_file['type'] == 'file'
tar_blob = models.Blob.objects.get(pk=tar_file['blob_pk'])
listing_blob = archives.unarchive(tar_blob)
with listing_blob.open() as f:
listing = json.load(f)
assert set(f['name'] for f in listing) == {
'sample (1).doc',
'Sample_BulletsAndNumberings.docx',
'cap33.pdf',
}
def test_unarchive_rar(taskmanager, testdata_current):
rar = models.Blob.create_from_file(RAR)
listing_blob = archives.unarchive(rar)
with listing_blob.open() as f:
listing = json.load(f)
assert set(f['name'] for f in listing) == {
'sample (1).doc',
'Sample_BulletsAndNumberings.docx',
'cap33.pdf',
}
def test_create_archive_files(taskmanager, testdata_current):
assert models.Directory.objects.count() == 0 # empty?
zip_blob = models.Blob.create_from_file(ZIP_DOCX)
listing_blob = archives.unarchive(zip_blob)
zip_parent_dir = models.Directory.objects.create()
zip_file = models.File.objects.create(
name_bytes=JERRY_ZIP.name.encode('utf8'),
parent_directory=zip_parent_dir,
ctime=time_from_unix(0),
mtime=time_from_unix(0),
size=0,
original=zip_blob,
blob=zip_blob,
)
filesystem.create_archive_files(zip_file.pk, listing_blob)
assert models.Directory.objects.count() == 2 # root and fake dir
assert models.File.objects.count() == 3 # zip, docx and doc
file_names = set(f.name for f in models.File.objects.all())
assert file_names == {'jerry.zip', 'AppBody-Sample-English.docx', 'sample.doc'}
d = models.Directory.objects.get(container_file__isnull=False)
assert d.container_file == zip_file
def test_unarchive_mbox(taskmanager, testdata_current):
mbox_blob = models.Blob.create_from_file(SHAPELIB_MBOX)
listing_blob = archives.unarchive(mbox_blob)
with listing_blob.open() as f:
listing = json.load(f)
assert len(listing) == 28
dir07 = [x for x in listing if x['name'] == '07'][0]
assert dir07 == {
'children': [
{'blob_pk': '7779d128a2cd425eba06693b56e1fc7351c1d66c2ee78c96dd4bd5649307f636',
'name': '0716d9708d321ffb6a00818614779e779925365c.eml',
'type': 'file'}],
'name': '07',
        'type': 'directory'}
from .function import Function
# Instantiate one of each function class to use
data = []
#region Basic IO
def o(variable):
print(variable.value, end='')
data.append(Function('o', 1, o))
def O(variable):
print(variable.value, end='')
quit()
data.append(Function('O', 1, O))
def p(variable):
print(variable.value)
data.append(Function('p', 1, p))
def P(variable):
print(variable.value)
quit()
data.append(Function('P', 1, P))
def q():
quit()
data.append(Function('q', 0, q))
def Q(variable):
if variable.value > 0:
quit()
data.append(Function('Q', 1, Q))
#endregion Basic IO
#region Comparing
def equals(var1, var2, var3):
var3.value = (var1.value == var2.value)
data.append(Function('=', 3, equals))
def e(variable):
variable.value = 1 - variable.value % 2
data.append(Function('e', 1, e))
def E(var1, var2):
var2.value = 1 - var1.value % 2
data.append(Function('E', 2, E))
#endregion Comparing
#region Basic math
def a(var1, var2):
var2.value += var1.value
data.append(Function('a', 2, a))
def A(var1, var2, var3):
var3.value = var1.value + var2.value
data.append(Function('A', 3, A))
def s(var1, var2):
var2.value = var1.value - var2.value
data.append(Function('s', 2, s))
def S(var1, var2, var3):
var3.value = var1.value - var2.value
data.append(Function('S', 3, S))
def m(var1, var2):
    """In-place multiply: var2 *= var1."""
    var2.value *= var1.value
# Fixed: m takes two variables, so its registered arity must be 2 (was 1).
data.append(Function('m', 2, m))
def M(var1, var2, var3):
    """Store var1 * var2 into var3."""
    var3.value = var1.value * var2.value
# Fixed: M takes three variables, so its registered arity must be 3 (was 1).
data.append(Function('M', 3, M))
def plus(variable):
variable.value += 1
data.append(Function('+', 1, plus))
def minus(variable):
variable.value -= 1
data.append(Function('-', 1, minus))
#endregion Basic math
#region Not basic math
def f(var1, var2):
    """Store the ascending list of divisors of var1 into var2."""
    n = var1.value
    var2.value = [d for d in range(1, n + 1) if n % d == 0]
data.append(Function('f', 2, f))
def F(var1, var2):
print('Sorry, haven\'t implemented F yet')
data.append(Function('F', 2, F))
#endregion Not basic math
#region Lists
def c(var1, var2, var3):
var3.value = (var1.value in var2.value)
data.append(Function('c', 3, c))
def C(var1, var2, var3):
var3.value = (var1.value not in var2.value)
data.append(Function('C', 3, C))
def semicolon(var1, var2, var3):
    """Indexed read: var3 = var1[var2]."""
    var3.value = var1.value[var2.value]
# NOTE(review): registered under ':' but the setter `colon` below is also
# registered as ':' — one of the two must be wrong (presumably this one
# should be ';', matching the function's name). TODO confirm against the
# language spec before changing.
data.append(Function(':', 3, semicolon))
def colon(var1, var2, var3):
    """Indexed write: var1[var3] = var2, growing the list with '' as needed.

    Fixed: the original padding length was ``len(var1.value) - var3.value + 1``,
    which is inverted — it appended garbage when the index was already in
    range, and failed to pad (non-positive repeat count, then IndexError)
    when the index was out of range.
    """
    shortfall = var3.value - len(var1.value) + 1
    if shortfall > 0:
        var1.value = [*var1.value, *[''] * shortfall]
    var1.value[var3.value] = var2.value
data.append(Function(':', 3, colon))
def l(var1, var2):
    """Store len(var1) into var2.

    Fixed: the original read ``len(var1)`` on the variable wrapper itself;
    every other function in this module operates on ``.value``.
    """
    var2.value = len(var1.value)
data.append(Function('l', 2, l))
def L(var1, var2):
    """Store len(var1) - 1 (the last valid index) into var2.

    Fixed: the original read ``len(var1)`` on the variable wrapper itself;
    every other function in this module operates on ``.value``.
    """
    var2.value = len(var1.value) - 1
data.append(Function('L', 2, L))
def slash(var1, var2):
var1.value.append(var2.value)
data.append(Function('/', 2, slash))
#endregion Lists
#region "Branching"
def i(var1, var2, var3):
if var1.value != 0:
var3.value = var2.value
data.append(Function('i', 3, i))
def I(var1, var2, var3):
if var1.value == 0:
print(var2.value)
else:
print(var3.value)
data.append(Function('I', 3, I))
#endregion "Branching"
#region Other
def h():
print('Hello, World!')
data.append(Function('h', 0, h))
def H(variable):
variable.value = 'Hello, World!'
data.append(Function('H', 1, H))
#endregion Other | flog/functions/__init__.py | from .function import Function
# Instantiate one of each function class to use
data = []
#region Basic IO
def o(variable):
print(variable.value, end='')
data.append(Function('o', 1, o))
def O(variable):
print(variable.value, end='')
quit()
data.append(Function('O', 1, O))
def p(variable):
print(variable.value)
data.append(Function('p', 1, p))
def P(variable):
print(variable.value)
quit()
data.append(Function('P', 1, P))
def q():
quit()
data.append(Function('q', 0, q))
def Q(variable):
if variable.value > 0:
quit()
data.append(Function('Q', 1, Q))
#endregion Basic IO
#region Comparing
def equals(var1, var2, var3):
var3.value = (var1.value == var2.value)
data.append(Function('=', 3, equals))
def e(variable):
variable.value = 1 - variable.value % 2
data.append(Function('e', 1, e))
def E(var1, var2):
var2.value = 1 - var1.value % 2
data.append(Function('E', 2, E))
#endregion Comparing
#region Basic math
def a(var1, var2):
var2.value += var1.value
data.append(Function('a', 2, a))
def A(var1, var2, var3):
var3.value = var1.value + var2.value
data.append(Function('A', 3, A))
def s(var1, var2):
var2.value = var1.value - var2.value
data.append(Function('s', 2, s))
def S(var1, var2, var3):
var3.value = var1.value - var2.value
data.append(Function('S', 3, S))
def m(var1, var2):
var2.value *= var1.value
data.append(Function('m', 1, m))
def M(var1, var2, var3):
var3.value = var1.value * var2.value
data.append(Function('M', 1, M))
def plus(variable):
variable.value += 1
data.append(Function('+', 1, plus))
def minus(variable):
variable.value -= 1
data.append(Function('-', 1, minus))
#endregion Basic math
#region Not basic math
def f(var1, var2):
var2.value = []
for i in range(1, var1.value + 1):
if var1.value % i == 0:
var2.value.append(i)
data.append(Function('f', 2, f))
def F(var1, var2):
print('Sorry, haven\'t implemented F yet')
data.append(Function('F', 2, F))
#endregion Not basic math
#region Lists
def c(var1, var2, var3):
var3.value = (var1.value in var2.value)
data.append(Function('c', 3, c))
def C(var1, var2, var3):
var3.value = (var1.value not in var2.value)
data.append(Function('C', 3, C))
def semicolon(var1, var2, var3):
var3.value = var1.value[var2.value]
data.append(Function(':', 3, semicolon))
def colon(var1, var2, var3):
var1.value = [*var1.value, *[''] * (len(var1.value) - var3.value + 1)]
var1.value[var3.value] = var2.value
data.append(Function(':', 3, colon))
def l(var1, var2):
var2.value = len(var1)
data.append(Function('l', 2, l))
def L(var1, var2):
var2.value = len(var1) - 1
data.append(Function('L', 2, L))
def slash(var1, var2):
var1.value.append(var2.value)
data.append(Function('/', 2, slash))
#endregion Lists
#region "Branching"
def i(var1, var2, var3):
if var1.value != 0:
var3.value = var2.value
data.append(Function('i', 3, i))
def I(var1, var2, var3):
if var1.value == 0:
print(var2.value)
else:
print(var3.value)
data.append(Function('I', 3, I))
#endregion "Branching"
#region Other
def h():
print('Hello, World!')
data.append(Function('h', 0, h))
def H(variable):
variable.value = 'Hello, World!'
data.append(Function('H', 1, H))
#endregion Other
import sys
import os
import matplotlib.pyplot as plt
import logging
from nukleus.SexpWriter import SexpWriter
sys.path.append('src')
sys.path.append('../src')
import nukleus
from nukleus.draw import Dot, Label, Line, Element
from nukleus import Circuit
from nukleus.Netlist import Netlist
from nukleus.spice.Potentiometer import Potentiometer
from nukleus.SchemDraw import SchemDraw
from nukleus.transform import get_pins
import numpy as np
# initialize the logger
logging.basicConfig(format='%(levelname)s:%(message)s', encoding='utf-8', level=logging.ERROR)
logging.getLogger().setLevel(logging.ERROR)
spice = nukleus.spice_path(['files/spice'])
cwd = os.getcwd() + "/files/spice"
cwd2 = "/home/etienne/Documents/elektrophon/lib/spice"
models = nukleus.SpiceModel.load_spice_models([cwd2])
draw = SchemDraw(library_path=['/usr/share/kicad/symbols'])
draw.add(Label("INPUT").rotate(180))
draw.add(Line())
draw.add(in_dot := Dot())
draw.add(Element("D1", "Diode:1N4148",
Spice_Netlist_Enabled='Y',
Spice_Primitive='D',
Spice_Model='D1N4148',
Spice_Node_Sequence='2 1').mirror('y').anchor('2'))
draw.add(Line().length(draw.unit*6))
draw.add(Element("U1", "Amplifier_Operational:TL072", unit=1,
Spice_Netlist_Enabled='Y',
Spice_Primitive='X',
Spice_Model='TL072c').anchor(3))
draw.add(Element("R2", "Device:R", value="180k").at(in_dot).rotate(0))
draw.add(Element("GND", "power:GND"))
draw.add(Line().length(draw.unit*2).at(get_pins(draw.U1[0])['2']).left())
draw.add(reference_dot := Dot())
draw.add(Element("R3", "Device:R", value="10k"))
draw.add(Element("GND", "power:GND"))
draw.add(Line().length(draw.unit*2).at(reference_dot).up())
draw.add(Element("R4", "Device:R", value="50k").rotate(180))
draw.add(Element("+15V", "power:+15V"))
draw.add(Line().length(draw.unit).at(get_pins(draw.U1[0])['1']).right())
draw.add(opamp_dot := Dot())
draw.add(Line().length(draw.unit*2).up())
draw.add(Element("D2", "Diode:1N4148",
Spice_Netlist_Enabled='Y',
Spice_Primitive='D',
Spice_Model='D1N4148',
Spice_Node_Sequence='2 1').mirror('y').anchor('2'))
draw.add(Element("RV1", "Device:R_Potentiometer", value="1Meg",
Spice_Netlist_Enabled='Y',
Spice_Primitive='X',
Spice_Model='Potentiometer1').anchor(1).rotate(90).mirror('x'))
draw.add(Line().length(draw.unit))
draw.add(Line().toy(get_pins(draw.RV1[0])['2']))
draw.add(rv1_dot := Dot())
draw.add(Line().toy(opamp_dot))
draw.add(reg_dot := Dot())
draw.add(Line().at(rv1_dot).tox(get_pins(draw.RV1[0])['2']))
draw.add(Line().length(draw.unit*2).down().at(opamp_dot))
draw.add(Element("D3", "Diode:1N4148",
Spice_Netlist_Enabled='Y',
Spice_Primitive='D',
Spice_Model='D1N4148',
Spice_Node_Sequence='2 1').mirror('y').anchor('2'))
draw.add(Element("RV2", "Device:R_Potentiometer", value="1Meg",
Spice_Netlist_Enabled='Y',
Spice_Primitive='X',
Spice_Model='Potentiometer2').anchor(1).rotate(90).mirror('x'))
draw.add(Line().length(draw.unit))
draw.add(Line().toy(get_pins(draw.RV2[0])['3']))
draw.add(rv2_dot := Dot())
draw.add(Line().toy(opamp_dot))
draw.add(reg_dot := Dot())
draw.add(Line().at(rv2_dot).toy(get_pins(draw.RV2[0])['2']))
draw.add(Line().tox(get_pins(draw.RV2[0])['2']))
draw.add(Line().at(reg_dot).length(draw.unit*2))
draw.add(cap_dot := Dot())
draw.add(Line().length(draw.unit*2))
draw.add(Element("U1", "Amplifier_Operational:TL072", unit=2,
Spice_Netlist_Enabled='Y',
Spice_Primitive='X',
Spice_Model='TL072c').anchor(5).mirror('x'))
draw.add(Element("C1", "Device:C", value="1u").rotate(0).at(cap_dot))
draw.add(Element("GND", "power:GND"))
draw.add(Line().at(get_pins(draw.U1[1])['6']).up().length(draw.unit*3))
draw.add(Line().tox(get_pins(draw.U1[1])['7']))
draw.add(Line().toy(get_pins(draw.U1[1])['7']))
draw.add(out_dot := Dot())
draw.add(Line().right())
draw.add(Element("R5", "Device:R", value="1k").rotate(90))
draw.add(Label("OUTPUT").rotate(0))
draw.add(Element("U1", "Amplifier_Operational:TL072", unit=3, on_schema=False).at((120, 50)))
draw.add(Element("+15V", "power:+15V", on_schema=False).at(get_pins(draw.U1[2])['8']))
draw.add(Element("-15V", "power:-15V", on_schema=False).at(get_pins(draw.U1[2])['4']).rotate(180))
# ---- Emit the schematic as KiCad s-expressions ----
print("outputsexp")
sexpwriter = SexpWriter()
draw.produce(sexpwriter)
# ---- Build the SPICE netlist/circuit from the drawing ----
print("spice")
circuit = nukleus.Circuit()
circuit.models(models)
netlist = Netlist(draw)
draw.produce(netlist)
# Potentiometer subcircuits: both 1M, wipers near opposite ends (1% / 99%).
pot1 = Potentiometer("Potentiometer1", 1000000, 0.01)
pot2 = Potentiometer("Potentiometer2", 1000000, 0.99)
circuit.subcircuit(pot1)
circuit.subcircuit(pot2)
nukleus.plot('envelope.svg', draw)
netlist.spice(circuit)
# Supply rails and the pulsed input stimulus for the transient run.
circuit.V("1", "+15V", "GND", "DC 15V")
circuit.V("2", "-15V", "GND", "DC -15V")
circuit.V("3", "INPUT", "GND", "DC 0 AC 0 PULSE(0 5 20m 0m 0m 60m 200m)")
print(f'--------------\n {circuit} \n---------------')
# ---- Run the transient analysis in ngspice ----
spice = nukleus.spice.ngspice()
spice.circuit(circuit.__str__())
print(spice.cmd("version"))
print(spice.circuit(circuit.__str__()))
ar_analysis = spice.transient('0.5ms', '200ms', '0ms')
# ---- Plot trigger vs. resulting envelope ----
fig_buffer, ax1_buffer = plt.subplots()
ax1_buffer.set_xlabel('time [ms]')
ax1_buffer.set_ylabel('amplitude [V]')
ax1_buffer.plot(ar_analysis['time'], ar_analysis['input'], color='Grey')
ax1_buffer.plot(ar_analysis['time'], ar_analysis['output'], color='Red')
ax1_buffer.legend(('trigger', 'envelope'), loc=(0.75,0.8))
plt.grid()
plt.tight_layout()
plt.show() | samples/draw2.py | import sys
import os
import matplotlib.pyplot as plt
import logging
from nukleus.SexpWriter import SexpWriter
sys.path.append('src')
sys.path.append('../src')
import nukleus
from nukleus.draw import Dot, Label, Line, Element
from nukleus import Circuit
from nukleus.Netlist import Netlist
from nukleus.spice.Potentiometer import Potentiometer
from nukleus.SchemDraw import SchemDraw
from nukleus.transform import get_pins
import numpy as np
# initialize the logger
logging.basicConfig(format='%(levelname)s:%(message)s', encoding='utf-8', level=logging.ERROR)
logging.getLogger().setLevel(logging.ERROR)
spice = nukleus.spice_path(['files/spice'])
cwd = os.getcwd() + "/files/spice"
cwd2 = "/home/etienne/Documents/elektrophon/lib/spice"
models = nukleus.SpiceModel.load_spice_models([cwd2])
draw = SchemDraw(library_path=['/usr/share/kicad/symbols'])
draw.add(Label("INPUT").rotate(180))
draw.add(Line())
draw.add(in_dot := Dot())
draw.add(Element("D1", "Diode:1N4148",
Spice_Netlist_Enabled='Y',
Spice_Primitive='D',
Spice_Model='D1N4148',
Spice_Node_Sequence='2 1').mirror('y').anchor('2'))
draw.add(Line().length(draw.unit*6))
draw.add(Element("U1", "Amplifier_Operational:TL072", unit=1,
Spice_Netlist_Enabled='Y',
Spice_Primitive='X',
Spice_Model='TL072c').anchor(3))
draw.add(Element("R2", "Device:R", value="180k").at(in_dot).rotate(0))
draw.add(Element("GND", "power:GND"))
draw.add(Line().length(draw.unit*2).at(get_pins(draw.U1[0])['2']).left())
draw.add(reference_dot := Dot())
draw.add(Element("R3", "Device:R", value="10k"))
draw.add(Element("GND", "power:GND"))
draw.add(Line().length(draw.unit*2).at(reference_dot).up())
draw.add(Element("R4", "Device:R", value="50k").rotate(180))
draw.add(Element("+15V", "power:+15V"))
draw.add(Line().length(draw.unit).at(get_pins(draw.U1[0])['1']).right())
draw.add(opamp_dot := Dot())
draw.add(Line().length(draw.unit*2).up())
draw.add(Element("D2", "Diode:1N4148",
Spice_Netlist_Enabled='Y',
Spice_Primitive='D',
Spice_Model='D1N4148',
Spice_Node_Sequence='2 1').mirror('y').anchor('2'))
draw.add(Element("RV1", "Device:R_Potentiometer", value="1Meg",
Spice_Netlist_Enabled='Y',
Spice_Primitive='X',
Spice_Model='Potentiometer1').anchor(1).rotate(90).mirror('x'))
draw.add(Line().length(draw.unit))
draw.add(Line().toy(get_pins(draw.RV1[0])['2']))
draw.add(rv1_dot := Dot())
draw.add(Line().toy(opamp_dot))
draw.add(reg_dot := Dot())
draw.add(Line().at(rv1_dot).tox(get_pins(draw.RV1[0])['2']))
draw.add(Line().length(draw.unit*2).down().at(opamp_dot))
draw.add(Element("D3", "Diode:1N4148",
Spice_Netlist_Enabled='Y',
Spice_Primitive='D',
Spice_Model='D1N4148',
Spice_Node_Sequence='2 1').mirror('y').anchor('2'))
draw.add(Element("RV2", "Device:R_Potentiometer", value="1Meg",
Spice_Netlist_Enabled='Y',
Spice_Primitive='X',
Spice_Model='Potentiometer2').anchor(1).rotate(90).mirror('x'))
draw.add(Line().length(draw.unit))
draw.add(Line().toy(get_pins(draw.RV2[0])['3']))
draw.add(rv2_dot := Dot())
draw.add(Line().toy(opamp_dot))
draw.add(reg_dot := Dot())
draw.add(Line().at(rv2_dot).toy(get_pins(draw.RV2[0])['2']))
draw.add(Line().tox(get_pins(draw.RV2[0])['2']))
draw.add(Line().at(reg_dot).length(draw.unit*2))
draw.add(cap_dot := Dot())
draw.add(Line().length(draw.unit*2))
draw.add(Element("U1", "Amplifier_Operational:TL072", unit=2,
Spice_Netlist_Enabled='Y',
Spice_Primitive='X',
Spice_Model='TL072c').anchor(5).mirror('x'))
draw.add(Element("C1", "Device:C", value="1u").rotate(0).at(cap_dot))
draw.add(Element("GND", "power:GND"))
draw.add(Line().at(get_pins(draw.U1[1])['6']).up().length(draw.unit*3))
draw.add(Line().tox(get_pins(draw.U1[1])['7']))
draw.add(Line().toy(get_pins(draw.U1[1])['7']))
draw.add(out_dot := Dot())
draw.add(Line().right())
draw.add(Element("R5", "Device:R", value="1k").rotate(90))
draw.add(Label("OUTPUT").rotate(0))
draw.add(Element("U1", "Amplifier_Operational:TL072", unit=3, on_schema=False).at((120, 50)))
draw.add(Element("+15V", "power:+15V", on_schema=False).at(get_pins(draw.U1[2])['8']))
draw.add(Element("-15V", "power:-15V", on_schema=False).at(get_pins(draw.U1[2])['4']).rotate(180))
print("outputsexp")
sexpwriter = SexpWriter()
draw.produce(sexpwriter)
print("spice")
circuit = nukleus.Circuit()
circuit.models(models)
netlist = Netlist(draw)
draw.produce(netlist)
pot1 = Potentiometer("Potentiometer1", 1000000, 0.01)
pot2 = Potentiometer("Potentiometer2", 1000000, 0.99)
circuit.subcircuit(pot1)
circuit.subcircuit(pot2)
nukleus.plot('envelope.svg', draw)
netlist.spice(circuit)
circuit.V("1", "+15V", "GND", "DC 15V")
circuit.V("2", "-15V", "GND", "DC -15V")
circuit.V("3", "INPUT", "GND", "DC 0 AC 0 PULSE(0 5 20m 0m 0m 60m 200m)")
print(f'--------------\n {circuit} \n---------------')
spice = nukleus.spice.ngspice()
spice.circuit(circuit.__str__())
print(spice.cmd("version"))
print(spice.circuit(circuit.__str__()))
ar_analysis = spice.transient('0.5ms', '200ms', '0ms')
fig_buffer, ax1_buffer = plt.subplots()
ax1_buffer.set_xlabel('time [ms]')
ax1_buffer.set_ylabel('amplitude [V]')
ax1_buffer.plot(ar_analysis['time'], ar_analysis['input'], color='Grey')
ax1_buffer.plot(ar_analysis['time'], ar_analysis['output'], color='Red')
ax1_buffer.legend(('trigger', 'envelope'), loc=(0.75,0.8))
plt.grid()
plt.tight_layout()
plt.show()
import json as _json
import logging
import os
import time
from logging.config import dictConfig
from attr import attrib, attrs, validators
from dateutil.parser import isoparse
from flask import Blueprint, Flask, current_app, make_response, request
from .auth import authenticate
from .rate_limiter import limiter
bp = Blueprint("logger", __name__, url_prefix="/logger")
# ............. Model ............
@attrs(frozen=True)
class LogRecord:
    """Immutable, validated representation of a single client log entry.

    Validation runs at construction time via the attrs validators below;
    any invalid field raises ValueError, which valid_log_record() relies on.
    """
    message: str = attrib()
    log_level: str = attrib()

    @log_level.validator
    def _validate_log_level(self, attribute, value):
        # Only the standard Python logging level names are accepted.
        if value not in ["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"]:
            raise ValueError("Invalid log level: " + value)

    application_name: str = attrib()
    process_name: str = attrib()
    # converter=int lets clients send numeric strings; instance_of checks the result.
    process_id: int = attrib(validator=validators.instance_of(int), converter=int)
    client_time: str = attrib()

    @client_time.validator
    def _validate_client_time(self, attribute, value):
        # Must be ISO-8601 parseable; the original string is what gets stored.
        try:
            isoparse(value)
        except Exception:
            raise ValueError("Improper date format " + value)

    extra: dict = attrib(factory=dict)

    @extra.validator
    def _validate_extra(self, attribute, value):
        # A dict is accepted as-is; anything else must at least be valid JSON text.
        if isinstance(value, dict):
            return
        try:
            _json.loads(value)
        except Exception as e:
            raise ValueError("Improper json format. " + str(e))

    @classmethod
    def from_json(cls, json):
        """Build a LogRecord from the camelCase JSON payload sent by clients.

        Raises KeyError when a required key is missing and ValueError when a
        field fails validation.
        """
        return cls(
            message=json["message"],
            application_name=json["applicationName"],
            process_name=json["processName"],
            process_id=json["processId"],
            log_level=json["logLevel"],
            client_time=json["dateTime"],
            # "extra" is the only optional key; default to an empty dict.
            extra=json.get("extra", dict()),
        )
"""
Sample Log:
{
"message": "User could not be found.",
"extra": {"userId": 5, "endpoint": "/users/5"},
"logLevel": "ERROR",
"applicationName": "BingoBangoBongo",
"authToken": "<PASSWORD>",
"dateTime": datetime("2020-04-20"),
    "processName": "node.exe",
"processId": 6545,
},
"""
# ............. Flask ............
"""
* Function Name: init
* Description: This function is used to initialize the logging service.
* Parameters:
Flask: app
* Returns: None
"""
def init(app: Flask):
    """Called during app creation to initialize the logger app.

    Ensures the log folder exists, installs the logging configuration,
    switches log timestamps to UTC, and wires the rate limiter's logger
    to the local log handlers.
    """
    LOG_FOLDER = app.config["LOG_FOLDER"]
    # Ensure the logging folder exists.  makedirs(exist_ok=True) avoids the
    # race between the old exists() check and mkdir(), and also creates any
    # missing parent directories.
    os.makedirs(LOG_FOLDER, exist_ok=True)
    # Import Config.
    dictConfig(app.config["LOGGER_CONFIG"])
    # Use UTC time when logging.
    logging.Formatter.converter = time.gmtime
    # Add the local logger's handlers to the rate limiter's logger.
    with app.app_context():
        local_logger = get_local_logger()
        for handler in local_logger.handlers:
            limiter.logger.addHandler(handler)
    # NOTE: for some reason, this is originally the string "False",
    # which is truthy. Without this line, the logger does not work.
    limiter.logger.disabled = False
# ............. Functions ............
def get_logger():
    """Return the application's main logger (name taken from Flask config)."""
    logger_name = current_app.config["LOGGER_NAME"]
    return logging.getLogger(logger_name)
def get_local_logger():
    """Return the local (service-internal) logger named in the Flask config."""
    logger_name = current_app.config["LOCAL_LOGGER_NAME"]
    return logging.getLogger(logger_name)
def get_time_format():
    """Return the datefmt string of the 'logger' formatter from the app config."""
    formatters = current_app.config["LOGGER_CONFIG"]["formatters"]
    return formatters["logger"]["datefmt"]
def log_record_from_json(json):
    """Thin wrapper: construct a validated LogRecord from a JSON payload."""
    return LogRecord.from_json(json)
def valid_log_record(json):
    """Return True when *json* can be turned into a LogRecord.

    LogRecord.from_json can raise ValueError (failed validation), KeyError
    (missing required key) and TypeError (e.g. a processId that int() cannot
    convert).  All three mean "invalid payload"; previously only ValueError
    was caught, so the others leaked out of this predicate as 500s.
    """
    try:
        LogRecord.from_json(json)
        return True
    except (ValueError, KeyError, TypeError):
        return False
def write_to_log(log_record, time_format):
    """Format *log_record* and emit it through the main application logger."""
    formatted_client_time = isoparse(log_record.client_time).strftime(time_format)
    # These fields are consumed by the log formatter via the `extra` mechanism.
    record_fields = {
        "application_name": log_record.application_name,
        "process_name": log_record.process_name,
        "process_id": log_record.process_id,
        "log_level": log_record.log_level,
        "client_time": formatted_client_time,
    }
    # Append the caller-supplied extra properties as JSON text.
    serialized_props = _json.dumps(log_record.extra)
    full_message = log_record.message + " " + serialized_props
    get_logger().info(full_message, extra=record_fields)
# .............. Routes ..............
@bp.route("/")
@limiter.limit("5 per minute")
def index():
    """Trivial liveness endpoint (rate limited to 5 requests per minute)."""
    greeting = "Hello World!"
    return greeting
@bp.route("/log", methods=["GET", "POST"])
@authenticate
def log():
    """
    * Function Name: log
    * Description: This is a simple view that writes to a log file.
    * Parameters: None
    * Returns: None
    """
    # A GET request falls through to the empty-string response at the bottom.
    if request.method == "POST":
        json = request.json
        # Keys every client payload must contain (camelCase on the wire).
        required_params = [
            "message",
            "applicationName",
            "processName",
            "processId",
            "logLevel",
            "dateTime",
        ]
        if any(x not in json for x in required_params):
            error_message = "missing required params."
            current_app.logger.info(error_message)
            return make_response({"message": error_message}, 400)
        if not valid_log_record(json):
            # Re-parse purely to recover the validation error text for the
            # 400 response (from_json raises with a descriptive message).
            try:
                LogRecord.from_json(json)
            except Exception as e:
                error_message = "invalid log record: " + str(e)
                current_app.logger.info(error_message)
                return make_response({"message": error_message}, 400)
        log_record = log_record_from_json(json)
        # CLEANUP: consider storing the format string in the log record instead
        client_time_format = get_time_format()
        write_to_log(log_record, client_time_format)
        return "Success!"
return "" | nad_logging_service/nad_logging_service/logger.py | import json as _json
import logging
import os
import time
from logging.config import dictConfig
from attr import attrib, attrs, validators
from dateutil.parser import isoparse
from flask import Blueprint, Flask, current_app, make_response, request
from .auth import authenticate
from .rate_limiter import limiter
bp = Blueprint("logger", __name__, url_prefix="/logger")
# ............. Model ............
@attrs(frozen=True)
class LogRecord:
message: str = attrib()
log_level: str = attrib()
@log_level.validator
def _validate_log_level(self, attribute, value):
if value not in ["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"]:
raise ValueError("Invalid log level: " + value)
application_name: str = attrib()
process_name: str = attrib()
process_id: int = attrib(validator=validators.instance_of(int), converter=int)
client_time: str = attrib()
@client_time.validator
def _validate_client_time(self, attribute, value):
try:
isoparse(value)
except Exception:
raise ValueError("Improper date format " + value)
extra: dict = attrib(factory=dict)
@extra.validator
def _validate_extra(self, attribute, value):
if isinstance(value, dict):
return
try:
_json.loads(value)
except Exception as e:
raise ValueError("Improper json format. " + str(e))
@classmethod
def from_json(cls, json):
message = json["message"]
application_name = json["applicationName"]
process_name = json["processName"]
process_id = json["processId"]
log_level = json["logLevel"]
client_time = json["dateTime"]
extra = json["extra"] if "extra" in json else dict()
return cls(
message=message,
application_name=application_name,
process_name=process_name,
process_id=process_id,
log_level=log_level,
client_time=client_time,
extra=extra,
)
"""
Sample Log:
{
"message": "User could not be found.",
"extra": {"userId": 5, "endpoint": "/users/5"},
"logLevel": "ERROR",
"applicationName": "BingoBangoBongo",
"authToken": "<PASSWORD>",
"dateTime": datetime("2020-04-20"),
"processName": "node.exe"
"processId": 6545,
},
"""
# ............. Flask ............
"""
* Function Name: init
* Description: This function is used to initialize the logging service.
* Parameters:
Flask: app
* Returns: None
"""
def init(app: Flask):
""" This is called during app creation to initialize the logger app. """
LOG_FOLDER = app.config["LOG_FOLDER"]
# Ensure Logging folder exists
if not os.path.exists(LOG_FOLDER):
os.mkdir(LOG_FOLDER)
# Import Config.
dictConfig(app.config["LOGGER_CONFIG"])
# Use UTC time when logging.
logging.Formatter.converter = time.gmtime
# Add local logger to rate limiter
with app.app_context():
local_logger = get_local_logger()
for handler in local_logger.handlers:
limiter.logger.addHandler(handler)
# NOTE: for some reason, this is originally the string "False",
# which is truthy. Without this line, the logger does not work.
limiter.logger.disabled = False
# ............. Functions ............
def get_logger():
return logging.getLogger(current_app.config["LOGGER_NAME"])
def get_local_logger():
return logging.getLogger(current_app.config["LOCAL_LOGGER_NAME"])
def get_time_format():
logger_config = current_app.config["LOGGER_CONFIG"]
return logger_config["formatters"]["logger"]["datefmt"]
def log_record_from_json(json):
log_record = LogRecord.from_json(json)
return log_record
def valid_log_record(json):
try:
LogRecord.from_json(json)
return True
except ValueError:
return False
def write_to_log(log_record, time_format):
message = log_record.message
formatted_client_time = isoparse(log_record.client_time).strftime(time_format)
extra = {
"application_name": log_record.application_name,
"process_name": log_record.process_name,
"process_id": log_record.process_id,
"log_level": log_record.log_level,
"client_time": formatted_client_time,
}
# ..Add extra properties
serialized_props = _json.dumps(log_record.extra)
message += " " + serialized_props
get_logger().info(message, extra=extra)
# .............. Routes ..............
@bp.route("/")
@limiter.limit("5 per minute")
def index():
return "Hello World!"
@bp.route("/log", methods=["GET", "POST"])
@authenticate
def log():
"""
* Function Name: log
* Description: This is a simple view that writes to a log file.
* Parameters: None
* Returns: None
"""
if request.method == "POST":
json = request.json
required_params = [
"message",
"applicationName",
"processName",
"processId",
"logLevel",
"dateTime",
]
if any(x not in json for x in required_params):
error_message = "missing required params."
current_app.logger.info(error_message)
return make_response({"message": error_message}, 400)
if not valid_log_record(json):
try:
LogRecord.from_json(json)
except Exception as e:
error_message = "invalid log record: " + str(e)
current_app.logger.info(error_message)
return make_response({"message": error_message}, 400)
log_record = log_record_from_json(json)
# CLEANUP: consider storing the format string in the log record instead
client_time_format = get_time_format()
write_to_log(log_record, client_time_format)
return "Success!"
return "" | 0.413477 | 0.073463 |
from __future__ import print_function
from __future__ import absolute_import
import inspect
from .util import *
from .base_nodes import PyNode
# Base class for all Value Literals
class PyValueLiteral(PyNode):
    """Base AST node for all value literals.

    Records the source line number and the raw value; subclasses supply the
    logical type via get_type(), which analyse() caches on self.ntype.
    """
    tag = "value_literal"
    def __init__(self, lineno, value):
        super(PyValueLiteral,self).__init__()
        self.lineno = lineno
        self.value = value
    def __repr__(self):
        return "%s(%d, %s)" % (self.classname(),self.lineno, repr(self.value))
    def __json__(self):
        # JSON form: [tag, lineno, value]
        return [ self.tag, self.lineno, self.value ]
    def __info__(self):
        info = super(PyValueLiteral, self).__info__()
        info[self.tag]["lineno"] = self.lineno
        info[self.tag]["value"] = self.value
        return info
    def analyse(self):
        print("ANALYSING VALUE LITERAL", self.tag)
        # Don't go into containers, because there aren't any
        self.ntype = self.get_type()
    def get_type(self):
        raise NotImplementedError("PyValueLiteral does not have any implicit type - its subtypes do")
# All non-number value literals first
class PyString(PyValueLiteral):
    """String literal; logical type "string"."""
    tag = "string"
    def get_type(self):
        return "string"
class PyCharacter(PyValueLiteral):
    """Single-character literal; logical type "char"."""
    tag = "character"
    def get_type(self):
        return "char"
class PyBoolean(PyValueLiteral):
    """Boolean literal; logical type "bool"."""
    tag = "boolean"
    def get_type(self):
        return "bool"
# Resist the urge to put PyIdentifiers into a LUT immediately.
class PyIdentifier(PyValueLiteral):
    """AST node for a bare name; its type is resolved from the context."""
    tag = "identifier"
    def __init__(self, *args):
        super(PyIdentifier, self).__init__(*args)
        # Candidate types gathered during analysis (currently unused here).
        self.types = []
    def add_rvalue(self, expression):
        # Record the expression assigned to this name in the current context.
        self.context.store(self.value, expression)
    def __info__(self):
        info = super(PyIdentifier, self).__info__()
        info[self.tag]["context "] = self.context
        info[self.tag]["types"] = self.types
        return info
    def get_type(self):
        return self.ntype
    def analyse(self):
        # NOTE: the prints below are development-time tracing left in place.
        print("PyIdentifier.analyse", self.value, self, dir(self))
        for i in dir(self):
            if i.startswith("__") and i.endswith("__"):
                continue
            if "bound method" in repr(getattr(self, i)):
                continue
            print("attr", self.tag, self, i, repr(getattr(self, i)), getattr(self, i) )
        # The identifier's type is the type of the expression bound to it.
        expression = self.context.lookup(self.value)
        self.ntype = expression.get_type()
    # FIXME: This name is created to allow attribute access lookup
    def name(self):
        return self.value
class ProfilePyNode(PyIdentifier):
    """Representation of something in the python code that's external to it - from a profile"""
    tag = "profile_identifier"
    def __init__(self, name, value_type):
        # NOTE(review): deliberately does not call super().__init__ (kept
        # as-is); it fills in the same fields directly.
        self.lineno = 0
        self.value = name
        #super(ProfilePyNode,self).__init__()
        self.ntype = value_type  # Logical type of this virtual value
    def analyse(self):
        # BUGFIX: this previously read an undefined name `expression`,
        # raising NameError whenever a profile identifier was analysed.
        # The type is fixed at construction time, so analysis is a no-op.
        pass
    def get_type(self):
        return self.ntype
class PyAttribute(PyNode):
    """AST node for the attribute part of an attribute access (the `.name`)."""
    tag = "attribute"
    def __init__(self, lineno, value):
        super(PyAttribute, self).__init__()
        self.lineno = lineno
        self.value = value
    def __repr__(self):
        return "%s(%d, %s)" % (self.classname(),self.lineno, repr(self.value))
    def __json__(self):
        return [ self.tag, self.lineno, jdump(self.value) ]
    def name(self):
        # Delegates to the wrapped value (e.g. a PyIdentifier).
        return self.value.name()
    def get_type(self):
        # Deliberately unsupported: dump caller info for debugging, then raise.
        print("CALLER:", inspect.stack()[1][3])
        print("ATTRIBUTE:", self.value)
        print(self.context)
        raise AttributeError("'PyAttribute' object has no attribute 'get_type'")
class PyAttributeAccess(PyNode):
    """AST node for `expression.attribute` access."""
    tag = "attributeaccess"
    def __init__(self, expression, attribute):
        super(PyAttributeAccess,self).__init__()
        self.expression = expression
        self.attribute = attribute
        # self.addchildren(expression, attribute) # FIXME: This might need to be uncommented. If we get an unusual bug, this might be the issue. (unlikely, but note to help me later)
    def __repr__(self):
        return "%s(%s, %s)" % (self.classname(), repr(self.expression), repr(self.attribute))
    def __json__(self):
        return [ self.tag, jdump(self.expression), jdump(self.attribute) ]
    def analyse(self):
        # Deliberate development trap: analysis of attribute access is not
        # implemented yet, so make it impossible to miss when it is reached.
        print("""
        **********************************************************************
        **********************************************************************
        **********************************************************************
        **********************************************************************
        """)
        raise Exception("HERE")
    def name(self):
        # Pair of (object name, attribute name) for lookup purposes.
        return [ self.expression.name(), self.attribute.name() ]
# Base class for all numbers
class PyNumber(PyValueLiteral):
    """Common base for numeric literals; adds in-place negation."""
    tag = "number"
    def negate(self):
        # Mutates the stored value and returns self for fluent use.
        self.value = - self.value
        return self
class PyFloat(PyNumber):
    """Floating-point literal; logical type "float"."""
    tag = "float"
    def get_type(self):
        return "float"
class PyInteger(PyNumber):
    """Integer literal; logical type "integer"."""
    tag = "integer"
    def get_type(self):
        return "integer"
class PySignedLong(PyNumber):
    """Signed long literal; logical type "signedlong"."""
    tag = "signedlong"
    def get_type(self):
        return "signedlong"
class PyUnSignedLong(PyNumber):
    """Unsigned long literal; logical type "unsignedlong"."""
    tag = "unsignedlong"
    def get_type(self):
        return "unsignedlong"
class PyHex(PyNumber):
    """Hexadecimal literal; treated as a plain integer."""
    tag = "hex"
    def get_type(self):
        return "integer"
class PyOctal(PyNumber):
    """Octal literal; treated as a plain integer."""
    tag = "octal"
    def get_type(self):
        return "integer"
class PyBinary(PyNumber):
    """Binary literal; treated as a plain integer."""
    tag = "binary"
    def get_type(self):
        return "integer"
from __future__ import print_function
from __future__ import absolute_import
import inspect
from .util import *
from .base_nodes import PyNode
# Base class for all Value Literals
class PyValueLiteral(PyNode):
tag = "value_literal"
def __init__(self, lineno, value):
super(PyValueLiteral,self).__init__()
self.lineno = lineno
self.value = value
def __repr__(self):
return "%s(%d, %s)" % (self.classname(),self.lineno, repr(self.value))
def __json__(self):
return [ self.tag, self.lineno, self.value ]
def __info__(self):
info = super(PyValueLiteral, self).__info__()
info[self.tag]["lineno"] = self.lineno
info[self.tag]["value"] = self.value
return info
def analyse(self):
print("ANALYSING VALUE LITERAL", self.tag)
# Don't go into containers, because there aren't any
self.ntype = self.get_type()
def get_type(self):
raise NotImplementedError("PyValueLiteral does not have any implicit type - its subtypes do")
# All non-number value literals first
class PyString(PyValueLiteral):
tag = "string"
def get_type(self):
return "string"
class PyCharacter(PyValueLiteral):
tag = "character"
def get_type(self):
return "char"
class PyBoolean(PyValueLiteral):
tag = "boolean"
def get_type(self):
return "bool"
# Resist the urge to put PyIdentifiers into a LUT immediately.
class PyIdentifier(PyValueLiteral):
tag = "identifier"
def __init__(self, *args):
super(PyIdentifier, self).__init__(*args)
self.types = []
def add_rvalue(self, expression):
self.context.store(self.value, expression)
def __info__(self):
info = super(PyIdentifier, self).__info__()
info[self.tag]["context "] = self.context
info[self.tag]["types"] = self.types
return info
def get_type(self):
return self.ntype
def analyse(self):
print("PyIdentifier.analyse", self.value, self, dir(self))
for i in dir(self):
if i.startswith("__") and i.endswith("__"):
continue
if "bound method" in repr(getattr(self, i)):
continue
print("attr", self.tag, self, i, repr(getattr(self, i)), getattr(self, i) )
expression = self.context.lookup(self.value)
self.ntype = expression.get_type()
# FIXME: This name is created to allow attribute access lookup
def name(self):
return self.value
class ProfilePyNode(PyIdentifier):
"""Representation of something in the python code that's external to it - from a profile"""
tag = "profile_identifier"
def __init__(self, name, value_type):
self.lineno = 0
self.value = name
#super(ProfilePyNode,self).__init__()
self.ntype = value_type # Logical type of this virtual valie
def analyse(self):
self.ntype = expression.get_type()
def get_type(self):
return self.ntype
class PyAttribute(PyNode):
tag = "attribute"
def __init__(self, lineno, value):
super(PyAttribute, self).__init__()
self.lineno = lineno
self.value = value
def __repr__(self):
return "%s(%d, %s)" % (self.classname(),self.lineno, repr(self.value))
def __json__(self):
return [ self.tag, self.lineno, jdump(self.value) ]
def name(self):
return self.value.name()
def get_type(self):
print("CALLER:", inspect.stack()[1][3])
print("ATTRIBUTE:", self.value)
print(self.context)
raise AttributeError("'PyAttribute' object has no attribute 'get_type'")
class PyAttributeAccess(PyNode):
tag = "attributeaccess"
def __init__(self, expression, attribute):
super(PyAttributeAccess,self).__init__()
self.expression = expression
self.attribute = attribute
# self.addchildren(expression, attribute) # FIXME: This might need to be uncommented. If we get an unusual bug, this might be the issue. (unlikely, but note to help me later)
def __repr__(self):
return "%s(%s, %s)" % (self.classname(), repr(self.expression), repr(self.attribute))
def __json__(self):
return [ self.tag, jdump(self.expression), jdump(self.attribute) ]
def analyse(self):
print("""
**********************************************************************
**********************************************************************
**********************************************************************
**********************************************************************
""")
raise Exception("HERE")
def name(self):
return [ self.expression.name(), self.attribute.name() ]
# Base class for all numbers
class PyNumber(PyValueLiteral):
tag = "number"
def negate(self):
self.value = - self.value
return self
class PyFloat(PyNumber):
tag = "float"
def get_type(self):
return "float"
class PyInteger(PyNumber):
tag = "integer"
def get_type(self):
return "integer"
class PySignedLong(PyNumber):
tag = "signedlong"
def get_type(self):
return "signedlong"
class PyUnSignedLong(PyNumber):
tag = "unsignedlong"
def get_type(self):
return "unsignedlong"
class PyHex(PyNumber):
tag = "hex"
def get_type(self):
return "integer"
class PyOctal(PyNumber):
tag = "octal"
def get_type(self):
return "integer"
class PyBinary(PyNumber):
tag = "binary"
def get_type(self):
return "integer" | 0.488039 | 0.143308 |
import sys
import json
from .common import team_users_lower, dialog_min_len
def calc_score(q):
    """Return the mean of the scores in *q*, or 0 for an empty list."""
    if not q:
        return 0
    return sum(q) / float(len(q))
# Aggregated state:
user_evaluations = dict()  # user id -> list of quality scores received from partners
user_names = dict()        # user id -> username (bots map to their own id)
user_bots = dict()         # username -> count of sufficiently long bot dialogs
lines = sys.stdin.readlines()
for line in lines:
    d = json.loads(line)
    # Either participant may be the bot; detect which side it is on.
    if d['users'][0]['userType'] == 'org.pavlovai.communication.Bot':
        bot_id = d['users'][0]['id']
        user_id = d['users'][1]['id']
        username = d['users'][1]['username']
        user_names[bot_id] = bot_id
        user_names[user_id] = username
        if username not in user_bots:
            user_bots[username] = 0
        # Only count dialogs longer than 2 (per dialog_min_len — presumably
        # the shorter side's message count; confirm in .common).
        if dialog_min_len(d['thread']) > 2:
            user_bots[username] += 1
    elif d['users'][1]['userType'] == 'org.pavlovai.communication.Bot':
        bot_id = d['users'][1]['id']
        user_id = d['users'][0]['id']
        username = d['users'][0]['username']
        user_names[bot_id] = bot_id
        user_names[user_id] = username
        if username not in user_bots:
            user_bots[username] = 0
        if dialog_min_len(d['thread']) > 2:
            user_bots[username] += 1
    else:
        # Human-human dialog: no bot participant.
        bot_id = None
        user_names[d['users'][0]['id']] = d['users'][0]['username']
        user_names[d['users'][1]['id']] = d['users'][1]['username']
    user0 = d['users'][0]['id']
    user1 = d['users'][1]['id']
    if user0 not in user_evaluations:
        user_evaluations[user0] = []
    if user1 not in user_evaluations:
        user_evaluations[user1] = []
    # Credit each human evaluation to the *other* participant.
    for e in d['evaluation']:
        if e['userId'] != bot_id:
            if e['userId'] == user0:
                user_evaluations[user1].append(e['quality'])
            elif e['userId'] == user1:
                user_evaluations[user0].append(e['quality'])
            else:
                continue
# Normalisation maxima computed over team members only.
# NOTE(review): if no team member has any score / bot dialog, the maxima
# stay 0 and the division below raises ZeroDivisionError — confirm inputs.
max_user_score = 0
for u_id in user_evaluations:
    if user_names[u_id].lower() in team_users_lower:
        max_user_score = max(max_user_score, calc_score(user_evaluations[u_id]))
max_user_bots = 0
for u_name in user_bots:
    if u_name.lower() in team_users_lower:
        max_user_bots = max(user_bots[u_name], max_user_bots)
# Final score: equally weighted normalised bot-dialog count and mean quality.
leaderboard = []
for u_id in user_evaluations:
    user_bot = 0 if user_names[u_id] not in user_bots else user_bots[user_names[u_id]]
    user_score = calc_score(user_evaluations[u_id])
    if user_names[u_id].lower() in team_users_lower:
        score = 0.5 * (user_bot / max_user_bots + user_score/max_user_score)
        leaderboard.append((user_names[u_id], score))
leaderboard.sort(key=lambda tup: tup[1], reverse=True)
# Emit "username,score" lines, best first.
for item in leaderboard:
print("%s,%s" % item) | utils/user_leaderboard.py |
import sys
import json
from .common import team_users_lower, dialog_min_len
def calc_score(q):
if len(q) > 0:
return sum(q) / float(len(q))
else:
return 0
user_evaluations = dict()
user_names = dict()
user_bots = dict()
lines = sys.stdin.readlines()
for line in lines:
d = json.loads(line)
if d['users'][0]['userType'] == 'org.pavlovai.communication.Bot':
bot_id = d['users'][0]['id']
user_id = d['users'][1]['id']
username = d['users'][1]['username']
user_names[bot_id] = bot_id
user_names[user_id] = username
if username not in user_bots:
user_bots[username] = 0
if dialog_min_len(d['thread']) > 2:
user_bots[username] += 1
elif d['users'][1]['userType'] == 'org.pavlovai.communication.Bot':
bot_id = d['users'][1]['id']
user_id = d['users'][0]['id']
username = d['users'][0]['username']
user_names[bot_id] = bot_id
user_names[user_id] = username
if username not in user_bots:
user_bots[username] = 0
if dialog_min_len(d['thread']) > 2:
user_bots[username] += 1
else:
bot_id = None
user_names[d['users'][0]['id']] = d['users'][0]['username']
user_names[d['users'][1]['id']] = d['users'][1]['username']
user0 = d['users'][0]['id']
user1 = d['users'][1]['id']
if user0 not in user_evaluations:
user_evaluations[user0] = []
if user1 not in user_evaluations:
user_evaluations[user1] = []
for e in d['evaluation']:
if e['userId'] != bot_id:
if e['userId'] == user0:
user_evaluations[user1].append(e['quality'])
elif e['userId'] == user1:
user_evaluations[user0].append(e['quality'])
else:
continue
max_user_score = 0
for u_id in user_evaluations:
if user_names[u_id].lower() in team_users_lower:
max_user_score = max(max_user_score, calc_score(user_evaluations[u_id]))
max_user_bots = 0
for u_name in user_bots:
if u_name.lower() in team_users_lower:
max_user_bots = max(user_bots[u_name], max_user_bots)
leaderboard = []
for u_id in user_evaluations:
user_bot = 0 if user_names[u_id] not in user_bots else user_bots[user_names[u_id]]
user_score = calc_score(user_evaluations[u_id])
if user_names[u_id].lower() in team_users_lower:
score = 0.5 * (user_bot / max_user_bots + user_score/max_user_score)
leaderboard.append((user_names[u_id], score))
leaderboard.sort(key=lambda tup: tup[1], reverse=True)
for item in leaderboard:
print("%s,%s" % item) | 0.101045 | 0.126731 |
import os
import logging
import numpy as np
from math import ceil, floor
from PIL import Image, ImageDraw, ImageFont
from configs.paths import EVAL_DIR
log = logging.getLogger()
colors = np.load(os.path.join(EVAL_DIR, 'Extra/colors.npy')).tolist()
palette = np.load(os.path.join(EVAL_DIR, 'Extra/palette.npy')).tolist()
font = ImageFont.truetype(os.path.join(EVAL_DIR, 'Extra/FreeSansBold.ttf'), 14)
def xy2wh(bbox):
    """Convert a box from [xmin, ymin, xmax, ymax] to [xmin, ymin, w, h]."""
    xmin, ymin, xmax, ymax = bbox
    width = xmax - xmin
    height = ymax - ymin
    return np.array([xmin, ymin, width, height])
def wh2xy(bbox):
    """Convert a box from [xmin, ymin, w, h] to [xmin, ymin, xmax, ymax]."""
    xmin, ymin, w, h = bbox
    xmax = xmin + w
    ymax = ymin + h
    return np.array([xmin, ymin, xmax, ymax])
def wh2center(bbox):
    """Convert [xmin, ymin, w, h] (top-left anchored) to [xc, yc, w, h] (center)."""
    xmin, ymin, w, h = bbox
    xc = xmin + w / 2
    yc = ymin + h / 2
    return np.array([xc, yc, w, h])
def center2wh(bbox):
    """Convert [xc, yc, w, h] (center anchored) to [xmin, ymin, w, h] (top-left)."""
    xc, yc, w, h = bbox
    xmin = xc - w / 2
    ymin = yc - h / 2
    return np.array([xmin, ymin, w, h])
def nms(dets, scores, thresh=None):
    """Greedy non-maximum suppression on [x1, y1, x2, y2] boxes.

    Args:
        dets: (N, 4) array of boxes in xyxy format.
        scores: (N,) array of confidences.
        thresh: IoU threshold; lower-scoring boxes with IoU > thresh
            relative to an already-kept box are suppressed.

    Returns:
        List of kept box indices, highest score first.
    """
    x1 = dets[:, 0]
    y1 = dets[:, 1]
    x2 = dets[:, 2]
    y2 = dets[:, 3]
    # +1 keeps the original pixel-inclusive area convention.
    areas = (x2 - x1 + 1) * (y2 - y1 + 1)
    order = scores.argsort()[::-1]
    keep = []
    while order.size > 0:
        i = order[0]
        keep.append(i)
        # Intersection of the current best box with all remaining boxes.
        xx1 = np.maximum(x1[i], x1[order[1:]])
        yy1 = np.maximum(y1[i], y1[order[1:]])
        xx2 = np.minimum(x2[i], x2[order[1:]])
        yy2 = np.minimum(y2[i], y2[order[1:]])
        w = np.maximum(0.0, xx2 - xx1 + 1)
        h = np.maximum(0.0, yy2 - yy1 + 1)
        inter = w * h
        ovr = inter / (areas[i] + areas[order[1:]] - inter)
        # BUGFIX: the former `if dets is not None: ... else: ...` branch was
        # vacuous (dets is indexed unconditionally above, so it can never be
        # None here) and its else-arm referenced an undefined 2-D `ovr`;
        # the dead branch has been removed.
        inds = np.where(ovr <= thresh)[0]
        order = order[inds + 1]
    return keep
def batch_iou(proposals, gt, return_union=False, return_intersection=False):
    """Pairwise IoU between two sets of [x, y, w, h] boxes.

    Returns an (N, M) IoU matrix for N proposals and M ground-truth boxes;
    optionally also appends the intersection and/or union matrices, returned
    as a tuple (iou[, intersection][, union]).
    """
    # Reshape so proposals broadcast along axis 2 and gt along axis 1.
    p = np.transpose(proposals).reshape((4, -1, 1))
    g = np.transpose(gt).reshape((4, 1, -1))
    px1, py1 = p[0], p[1]
    px2, py2 = p[0] + p[2], p[1] + p[3]
    gx1, gy1 = g[0], g[1]
    gx2, gy2 = g[0] + g[2], g[1] + g[3]
    overlap_w = np.maximum(0, np.minimum(px2, gx2) - np.maximum(px1, gx1))
    overlap_h = np.maximum(0, np.minimum(py2, gy2) - np.maximum(py1, gy1))
    intersection = overlap_w * overlap_h
    union = p[2] * p[3] + g[2] * g[3] - intersection
    iou = intersection / union
    if not (return_intersection or return_union):
        return iou
    output = [iou]
    if return_intersection:
        output.append(intersection)
    if return_union:
        output.append(union)
    return tuple(output)
def draw_bbox(img, bboxes=None, scores=None, extra_bboxes=None, extra_scores=None,
              cats=None, show=False, size=None, color='red', text_color='red',
              extra_color='purple', bbox_format='xywh', frame_width=3):
    """Drawing bounding boxes on top of the images fed"""
    # NOTE(review): `text_color` is accepted but never used below — the score
    # text is rendered with `color`/per-category color; confirm intent.
    def _draw(bboxes, scores, dr, color, cats=None):
        def draw_rectangle(draw, coordinates, color, width=1):
            # Emulate a thick border by drawing `width` nested 1px rectangles.
            for i in range(width):
                rect_start = (coordinates[0] - i, coordinates[1] - i)
                rect_end = (coordinates[2] + i, coordinates[3] + i)
                draw.rectangle((rect_start, rect_end), outline=color)
        # Promote a single box to a batch of one.
        if bboxes.ndim == 1:
            bboxes = bboxes[None, :]
        for i in range(bboxes.shape[0]):
            bbox = bboxes[i]
            if bbox_format == 'xyxy':
                x, y, x1, y1 = bbox
            elif bbox_format == 'xywh':
                x, y, w, h = bbox
                x1, y1 = x + w, y + h
            if cats is not None:
                # Per-category color from the module-level palette.
                color = colors[cats[i] % len(colors)]
            draw_rectangle(dr, (x, y, x1, y1), color=color, width=frame_width)
            if scores is not None:
                # Score rendered at the top-left corner, truncated to 5 chars.
                dr.text([x, y], str(scores[i])[:5], fill=color, font=font)
    # Accept a path, an ndarray, a PIL image, or None (blank canvas of `size`).
    if isinstance(img, str):
        img = Image.open(img)
    if isinstance(img, (np.ndarray, np.generic)):
        img = Image.fromarray((img).astype('uint8'))
    if img is None:
        img = Image.new("RGB", size, "white")
    if size is not None:
        img = img.resize(size)
    if bboxes is not None:
        dr = ImageDraw.Draw(img)
        _draw(bboxes, scores, dr, color, cats)
        if extra_bboxes is not None:
            # Second box set (e.g. ground truth) drawn in `extra_color`.
            _draw(extra_bboxes, extra_scores, dr, extra_color)
        del dr
    if show:
        img.show()
return img | utils/utils_bbox.py | import os
import logging
import numpy as np
from math import ceil, floor
from PIL import Image, ImageDraw, ImageFont
from configs.paths import EVAL_DIR
log = logging.getLogger()
colors = np.load(os.path.join(EVAL_DIR, 'Extra/colors.npy')).tolist()
palette = np.load(os.path.join(EVAL_DIR, 'Extra/palette.npy')).tolist()
font = ImageFont.truetype(os.path.join(EVAL_DIR, 'Extra/FreeSansBold.ttf'), 14)
def xy2wh(bbox):
xmin, ymin, xmax, ymax = bbox
return np.array([xmin, ymin, xmax-xmin, ymax-ymin])
def wh2xy(bbox):
xmin, ymin, w, h = bbox
return np.array([xmin, ymin, xmin+w, ymin+h])
def wh2center(bbox):
xmin, ymin, w, h = bbox
xc, yc = bbox[:2] + bbox[2:] / 2
return np.array([xc, yc, w, h])
def center2wh(bbox):
xc, yc, w, h = bbox
x, y = bbox[:2] - bbox[2:] / 2
return np.array([x, y, w, h])
def nms(dets, scores, thresh=None):
x1 = dets[:, 0]
y1 = dets[:, 1]
x2 = dets[:, 2]
y2 = dets[:, 3]
areas = (x2 - x1 + 1) * (y2 - y1 + 1)
order = scores.argsort()[::-1]
keep = []
while order.size > 0:
i = order[0]
keep.append(i)
if dets is not None:
xx1 = np.maximum(x1[i], x1[order[1:]])
yy1 = np.maximum(y1[i], y1[order[1:]])
xx2 = np.minimum(x2[i], x2[order[1:]])
yy2 = np.minimum(y2[i], y2[order[1:]])
w = np.maximum(0.0, xx2 - xx1 + 1)
h = np.maximum(0.0, yy2 - yy1 + 1)
inter = w * h
ovr = inter / (areas[i] + areas[order[1:]] - inter)
inds = np.where(ovr <= thresh)[0]
else:
inds = np.where(ovr[i, order[1:]] <= thresh)[0]
order = order[inds + 1]
return keep
def batch_iou(proposals, gt, return_union=False, return_intersection=False):
bboxes = np.transpose(proposals).reshape((4, -1, 1))
bboxes_x1 = bboxes[0]
bboxes_x2 = bboxes[0]+bboxes[2]
bboxes_y1 = bboxes[1]
bboxes_y2 = bboxes[1]+bboxes[3]
gt = np.transpose(gt).reshape((4, 1, -1))
gt_x1 = gt[0]
gt_x2 = gt[0]+gt[2]
gt_y1 = gt[1]
gt_y2 = gt[1]+gt[3]
widths = np.maximum(0, np.minimum(bboxes_x2, gt_x2) -
np.maximum(bboxes_x1, gt_x1))
heights = np.maximum(0, np.minimum(bboxes_y2, gt_y2) -
np.maximum(bboxes_y1, gt_y1))
intersection = widths*heights
union = bboxes[2]*bboxes[3] + gt[2]*gt[3] - intersection
iou = (intersection / union)
if return_intersection or return_union:
output = [iou]
if return_intersection:
output.append(intersection)
if return_union:
output.append(union)
return tuple(output)
else:
return iou
def draw_bbox(img, bboxes=None, scores=None, extra_bboxes=None, extra_scores=None,
cats=None, show=False, size=None, color='red', text_color='red',
extra_color='purple', bbox_format='xywh', frame_width=3):
"""Drawing bounding boxes on top of the images fed"""
def _draw(bboxes, scores, dr, color, cats=None):
def draw_rectangle(draw, coordinates, color, width=1):
for i in range(width):
rect_start = (coordinates[0] - i, coordinates[1] - i)
rect_end = (coordinates[2] + i, coordinates[3] + i)
draw.rectangle((rect_start, rect_end), outline=color)
if bboxes.ndim == 1:
bboxes = bboxes[None, :]
for i in range(bboxes.shape[0]):
bbox = bboxes[i]
if bbox_format == 'xyxy':
x, y, x1, y1 = bbox
elif bbox_format == 'xywh':
x, y, w, h = bbox
x1, y1 = x + w, y + h
if cats is not None:
color = colors[cats[i] % len(colors)]
draw_rectangle(dr, (x, y, x1, y1), color=color, width=frame_width)
if scores is not None:
dr.text([x, y], str(scores[i])[:5], fill=color, font=font)
if isinstance(img, str):
img = Image.open(img)
if isinstance(img, (np.ndarray, np.generic)):
img = Image.fromarray((img).astype('uint8'))
if img is None:
img = Image.new("RGB", size, "white")
if size is not None:
img = img.resize(size)
if bboxes is not None:
dr = ImageDraw.Draw(img)
_draw(bboxes, scores, dr, color, cats)
if extra_bboxes is not None:
_draw(extra_bboxes, extra_scores, dr, extra_color)
del dr
if show:
img.show()
return img | 0.550849 | 0.383641 |
import pytest
from os.path import join
import numpy as np
import pandas as pd
from delphi_jhu.geo import geo_map, INCIDENCE_BASE
from delphi_utils import GeoMapper
class TestGeoMap:
    def test_incorrect_geo(self, jhu_confirmed_test_data):
        """An unknown geo resolution name must raise ValueError."""
        df = jhu_confirmed_test_data
        with pytest.raises(ValueError):
            geo_map(df, "département")
def test_fips(self, jhu_confirmed_test_data):
test_df = jhu_confirmed_test_data
new_df = geo_map(test_df, "county")
# Test the same fips and timestamps are present
assert new_df["geo_id"].eq(test_df["fips"]).all()
assert new_df["timestamp"].eq(test_df["timestamp"]).all()
new_df = new_df.set_index(["geo_id", "timestamp"])
test_df = test_df.set_index(["fips", "timestamp"])
expected_incidence = test_df["new_counts"] / test_df["population"] * INCIDENCE_BASE
expected_cumulative_prop = test_df["cumulative_counts"] / test_df["population"] * INCIDENCE_BASE
# Manually calculate the proportional signals in Alabama and verify equality
assert new_df["incidence"].eq(expected_incidence).all()
assert new_df["cumulative_prop"].eq(expected_cumulative_prop.values).all()
# Make sure the prop signals don't have inf values
assert not new_df["incidence"].eq(np.inf).any()
assert not new_df["cumulative_prop"].eq(np.inf).any()
def test_state(self, jhu_confirmed_test_data):
df = jhu_confirmed_test_data
new_df = geo_map(df, "state")
gmpr = GeoMapper()
test_df = gmpr.replace_geocode(df, "fips", "state_id", date_col="timestamp", new_col="state")
# Test the same states and timestamps are present
assert new_df["geo_id"].eq(test_df["state"]).all()
assert new_df["timestamp"].eq(test_df["timestamp"]).all()
new_df = new_df.set_index(["geo_id", "timestamp"])
test_df = test_df.set_index(["state", "timestamp"])
# Get the Alabama state population total in a different way
summed_population = df.set_index("fips").filter(regex="01\d{2}[1-9]", axis=0).groupby("fips").first()["population"].sum()
mega_fips_record = df.set_index(["fips", "timestamp"]).loc[("01000", "2020-09-15"), "population"].sum()
# Compare with the county megaFIPS record
assert summed_population == mega_fips_record
# Compare with the population in the transformed df
assert new_df.loc["al"]["population"].eq(summed_population).all()
# Make sure diffs and cumulative are equal
assert new_df["new_counts"].eq(test_df["new_counts"]).all()
assert new_df["cumulative_counts"].eq(test_df["cumulative_counts"]).all()
# Manually calculate the proportional signals in Alabama and verify equality
expected_incidence = test_df.loc["al"]["new_counts"] / summed_population * INCIDENCE_BASE
expected_cumulative_prop = test_df.loc["al"]["cumulative_counts"] / summed_population * INCIDENCE_BASE
assert new_df.loc["al", "incidence"].eq(expected_incidence).all()
assert new_df.loc["al", "cumulative_prop"].eq(expected_cumulative_prop).all()
# Make sure the prop signals don't have inf values
assert not new_df["incidence"].eq(np.inf).any()
assert not new_df["cumulative_prop"].eq(np.inf).any()
def test_other_geos(self, jhu_confirmed_test_data):
for geo in ["msa", "hrr", "hhs", "nation"]:
test_df = jhu_confirmed_test_data
new_df = geo_map(test_df, geo)
gmpr = GeoMapper()
test_df = gmpr.replace_geocode(test_df, "fips", geo, date_col="timestamp")
new_df = new_df.set_index(["geo_id", "timestamp"]).sort_index()
test_df = test_df.set_index([geo, "timestamp"]).sort_index()
# Check that the non-proportional columns are identical
assert new_df.eq(test_df)[["new_counts", "population", "cumulative_counts"]].all().all()
# Check that the proportional signals are identical
exp_incidence = test_df["new_counts"] / test_df["population"] * INCIDENCE_BASE
expected_cumulative_prop = test_df["cumulative_counts"] / test_df["population"] * INCIDENCE_BASE
assert new_df["incidence"].eq(exp_incidence).all()
assert new_df["cumulative_prop"].eq(expected_cumulative_prop).all()
# Make sure the prop signals don't have inf values
assert not new_df["incidence"].eq(np.inf).any()
assert not new_df["cumulative_prop"].eq(np.inf).any() | jhu/tests/test_geo.py | import pytest
from os.path import join
import numpy as np
import pandas as pd
from delphi_jhu.geo import geo_map, INCIDENCE_BASE
from delphi_utils import GeoMapper
class TestGeoMap:
def test_incorrect_geo(self, jhu_confirmed_test_data):
df = jhu_confirmed_test_data
with pytest.raises(ValueError):
geo_map(df, "département")
def test_fips(self, jhu_confirmed_test_data):
test_df = jhu_confirmed_test_data
new_df = geo_map(test_df, "county")
# Test the same fips and timestamps are present
assert new_df["geo_id"].eq(test_df["fips"]).all()
assert new_df["timestamp"].eq(test_df["timestamp"]).all()
new_df = new_df.set_index(["geo_id", "timestamp"])
test_df = test_df.set_index(["fips", "timestamp"])
expected_incidence = test_df["new_counts"] / test_df["population"] * INCIDENCE_BASE
expected_cumulative_prop = test_df["cumulative_counts"] / test_df["population"] * INCIDENCE_BASE
# Manually calculate the proportional signals in Alabama and verify equality
assert new_df["incidence"].eq(expected_incidence).all()
assert new_df["cumulative_prop"].eq(expected_cumulative_prop.values).all()
# Make sure the prop signals don't have inf values
assert not new_df["incidence"].eq(np.inf).any()
assert not new_df["cumulative_prop"].eq(np.inf).any()
def test_state(self, jhu_confirmed_test_data):
df = jhu_confirmed_test_data
new_df = geo_map(df, "state")
gmpr = GeoMapper()
test_df = gmpr.replace_geocode(df, "fips", "state_id", date_col="timestamp", new_col="state")
# Test the same states and timestamps are present
assert new_df["geo_id"].eq(test_df["state"]).all()
assert new_df["timestamp"].eq(test_df["timestamp"]).all()
new_df = new_df.set_index(["geo_id", "timestamp"])
test_df = test_df.set_index(["state", "timestamp"])
# Get the Alabama state population total in a different way
summed_population = df.set_index("fips").filter(regex="01\d{2}[1-9]", axis=0).groupby("fips").first()["population"].sum()
mega_fips_record = df.set_index(["fips", "timestamp"]).loc[("01000", "2020-09-15"), "population"].sum()
# Compare with the county megaFIPS record
assert summed_population == mega_fips_record
# Compare with the population in the transformed df
assert new_df.loc["al"]["population"].eq(summed_population).all()
# Make sure diffs and cumulative are equal
assert new_df["new_counts"].eq(test_df["new_counts"]).all()
assert new_df["cumulative_counts"].eq(test_df["cumulative_counts"]).all()
# Manually calculate the proportional signals in Alabama and verify equality
expected_incidence = test_df.loc["al"]["new_counts"] / summed_population * INCIDENCE_BASE
expected_cumulative_prop = test_df.loc["al"]["cumulative_counts"] / summed_population * INCIDENCE_BASE
assert new_df.loc["al", "incidence"].eq(expected_incidence).all()
assert new_df.loc["al", "cumulative_prop"].eq(expected_cumulative_prop).all()
# Make sure the prop signals don't have inf values
assert not new_df["incidence"].eq(np.inf).any()
assert not new_df["cumulative_prop"].eq(np.inf).any()
def test_other_geos(self, jhu_confirmed_test_data):
for geo in ["msa", "hrr", "hhs", "nation"]:
test_df = jhu_confirmed_test_data
new_df = geo_map(test_df, geo)
gmpr = GeoMapper()
test_df = gmpr.replace_geocode(test_df, "fips", geo, date_col="timestamp")
new_df = new_df.set_index(["geo_id", "timestamp"]).sort_index()
test_df = test_df.set_index([geo, "timestamp"]).sort_index()
# Check that the non-proportional columns are identical
assert new_df.eq(test_df)[["new_counts", "population", "cumulative_counts"]].all().all()
# Check that the proportional signals are identical
exp_incidence = test_df["new_counts"] / test_df["population"] * INCIDENCE_BASE
expected_cumulative_prop = test_df["cumulative_counts"] / test_df["population"] * INCIDENCE_BASE
assert new_df["incidence"].eq(exp_incidence).all()
assert new_df["cumulative_prop"].eq(expected_cumulative_prop).all()
# Make sure the prop signals don't have inf values
assert not new_df["incidence"].eq(np.inf).any()
assert not new_df["cumulative_prop"].eq(np.inf).any() | 0.659844 | 0.676339 |
import numpy as np
from scipy import ndimage
from scipy import misc
from PIL import Image
import torch
import torch.nn.functional as tf
import skimage.io as io
import png
TAG_CHAR = np.array([202021.25], np.float32)
UNKNOWN_FLOW_THRESH = 1e7
def compute_color(u, v):
"""
compute optical flow color map
:param u: optical flow horizontal map
:param v: optical flow vertical map
:return: optical flow in color code
"""
[h, w] = u.shape
img = np.zeros([h, w, 3])
nanIdx = np.isnan(u) | np.isnan(v)
u[nanIdx] = 0
v[nanIdx] = 0
colorwheel = make_color_wheel()
ncols = np.size(colorwheel, 0)
rad = np.sqrt(u ** 2 + v ** 2)
a = np.arctan2(-v, -u) / np.pi
fk = (a + 1) / 2 * (ncols - 1) + 1
k0 = np.floor(fk).astype(int)
k1 = k0 + 1
k1[k1 == ncols + 1] = 1
f = fk - k0
for i in range(0, np.size(colorwheel, 1)):
tmp = colorwheel[:, i]
col0 = tmp[k0 - 1] / 255
col1 = tmp[k1 - 1] / 255
col = (1 - f) * col0 + f * col1
idx = rad <= 1
col[idx] = 1 - rad[idx] * (1 - col[idx])
notidx = np.logical_not(idx)
col[notidx] *= 0.75
img[:, :, i] = np.uint8(np.floor(255 * col * (1 - nanIdx)))
return img
def make_color_wheel():
"""
Generate color wheel according Middlebury color code
:return: Color wheel
"""
RY = 15
YG = 6
GC = 4
CB = 11
BM = 13
MR = 6
ncols = RY + YG + GC + CB + BM + MR
colorwheel = np.zeros([ncols, 3])
col = 0
# RY
colorwheel[0:RY, 0] = 255
colorwheel[0:RY, 1] = np.transpose(np.floor(255 * np.arange(0, RY) / RY))
col += RY
# YG
colorwheel[col : col + YG, 0] = 255 - np.transpose(
np.floor(255 * np.arange(0, YG) / YG)
)
colorwheel[col : col + YG, 1] = 255
col += YG
# GC
colorwheel[col : col + GC, 1] = 255
colorwheel[col : col + GC, 2] = np.transpose(np.floor(255 * np.arange(0, GC) / GC))
col += GC
# CB
colorwheel[col : col + CB, 1] = 255 - np.transpose(
np.floor(255 * np.arange(0, CB) / CB)
)
colorwheel[col : col + CB, 2] = 255
col += CB
# BM
colorwheel[col : col + BM, 2] = 255
colorwheel[col : col + BM, 0] = np.transpose(np.floor(255 * np.arange(0, BM) / BM))
col += +BM
# MR
colorwheel[col : col + MR, 2] = 255 - np.transpose(
np.floor(255 * np.arange(0, MR) / MR)
)
colorwheel[col : col + MR, 0] = 255
return colorwheel
def flow_to_png_middlebury(flow):
"""
Convert flow into middlebury color code image
:param flow: optical flow map
:return: optical flow image in middlebury color
"""
flow = flow.transpose([1, 2, 0])
u = flow[:, :, 0]
v = flow[:, :, 1]
maxu = -999.0
maxv = -999.0
minu = 999.0
minv = 999.0
idxUnknow = (abs(u) > UNKNOWN_FLOW_THRESH) | (abs(v) > UNKNOWN_FLOW_THRESH)
u[idxUnknow] = 0
v[idxUnknow] = 0
maxu = max(maxu, np.max(u))
minu = min(minu, np.min(u))
maxv = max(maxv, np.max(v))
minv = min(minv, np.min(v))
rad = np.sqrt(u ** 2 + v ** 2)
maxrad = max(-1, np.max(rad))
# maxrad = 4
u = u / (maxrad + np.finfo(float).eps)
v = v / (maxrad + np.finfo(float).eps)
img = compute_color(u, v)
idx = np.repeat(idxUnknow[:, :, np.newaxis], 3, axis=2)
img[idx] = 0
return np.uint8(img)
def numpy2torch(array):
assert isinstance(array, np.ndarray)
if array.ndim == 3:
array = np.transpose(array, (2, 0, 1))
else:
array = np.expand_dims(array, axis=0)
return torch.from_numpy(array.copy()).float()
def get_pixelgrid(b, h, w, flow=None, direction="forward"):
# get heterogeneous coordinates pixel grid
"""generate heterogeneous coord pixel grid
Returns:
[torch.Tensor]: heterogenous coordinates pixel grid
"""
assert direction in ["forward", "backward"]
grid_h = torch.linspace(0.0, w - 1, w).view(1, 1, 1, w).expand(b, 1, h, w)
grid_v = torch.linspace(0.0, h - 1, h).view(1, 1, h, 1).expand(b, 1, h, w)
ones = torch.ones_like(grid_h)
if flow is None:
pixelgrid = (
torch.cat((grid_h, grid_v, ones), dim=1).float().requires_grad_(False)
)
else:
if direction == "backward":
flow = -flow
pixelgrid = (
torch.cat(
(grid_h + flow[:, 0:1, :, :], grid_v + flow[:, 1:2, :, :], ones), dim=1
)
.float()
.requires_grad_(False)
)
return pixelgrid
def pixel2pts(depth, intrinsic, rotation=None, C=None, flow=None):
b, _, h, w = depth.size()
# * get heterogenous coordinates
pixelgrid = get_pixelgrid(b, h, w, flow)
depth_mat = depth.view(b, 1, -1)
pixel_mat = pixelgrid.view(b, 3, -1)
# * back-projection
pts_mat = torch.matmul(torch.inverse(intrinsic), pixel_mat) * depth_mat
if rotation != None and C != None:
pts_mat = depth_mat * pts_mat @ rotation + C
else:
pts_mat *= depth_mat
pts = pts_mat.view(b, -1, h, w)
return pts, pixelgrid
def pts2pixel(pts, intrinsics, flow=None):
"""K @ Pts and normalize by dividing channel w. output 2D coordinates in camera world"""
"""[summary]
Returns:
torch.Tensor: 2D coordinates of pixel world
"""
b, _, h, w = pts.size()
proj_pts = torch.matmul(intrinsics, pts.view(b, 3, -1))
pixels_mat = proj_pts.div(proj_pts[:, 2:3, :] + 1e-8)[:, 0:2, :] # devide z
return pixels_mat.view(b, 2, h, w)
def pts2pixel_ms(intrinsic, pts, flow, disp_size):
sf_s = tf.interpolate(flow, disp_size, mode="bilinear", align_corners=True)
pts_tform = pts + sf_s
coord = pts2pixel(pts_tform, intrinsic)
# * normalize grid into [-1,1]
norm_coord_w = coord[:, 0:1, :, :] / (disp_size[1] - 1) * 2 - 1
norm_coord_h = coord[:, 1:2, :, :] / (disp_size[0] - 1) * 2 - 1
norm_coord = torch.cat((norm_coord_w, norm_coord_h), dim=1)
return sf_s, pts_tform, norm_coord
def disp2depth_kitti(pred_disp, focal_length):
pred_depth = focal_length * 0.54 / pred_disp
pred_depth = torch.clamp(pred_depth, 1e-3, 80)
return pred_depth
def pixel2pts_ms(output_disp, intrinsic, rotation=None, C=None, flow=None):
focal_length = intrinsic[:, 0, 0]
output_depth = disp2depth_kitti(output_disp, focal_length)
pts, _ = pixel2pts(output_depth, intrinsic, rotation=rotation, C=C, flow=flow)
return pts
def get_grid(x):
grid_H = (
torch.linspace(-1.0, 1.0, x.size(3))
.view(1, 1, 1, x.size(3))
.expand(x.size(0), 1, x.size(2), x.size(3))
)
grid_V = (
torch.linspace(-1.0, 1.0, x.size(2))
.view(1, 1, x.size(2), 1)
.expand(x.size(0), 1, x.size(2), x.size(3))
)
grid = torch.cat([grid_H, grid_V], 1)
grids_cuda = grid.float().requires_grad_(False)
return grids_cuda
def read_png_depth(depth_file):
disp_np = io.imread(depth_file).astype(np.uint16) / 256.0
disp_np = np.expand_dims(disp_np, axis=2)
mask_disp = (disp_np > 0).astype(np.float64)
return disp_np
def read_png_flow(flow_file):
flow_object = png.Reader(filename=flow_file)
flow_direct = flow_object.asDirect()
flow_data = list(flow_direct[2])
(w, h) = flow_direct[3]["size"]
flow = np.zeros((h, w, 3), dtype=np.float64)
for i in range(len(flow_data)):
flow[i, :, 0] = flow_data[i][0::3]
flow[i, :, 1] = flow_data[i][1::3]
flow[i, :, 2] = flow_data[i][2::3]
invalid_idx = flow[:, :, 2] == 0
flow[:, :, 0:2] = (flow[:, :, 0:2] - 2 ** 15) / 64.0
flow[invalid_idx, 0] = 0
flow[invalid_idx, 1] = 0
return flow[:, :, 0:2] | demo/demo_generator/utils_misc.py | import numpy as np
from scipy import ndimage
from scipy import misc
from PIL import Image
import torch
import torch.nn.functional as tf
import skimage.io as io
import png
TAG_CHAR = np.array([202021.25], np.float32)
UNKNOWN_FLOW_THRESH = 1e7
def compute_color(u, v):
"""
compute optical flow color map
:param u: optical flow horizontal map
:param v: optical flow vertical map
:return: optical flow in color code
"""
[h, w] = u.shape
img = np.zeros([h, w, 3])
nanIdx = np.isnan(u) | np.isnan(v)
u[nanIdx] = 0
v[nanIdx] = 0
colorwheel = make_color_wheel()
ncols = np.size(colorwheel, 0)
rad = np.sqrt(u ** 2 + v ** 2)
a = np.arctan2(-v, -u) / np.pi
fk = (a + 1) / 2 * (ncols - 1) + 1
k0 = np.floor(fk).astype(int)
k1 = k0 + 1
k1[k1 == ncols + 1] = 1
f = fk - k0
for i in range(0, np.size(colorwheel, 1)):
tmp = colorwheel[:, i]
col0 = tmp[k0 - 1] / 255
col1 = tmp[k1 - 1] / 255
col = (1 - f) * col0 + f * col1
idx = rad <= 1
col[idx] = 1 - rad[idx] * (1 - col[idx])
notidx = np.logical_not(idx)
col[notidx] *= 0.75
img[:, :, i] = np.uint8(np.floor(255 * col * (1 - nanIdx)))
return img
def make_color_wheel():
"""
Generate color wheel according Middlebury color code
:return: Color wheel
"""
RY = 15
YG = 6
GC = 4
CB = 11
BM = 13
MR = 6
ncols = RY + YG + GC + CB + BM + MR
colorwheel = np.zeros([ncols, 3])
col = 0
# RY
colorwheel[0:RY, 0] = 255
colorwheel[0:RY, 1] = np.transpose(np.floor(255 * np.arange(0, RY) / RY))
col += RY
# YG
colorwheel[col : col + YG, 0] = 255 - np.transpose(
np.floor(255 * np.arange(0, YG) / YG)
)
colorwheel[col : col + YG, 1] = 255
col += YG
# GC
colorwheel[col : col + GC, 1] = 255
colorwheel[col : col + GC, 2] = np.transpose(np.floor(255 * np.arange(0, GC) / GC))
col += GC
# CB
colorwheel[col : col + CB, 1] = 255 - np.transpose(
np.floor(255 * np.arange(0, CB) / CB)
)
colorwheel[col : col + CB, 2] = 255
col += CB
# BM
colorwheel[col : col + BM, 2] = 255
colorwheel[col : col + BM, 0] = np.transpose(np.floor(255 * np.arange(0, BM) / BM))
col += +BM
# MR
colorwheel[col : col + MR, 2] = 255 - np.transpose(
np.floor(255 * np.arange(0, MR) / MR)
)
colorwheel[col : col + MR, 0] = 255
return colorwheel
def flow_to_png_middlebury(flow):
"""
Convert flow into middlebury color code image
:param flow: optical flow map
:return: optical flow image in middlebury color
"""
flow = flow.transpose([1, 2, 0])
u = flow[:, :, 0]
v = flow[:, :, 1]
maxu = -999.0
maxv = -999.0
minu = 999.0
minv = 999.0
idxUnknow = (abs(u) > UNKNOWN_FLOW_THRESH) | (abs(v) > UNKNOWN_FLOW_THRESH)
u[idxUnknow] = 0
v[idxUnknow] = 0
maxu = max(maxu, np.max(u))
minu = min(minu, np.min(u))
maxv = max(maxv, np.max(v))
minv = min(minv, np.min(v))
rad = np.sqrt(u ** 2 + v ** 2)
maxrad = max(-1, np.max(rad))
# maxrad = 4
u = u / (maxrad + np.finfo(float).eps)
v = v / (maxrad + np.finfo(float).eps)
img = compute_color(u, v)
idx = np.repeat(idxUnknow[:, :, np.newaxis], 3, axis=2)
img[idx] = 0
return np.uint8(img)
def numpy2torch(array):
assert isinstance(array, np.ndarray)
if array.ndim == 3:
array = np.transpose(array, (2, 0, 1))
else:
array = np.expand_dims(array, axis=0)
return torch.from_numpy(array.copy()).float()
def get_pixelgrid(b, h, w, flow=None, direction="forward"):
# get heterogeneous coordinates pixel grid
"""generate heterogeneous coord pixel grid
Returns:
[torch.Tensor]: heterogenous coordinates pixel grid
"""
assert direction in ["forward", "backward"]
grid_h = torch.linspace(0.0, w - 1, w).view(1, 1, 1, w).expand(b, 1, h, w)
grid_v = torch.linspace(0.0, h - 1, h).view(1, 1, h, 1).expand(b, 1, h, w)
ones = torch.ones_like(grid_h)
if flow is None:
pixelgrid = (
torch.cat((grid_h, grid_v, ones), dim=1).float().requires_grad_(False)
)
else:
if direction == "backward":
flow = -flow
pixelgrid = (
torch.cat(
(grid_h + flow[:, 0:1, :, :], grid_v + flow[:, 1:2, :, :], ones), dim=1
)
.float()
.requires_grad_(False)
)
return pixelgrid
def pixel2pts(depth, intrinsic, rotation=None, C=None, flow=None):
b, _, h, w = depth.size()
# * get heterogenous coordinates
pixelgrid = get_pixelgrid(b, h, w, flow)
depth_mat = depth.view(b, 1, -1)
pixel_mat = pixelgrid.view(b, 3, -1)
# * back-projection
pts_mat = torch.matmul(torch.inverse(intrinsic), pixel_mat) * depth_mat
if rotation != None and C != None:
pts_mat = depth_mat * pts_mat @ rotation + C
else:
pts_mat *= depth_mat
pts = pts_mat.view(b, -1, h, w)
return pts, pixelgrid
def pts2pixel(pts, intrinsics, flow=None):
"""K @ Pts and normalize by dividing channel w. output 2D coordinates in camera world"""
"""[summary]
Returns:
torch.Tensor: 2D coordinates of pixel world
"""
b, _, h, w = pts.size()
proj_pts = torch.matmul(intrinsics, pts.view(b, 3, -1))
pixels_mat = proj_pts.div(proj_pts[:, 2:3, :] + 1e-8)[:, 0:2, :] # devide z
return pixels_mat.view(b, 2, h, w)
def pts2pixel_ms(intrinsic, pts, flow, disp_size):
sf_s = tf.interpolate(flow, disp_size, mode="bilinear", align_corners=True)
pts_tform = pts + sf_s
coord = pts2pixel(pts_tform, intrinsic)
# * normalize grid into [-1,1]
norm_coord_w = coord[:, 0:1, :, :] / (disp_size[1] - 1) * 2 - 1
norm_coord_h = coord[:, 1:2, :, :] / (disp_size[0] - 1) * 2 - 1
norm_coord = torch.cat((norm_coord_w, norm_coord_h), dim=1)
return sf_s, pts_tform, norm_coord
def disp2depth_kitti(pred_disp, focal_length):
pred_depth = focal_length * 0.54 / pred_disp
pred_depth = torch.clamp(pred_depth, 1e-3, 80)
return pred_depth
def pixel2pts_ms(output_disp, intrinsic, rotation=None, C=None, flow=None):
focal_length = intrinsic[:, 0, 0]
output_depth = disp2depth_kitti(output_disp, focal_length)
pts, _ = pixel2pts(output_depth, intrinsic, rotation=rotation, C=C, flow=flow)
return pts
def get_grid(x):
grid_H = (
torch.linspace(-1.0, 1.0, x.size(3))
.view(1, 1, 1, x.size(3))
.expand(x.size(0), 1, x.size(2), x.size(3))
)
grid_V = (
torch.linspace(-1.0, 1.0, x.size(2))
.view(1, 1, x.size(2), 1)
.expand(x.size(0), 1, x.size(2), x.size(3))
)
grid = torch.cat([grid_H, grid_V], 1)
grids_cuda = grid.float().requires_grad_(False)
return grids_cuda
def read_png_depth(depth_file):
disp_np = io.imread(depth_file).astype(np.uint16) / 256.0
disp_np = np.expand_dims(disp_np, axis=2)
mask_disp = (disp_np > 0).astype(np.float64)
return disp_np
def read_png_flow(flow_file):
flow_object = png.Reader(filename=flow_file)
flow_direct = flow_object.asDirect()
flow_data = list(flow_direct[2])
(w, h) = flow_direct[3]["size"]
flow = np.zeros((h, w, 3), dtype=np.float64)
for i in range(len(flow_data)):
flow[i, :, 0] = flow_data[i][0::3]
flow[i, :, 1] = flow_data[i][1::3]
flow[i, :, 2] = flow_data[i][2::3]
invalid_idx = flow[:, :, 2] == 0
flow[:, :, 0:2] = (flow[:, :, 0:2] - 2 ** 15) / 64.0
flow[invalid_idx, 0] = 0
flow[invalid_idx, 1] = 0
return flow[:, :, 0:2] | 0.74512 | 0.55097 |
from __future__ import annotations
from dataclasses import astuple, dataclass
from enum import Enum
from typing import TYPE_CHECKING, Any, Callable, Generic, Iterator, TypeVar
from dcor._dcor_internals import _af_inv_scaled
from ._dcor_internals import (
MatrixCentered,
_distance_matrix,
_u_distance_matrix,
mean_product,
u_product,
)
from ._fast_dcov_avl import _distance_covariance_sqr_avl_generic
from ._fast_dcov_mergesort import _distance_covariance_sqr_mergesort_generic
from ._utils import ArrayType, CompileMode, _sqrt, get_namespace
# Array-like type variable shared by the estimators in this module.
T = TypeVar("T", bound=ArrayType)
# ``Protocol`` is only needed for static type checking; at runtime a
# plain ``object`` base keeps the class below importable even where
# ``typing.Protocol`` is unavailable (falling back to typing_extensions).
if TYPE_CHECKING:
    try:
        from typing import Protocol
    except ImportError:
        from typing_extensions import Protocol
else:
    Protocol = object
class DCovFunction(Protocol):
    """Callback protocol for (squared) distance covariance functions."""
    def __call__(self, __x: T, __y: T, *, compile_mode: CompileMode) -> T:
        ...
class _DcovAlgorithmInternals():
def __init__(
self,
*,
dcov_sqr=None,
u_dcov_sqr=None,
dcor_sqr=None,
u_dcor_sqr=None,
stats_sqr=None,
u_stats_sqr=None,
dcov_generic=None,
stats_generic=None,
):
# Dcov and U-Dcov
if dcov_generic is not None:
self.dcov_sqr = (
lambda *args, **kwargs: dcov_generic(
*args,
**kwargs,
unbiased=False,
)
)
self.u_dcov_sqr = (
lambda *args, **kwargs: dcov_generic(
*args,
**kwargs,
unbiased=True,
)
)
else:
self.dcov_sqr = dcov_sqr
self.u_dcov_sqr = u_dcov_sqr
# Stats
if stats_sqr is not None:
self.stats_sqr = stats_sqr
else:
if stats_generic is None:
self.stats_sqr = (
lambda *args, **kwargs: _distance_stats_sqr_generic(
*args,
**kwargs,
dcov_function=self.dcov_sqr,
)
)
else:
self.stats_sqr = (
lambda *args, **kwargs: stats_generic(
*args,
**kwargs,
matrix_centered=_distance_matrix,
product=mean_product,
)
)
# U-Stats
if u_stats_sqr is not None:
self.u_stats_sqr = u_stats_sqr
else:
if stats_generic is None:
self.u_stats_sqr = (
lambda *args, **kwargs: _distance_stats_sqr_generic(
*args,
**kwargs,
dcov_function=self.u_dcov_sqr,
)
)
else:
self.u_stats_sqr = (
lambda *args, **kwargs: stats_generic(
*args,
**kwargs,
matrix_centered=_u_distance_matrix,
product=u_product,
)
)
# Dcor
if dcor_sqr is not None:
self.dcor_sqr = dcor_sqr
else:
self.dcor_sqr = lambda *args, **kwargs: self.stats_sqr(
*args,
**kwargs,
).correlation_xy
# U-Dcor
if u_dcor_sqr is not None:
self.u_dcor_sqr = u_dcor_sqr
else:
self.u_dcor_sqr = lambda *args, **kwargs: self.u_stats_sqr(
*args,
**kwargs,
).correlation_xy
class _DcovAlgorithmInternalsAuto():
def _dispatch(
self,
x: T,
y: T,
*,
method: str,
exponent: float,
**kwargs: Any,
) -> Any:
if _can_use_fast_algorithm(x, y, exponent):
return getattr(DistanceCovarianceMethod.AVL.value, method)(
x,
y,
exponent=exponent,
**kwargs,
)
else:
return getattr(
DistanceCovarianceMethod.NAIVE.value, method)(
x,
y,
exponent=exponent,
**kwargs,
)
def __getattr__(self, method: str) -> Any:
if method[0] != '_':
return lambda *args, **kwargs: self._dispatch(
*args,
**kwargs,
method=method,
)
else:
raise AttributeError(
f"{self.__class__.__name__!r} object has no "
f"attribute {method!r}",
)
@dataclass(frozen=True)
class Stats(Generic[T]):
    """Distance covariance related stats."""
    # The four estimates produced together by a stats computation.
    covariance_xy: T
    correlation_xy: T
    variance_x: T
    variance_y: T
    def __iter__(self) -> Iterator[T]:
        # Support tuple-style unpacking of the four fields, in order.
        yield from astuple(self)
def _naive_check_compile_mode(compile_mode: CompileMode) -> None:
    """Check that compile mode is AUTO or NO_COMPILE and raises otherwise."""
    supported = (CompileMode.AUTO, CompileMode.NO_COMPILE)
    if compile_mode not in supported:
        raise NotImplementedError(
            f"Compile mode {compile_mode} not implemented.",
        )
def _distance_covariance_sqr_naive(
    x: T,
    y: T,
    exponent: float = 1,
    compile_mode: CompileMode = CompileMode.AUTO,
) -> T:
    """
    Naive biased estimator for distance covariance.

    Computes the biased estimator for distance covariance between two
    matrices, using an :math:`O(N^2)` algorithm.

    Args:
        x: First random vector (one instance per row).
        y: Second random vector (one instance per row).
        exponent: Exponent of the Euclidean distance.
        compile_mode: Compilation mode; the naive implementation only
            supports AUTO and NO_COMPILE.

    Returns:
        Biased estimate of the squared distance covariance.
    """
    # Fix: the body of the old docstring claimed this computed the
    # *unbiased* estimator; this is the biased one (double-centered
    # matrices with mean_product), unlike _u_distance_covariance_sqr_naive.
    _naive_check_compile_mode(compile_mode)
    a = _distance_matrix(x, exponent=exponent)
    b = _distance_matrix(y, exponent=exponent)
    return mean_product(a, b)
def _u_distance_covariance_sqr_naive(
    x: T,
    y: T,
    exponent: float = 1,
    compile_mode: CompileMode = CompileMode.AUTO,
) -> T:
    """
    Naive unbiased estimator for distance covariance.

    Computes the unbiased estimator for distance covariance between two
    matrices, using an :math:`O(N^2)` algorithm.
    """
    _naive_check_compile_mode(compile_mode)
    # U-centered distance matrices yield the bias-corrected estimate.
    u_dist_x = _u_distance_matrix(x, exponent=exponent)
    u_dist_y = _u_distance_matrix(y, exponent=exponent)
    return u_product(u_dist_x, u_dist_y)
def _distance_sqr_stats_naive_generic(
    x: T,
    y: T,
    matrix_centered: MatrixCentered,
    product: Callable[[T, T], T],
    exponent: float = 1,
    compile_mode: CompileMode = CompileMode.AUTO,
) -> Stats[T]:
    """Compute generic squared stats."""
    _naive_check_compile_mode(compile_mode)
    xp = get_namespace(x, y)
    centered_x = matrix_centered(x, exponent=exponent)
    centered_y = matrix_centered(y, exponent=exponent)
    # All four estimates come from products of the centered matrices.
    covariance_xy_sqr = product(centered_x, centered_y)
    variance_x_sqr = product(centered_x, centered_x)
    variance_y_sqr = product(centered_y, centered_y)
    denominator = _sqrt(xp.abs(variance_x_sqr * variance_y_sqr))
    # The exact-zero check is deliberate: comparisons using a tolerance
    # can change results if the covariance has a similar order of
    # magnitude.
    if denominator == 0.0:
        correlation_xy_sqr = xp.asarray(0, dtype=covariance_xy_sqr.dtype)
    else:
        correlation_xy_sqr = covariance_xy_sqr / denominator
    return Stats(
        covariance_xy=covariance_xy_sqr,
        correlation_xy=correlation_xy_sqr,
        variance_x=variance_x_sqr,
        variance_y=variance_y_sqr,
    )
def _distance_correlation_sqr_naive(
    x: T,
    y: T,
    exponent: float = 1,
    compile_mode: CompileMode = CompileMode.AUTO,
) -> T:
    """Biased distance correlation estimator between two matrices."""
    _naive_check_compile_mode(compile_mode)
    stats = _distance_sqr_stats_naive_generic(
        x,
        y,
        matrix_centered=_distance_matrix,
        product=mean_product,
        exponent=exponent,
    )
    return stats.correlation_xy
def _u_distance_correlation_sqr_naive(
    x: T,
    y: T,
    exponent: float = 1,
    compile_mode: CompileMode = CompileMode.AUTO,
) -> T:
    """Bias-corrected distance correlation estimator between two matrices."""
    _naive_check_compile_mode(compile_mode)
    stats = _distance_sqr_stats_naive_generic(
        x,
        y,
        matrix_centered=_u_distance_matrix,
        product=u_product,
        exponent=exponent,
    )
    return stats.correlation_xy
def _is_random_variable(x: T) -> bool:
"""
Check if the matrix x correspond to a random variable.
The matrix is considered a random variable if it is a vector
or a matrix corresponding to a column vector. Otherwise,
the matrix correspond to a random vector.
"""
return len(x.shape) == 1 or x.shape[1] == 1
def _can_use_fast_algorithm(x: T, y: T, exponent: float = 1) -> bool:
    """
    Check if the fast algorithm for distance stats can be used.

    The fast algorithm has complexity :math:`O(NlogN)`, better than the
    complexity of the naive algorithm (:math:`O(N^2)`).

    The algorithm can only be used for random variables (not vectors)
    where the number of instances is greater than 3. Also, the exponent
    must be 1.
    """
    if exponent != 1:
        return False
    if not (_is_random_variable(x) and _is_random_variable(y)):
        return False
    return x.shape[0] > 3 and y.shape[0] > 3
def _distance_stats_sqr_generic(
    x: T,
    y: T,
    *,
    exponent: float = 1,
    dcov_function: DCovFunction,
    compile_mode: CompileMode = CompileMode.AUTO,
) -> Stats[T]:
    """Compute the distance stats using a dcov algorithm."""
    if exponent != 1:
        raise ValueError(f"Exponent should be 1 but is {exponent} instead.")
    xp = get_namespace(x, y)
    # The same dcov implementation supplies the covariance and both
    # variances.
    covariance_xy_sqr = dcov_function(x, y, compile_mode=compile_mode)
    variance_x_sqr = dcov_function(x, x, compile_mode=compile_mode)
    variance_y_sqr = dcov_function(y, y, compile_mode=compile_mode)
    denominator = _sqrt(xp.abs(variance_x_sqr * variance_y_sqr))
    # The exact-zero check is deliberate: comparisons using a tolerance
    # can change results if the covariance has a similar order of
    # magnitude.
    if denominator == 0.0:
        correlation_xy_sqr = xp.asarray(0, dtype=covariance_xy_sqr.dtype)
    else:
        correlation_xy_sqr = covariance_xy_sqr / denominator
    return Stats(
        covariance_xy=covariance_xy_sqr,
        correlation_xy=correlation_xy_sqr,
        variance_x=variance_x_sqr,
        variance_y=variance_y_sqr,
    )
class DistanceCovarianceMethod(Enum):
    """
    Method used for computing the distance covariance.
    """
    # Each member's value bundles the callables implementing that
    # algorithm; module-level functions reach them via ``method.value``.
    AUTO = _DcovAlgorithmInternalsAuto()
    """
    Try to select the best algorithm. It will try to use a fast
    algorithm if possible. Otherwise it will use the naive
    implementation.
    """
    NAIVE = _DcovAlgorithmInternals(
        dcov_sqr=_distance_covariance_sqr_naive,
        u_dcov_sqr=_u_distance_covariance_sqr_naive,
        dcor_sqr=_distance_correlation_sqr_naive,
        u_dcor_sqr=_u_distance_correlation_sqr_naive,
        stats_generic=_distance_sqr_stats_naive_generic,
    )
    r"""
    Use the usual estimator of the distance covariance, which is
    :math:`O(n^2)`
    """
    AVL = _DcovAlgorithmInternals(
        dcov_generic=_distance_covariance_sqr_avl_generic,
    )
    r"""
    Use the fast implementation from
    :cite:`b-fast_distance_correlation_avl` which is
    :math:`O(n\log n)`
    """
    MERGESORT = _DcovAlgorithmInternals(
        dcov_generic=_distance_covariance_sqr_mergesort_generic,
    )
    r"""
    Use the fast implementation from
    :cite:`b-fast_distance_correlation_mergesort` which is
    :math:`O(n\log n)`
    """
    def __repr__(self) -> str:
        # Show ``ClassName.MEMBER`` instead of the default enum repr,
        # since the wrapped internals objects are not meaningful to users.
        return '%s.%s' % (self.__class__.__name__, self.name)
def _to_algorithm(
    algorithm: DistanceCovarianceMethod | str,
) -> DistanceCovarianceMethod:
    """Convert to algorithm if string."""
    # Strings are matched against member names, case-insensitively.
    return (
        algorithm
        if isinstance(algorithm, DistanceCovarianceMethod)
        else DistanceCovarianceMethod[algorithm.upper()]
    )
def distance_covariance_sqr(
    x: T,
    y: T,
    *,
    exponent: float = 1,
    method: DistanceCovarianceMethod | str = DistanceCovarianceMethod.AUTO,
    compile_mode: CompileMode = CompileMode.AUTO,
) -> T:
    """
    Usual (biased) estimator for the squared distance covariance.

    Args:
        x: First random vector. The columns correspond with the individual
            random variables while the rows are individual instances of the
            random vector.
        y: Second random vector. The columns correspond with the individual
            random variables while the rows are individual instances of the
            random vector.
        exponent: Exponent of the Euclidean distance, in the range
            :math:`(0, 2)`. Equivalently, it is twice the Hurst parameter of
            fractional Brownian motion.
        method: Method to use internally to compute the distance covariance.
        compile_mode: Compilation mode used. By default it tries to use the
            fastest available type of compilation.

    Returns:
        Biased estimator of the squared distance covariance.

    See Also:
        distance_covariance
        u_distance_covariance_sqr

    Examples:
        >>> import numpy as np
        >>> import dcor
        >>> a = np.array([[1., 2., 3., 4.],
        ...               [5., 6., 7., 8.],
        ...               [9., 10., 11., 12.],
        ...               [13., 14., 15., 16.]])
        >>> b = np.array([[1.], [0.], [0.], [1.]])
        >>> dcor.distance_covariance_sqr(a, a)
        52.0
        >>> dcor.distance_covariance_sqr(a, b)
        1.0
        >>> dcor.distance_covariance_sqr(b, b)
        0.25
        >>> dcor.distance_covariance_sqr(a, b, exponent=0.5) # doctest: +ELLIPSIS
        0.3705904...
    """
    # Resolve strings like "avl" to the corresponding enum member, then
    # delegate to that algorithm's biased dcov implementation.
    algorithm = _to_algorithm(method)
    return algorithm.value.dcov_sqr(
        x,
        y,
        exponent=exponent,
        compile_mode=compile_mode,
    )
def u_distance_covariance_sqr(
    x: T,
    y: T,
    *,
    exponent: float = 1,
    method: DistanceCovarianceMethod | str = DistanceCovarianceMethod.AUTO,
    compile_mode: CompileMode = CompileMode.AUTO,
) -> T:
    """
    Unbiased estimator for the squared distance covariance.

    Args:
        x: First random vector. The columns correspond with the individual
            random variables while the rows are individual instances of the
            random vector.
        y: Second random vector. The columns correspond with the individual
            random variables while the rows are individual instances of the
            random vector.
        exponent: Exponent of the Euclidean distance, in the range
            :math:`(0, 2)`. Equivalently, it is twice the Hurst parameter of
            fractional Brownian motion.
        method: Method to use internally to compute the distance covariance.
        compile_mode: Compilation mode used. By default it tries to use the
            fastest available type of compilation.

    Returns:
        Value of the unbiased estimator of the squared distance covariance.

    See Also:
        distance_covariance
        distance_covariance_sqr

    Examples:
        >>> import numpy as np
        >>> import dcor
        >>> a = np.array([[1., 2., 3., 4.],
        ...               [5., 6., 7., 8.],
        ...               [9., 10., 11., 12.],
        ...               [13., 14., 15., 16.]])
        >>> b = np.array([[1.], [0.], [0.], [1.]])
        >>> dcor.u_distance_covariance_sqr(a, a) # doctest: +ELLIPSIS
        42.6666666...
        >>> dcor.u_distance_covariance_sqr(a, b) # doctest: +ELLIPSIS
        -2.6666666...
        >>> dcor.u_distance_covariance_sqr(b, b) # doctest: +ELLIPSIS
        0.6666666...
        >>> dcor.u_distance_covariance_sqr(a, b, exponent=0.5) # doctest: +ELLIPSIS
        -0.2996598...
    """
    # Fix: ``method`` is annotated ``DistanceCovarianceMethod | str`` like in
    # every sibling function, since ``_to_algorithm`` accepts string names.
    method = _to_algorithm(method)
    return method.value.u_dcov_sqr(
        x,
        y,
        exponent=exponent,
        compile_mode=compile_mode,
    )
def distance_covariance(
    x: T,
    y: T,
    *,
    exponent: float = 1,
    method: DistanceCovarianceMethod | str = DistanceCovarianceMethod.AUTO,
    compile_mode: CompileMode = CompileMode.AUTO,
) -> T:
    """
    Usual (biased) estimator for the distance covariance.

    Args:
        x: First random vector. The columns correspond with the individual
            random variables while the rows are individual instances of the
            random vector.
        y: Second random vector. The columns correspond with the individual
            random variables while the rows are individual instances of the
            random vector.
        exponent: Exponent of the Euclidean distance, in the range
            :math:`(0, 2)`. Equivalently, it is twice the Hurst parameter of
            fractional Brownian motion.
        method: Method to use internally to compute the distance covariance.
        compile_mode: Compilation mode used. By default it tries to use the
            fastest available type of compilation.

    Returns:
        Biased estimator of the distance covariance.

    See Also:
        distance_covariance_sqr
        u_distance_covariance_sqr

    Examples:
        >>> import numpy as np
        >>> import dcor
        >>> a = np.array([[1., 2., 3., 4.],
        ...               [5., 6., 7., 8.],
        ...               [9., 10., 11., 12.],
        ...               [13., 14., 15., 16.]])
        >>> b = np.array([[1.], [0.], [0.], [1.]])
        >>> dcor.distance_covariance(a, a) # doctest: +ELLIPSIS
        7.2111025...
        >>> dcor.distance_covariance(a, b)
        1.0
        >>> dcor.distance_covariance(b, b)
        0.5
        >>> dcor.distance_covariance(a, b, exponent=0.5) # doctest: +ELLIPSIS
        0.6087614...
    """
    # Fix: the ``exponent=0.5`` example was missing the ``+ELLIPSIS``
    # doctest directive, so its truncated output ``0.6087614...`` could
    # never match the actual result.
    return _sqrt(
        distance_covariance_sqr(
            x,
            y,
            exponent=exponent,
            method=method,
            compile_mode=compile_mode,
        ),
    )
def distance_stats_sqr(
    x: T,
    y: T,
    *,
    exponent: float = 1,
    method: DistanceCovarianceMethod | str = DistanceCovarianceMethod.AUTO,
    compile_mode: CompileMode = CompileMode.AUTO,
) -> Stats[T]:
    """
    Usual (biased) statistics related with the squared distance covariance.

    Computes the usual (biased) estimators for the squared distance covariance
    and squared distance correlation between two random vectors, and the
    individual squared distance variances.

    Args:
        x: First random vector. The columns correspond with the individual
            random variables while the rows are individual instances of the
            random vector.
        y: Second random vector. The columns correspond with the individual
            random variables while the rows are individual instances of the
            random vector.
        exponent: Exponent of the Euclidean distance, in the range
            :math:`(0, 2)`. Equivalently, it is twice the Hurst parameter of
            fractional Brownian motion.
        method: Method to use internally to compute the distance covariance.
        compile_mode: Compilation mode used. By default it tries to use the
            fastest available type of compilation.

    Returns:
        Stats object containing squared distance covariance,
        squared distance correlation,
        squared distance variance of the first random vector and
        squared distance variance of the second random vector.

    See Also:
        distance_covariance_sqr
        distance_correlation_sqr

    Notes:
        It is less efficient to compute the statistics separately, rather than
        using this function, because some computations can be shared.

    Examples:
        >>> import numpy as np
        >>> import dcor
        >>> a = np.array([[1., 2., 3., 4.],
        ...               [5., 6., 7., 8.],
        ...               [9., 10., 11., 12.],
        ...               [13., 14., 15., 16.]])
        >>> b = np.array([[1.], [0.], [0.], [1.]])
        >>> dcor.distance_stats_sqr(a, a) # doctest: +NORMALIZE_WHITESPACE
        Stats(covariance_xy=52.0, correlation_xy=1.0, variance_x=52.0,
        variance_y=52.0)
        >>> dcor.distance_stats_sqr(a, b) # doctest: +ELLIPSIS
        ... # doctest: +NORMALIZE_WHITESPACE
        Stats(covariance_xy=1.0, correlation_xy=0.2773500...,
        variance_x=52.0, variance_y=0.25)
        >>> dcor.distance_stats_sqr(b, b) # doctest: +NORMALIZE_WHITESPACE
        Stats(covariance_xy=0.25, correlation_xy=1.0, variance_x=0.25,
        variance_y=0.25)
        >>> dcor.distance_stats_sqr(a, b, exponent=0.5) # doctest: +ELLIPSIS
        ... # doctest: +NORMALIZE_WHITESPACE
        Stats(covariance_xy=0.3705904..., correlation_xy=0.4493308...,
        variance_x=2.7209220..., variance_y=0.25)
    """
    # Fix: the ``(a, b)`` example output contains ``0.2773500...`` but was
    # missing the ``+ELLIPSIS`` doctest directive required to match it.
    method = _to_algorithm(method)
    return method.value.stats_sqr(
        x,
        y,
        exponent=exponent,
        compile_mode=compile_mode,
    )
def u_distance_stats_sqr(
    x: T,
    y: T,
    *,
    exponent: float = 1,
    method: DistanceCovarianceMethod | str = DistanceCovarianceMethod.AUTO,
    compile_mode: CompileMode = CompileMode.AUTO,
) -> Stats[T]:
    """
    Unbiased statistics related with the squared distance covariance.

    Computes the unbiased estimators for the squared distance covariance
    and squared distance correlation between two random vectors, and the
    individual squared distance variances.

    Args:
        x: First random vector. The columns correspond with the individual
            random variables while the rows are individual instances of the
            random vector.
        y: Second random vector. The columns correspond with the individual
            random variables while the rows are individual instances of the
            random vector.
        exponent: Exponent of the Euclidean distance, in the range
            :math:`(0, 2)`. Equivalently, it is twice the Hurst parameter of
            fractional Brownian motion.
        method: Method to use internally to compute the distance covariance.
        compile_mode: Compilation mode used. By default it tries to use the
            fastest available type of compilation.

    Returns:
        Stats object containing squared distance covariance,
        squared distance correlation,
        squared distance variance of the first random vector and
        squared distance variance of the second random vector.

    See Also:
        u_distance_covariance_sqr
        u_distance_correlation_sqr

    Notes:
        It is less efficient to compute the statistics separately, rather than
        using this function, because some computations can be shared.

    Examples:
        >>> import numpy as np
        >>> import dcor
        >>> a = np.array([[1., 2., 3., 4.],
        ...               [5., 6., 7., 8.],
        ...               [9., 10., 11., 12.],
        ...               [13., 14., 15., 16.]])
        >>> b = np.array([[1.], [0.], [0.], [1.]])
        >>> dcor.u_distance_stats_sqr(a, a) # doctest: +ELLIPSIS
        ... # doctest: +NORMALIZE_WHITESPACE
        Stats(covariance_xy=42.6666666..., correlation_xy=1.0,
        variance_x=42.6666666..., variance_y=42.6666666...)
        >>> dcor.u_distance_stats_sqr(a, b) # doctest: +ELLIPSIS
        ... # doctest: +NORMALIZE_WHITESPACE
        Stats(covariance_xy=-2.6666666..., correlation_xy=-0.5,
        variance_x=42.6666666..., variance_y=0.6666666...)
        >>> dcor.u_distance_stats_sqr(b, b) # doctest: +ELLIPSIS
        ... # doctest: +NORMALIZE_WHITESPACE
        Stats(covariance_xy=0.6666666..., correlation_xy=1.0,
        variance_x=0.6666666..., variance_y=0.6666666...)
        >>> dcor.u_distance_stats_sqr(a, b, exponent=0.5) # doctest: +ELLIPSIS
        ... # doctest: +NORMALIZE_WHITESPACE
        Stats(covariance_xy=-0.2996598..., correlation_xy=-0.4050479...,
        variance_x=0.8209855..., variance_y=0.6666666...)
    """
    # Fix: the return annotation was ``T`` although a ``Stats`` object is
    # returned (as both the docstring and ``distance_stats_sqr`` indicate).
    method = _to_algorithm(method)
    return method.value.u_stats_sqr(
        x,
        y,
        exponent=exponent,
        compile_mode=compile_mode,
    )
def distance_stats(
    x: T,
    y: T,
    *,
    exponent: float = 1,
    method: DistanceCovarianceMethod | str = DistanceCovarianceMethod.AUTO,
    compile_mode: CompileMode = CompileMode.AUTO,
) -> Stats[T]:
    """
    Usual (biased) statistics related with the distance covariance.

    Computes the usual (biased) estimators for the distance covariance
    and distance correlation between two random vectors, and the
    individual distance variances.

    Args:
        x: First random vector. The columns correspond with the individual
            random variables while the rows are individual instances of the
            random vector.
        y: Second random vector. The columns correspond with the individual
            random variables while the rows are individual instances of the
            random vector.
        exponent: Exponent of the Euclidean distance, in the range
            :math:`(0, 2)`. Equivalently, it is twice the Hurst parameter of
            fractional Brownian motion.
        method: Method to use internally to compute the distance covariance.
        compile_mode: Compilation mode used. By default it tries to use the
            fastest available type of compilation.

    Returns:
        Stats object containing distance covariance,
        distance correlation,
        distance variance of the first random vector and
        distance variance of the second random vector.

    See Also:
        distance_covariance
        distance_correlation

    Notes:
        It is less efficient to compute the statistics separately, rather than
        using this function, because some computations can be shared.

    Examples:
        >>> import numpy as np
        >>> import dcor
        >>> a = np.array([[1., 2., 3., 4.],
        ...               [5., 6., 7., 8.],
        ...               [9., 10., 11., 12.],
        ...               [13., 14., 15., 16.]])
        >>> b = np.array([[1.], [0.], [0.], [1.]])
        >>> dcor.distance_stats(a, a) # doctest: +ELLIPSIS
        ... # doctest: +NORMALIZE_WHITESPACE
        Stats(covariance_xy=7.2111025..., correlation_xy=1.0,
        variance_x=7.2111025..., variance_y=7.2111025...)
        >>> dcor.distance_stats(a, b) # doctest: +ELLIPSIS
        ... # doctest: +NORMALIZE_WHITESPACE
        Stats(covariance_xy=1.0, correlation_xy=0.5266403...,
        variance_x=7.2111025..., variance_y=0.5)
        >>> dcor.distance_stats(b, b) # doctest: +NORMALIZE_WHITESPACE
        Stats(covariance_xy=0.5, correlation_xy=1.0, variance_x=0.5,
        variance_y=0.5)
        >>> dcor.distance_stats(a, b, exponent=0.5) # doctest: +ELLIPSIS
        ... # doctest: +NORMALIZE_WHITESPACE
        Stats(covariance_xy=0.6087614..., correlation_xy=0.6703214...,
        variance_x=1.6495217..., variance_y=0.5)
    """
    # Fix: the ``(a, a)`` and ``(a, b)`` examples have truncated outputs
    # (``7.2111025...``, ``0.5266403...``) but were missing the
    # ``+ELLIPSIS`` doctest directive, so they could never match.
    return Stats(
        *[
            _sqrt(s) for s in astuple(
                distance_stats_sqr(
                    x,
                    y,
                    exponent=exponent,
                    method=method,
                    compile_mode=compile_mode,
                ),
            )
        ],
    )
def distance_correlation_sqr(
    x: T,
    y: T,
    *,
    exponent: float = 1,
    method: DistanceCovarianceMethod | str = DistanceCovarianceMethod.AUTO,
    compile_mode: CompileMode = CompileMode.AUTO,
) -> T:
    """
    Usual (biased) estimator for the squared distance correlation.

    Args:
        x: First random vector. Columns are the individual random
            variables; rows are the observations.
        y: Second random vector. Columns are the individual random
            variables; rows are the observations.
        exponent: Exponent of the Euclidean distance, in the range
            :math:`(0, 2)`. Equivalently, twice the Hurst parameter of
            fractional Brownian motion.
        method: Method used internally to compute the distance covariance.
        compile_mode: Compilation mode; by default the fastest available
            one is attempted.

    Returns:
        Value of the biased estimator of the squared distance correlation.

    See Also:
        distance_correlation
        u_distance_correlation_sqr

    Examples:
        >>> import numpy as np
        >>> import dcor
        >>> a = np.array([[1., 2., 3., 4.],
        ...               [5., 6., 7., 8.],
        ...               [9., 10., 11., 12.],
        ...               [13., 14., 15., 16.]])
        >>> b = np.array([[1.], [0.], [0.], [1.]])
        >>> dcor.distance_correlation_sqr(a, a)
        1.0
        >>> dcor.distance_correlation_sqr(a, b) # doctest: +ELLIPSIS
        0.2773500...
        >>> dcor.distance_correlation_sqr(b, b)
        1.0
        >>> dcor.distance_correlation_sqr(a, b, exponent=0.5) # doctest: +ELLIPSIS
        0.4493308...
    """
    # Resolve string names to enum members, then delegate to the chosen
    # implementation bundle.
    algorithm = _to_algorithm(method)
    return algorithm.value.dcor_sqr(
        x,
        y,
        exponent=exponent,
        compile_mode=compile_mode,
    )
def u_distance_correlation_sqr(
    x: T,
    y: T,
    *,
    exponent: float = 1,
    method: DistanceCovarianceMethod | str = DistanceCovarianceMethod.AUTO,
    compile_mode: CompileMode = CompileMode.AUTO,
) -> T:
    """
    Bias-corrected estimator for the squared distance correlation.

    Args:
        x: First random vector. Columns are the individual random
            variables; rows are the observations.
        y: Second random vector. Columns are the individual random
            variables; rows are the observations.
        exponent: Exponent of the Euclidean distance, in the range
            :math:`(0, 2)`. Equivalently, twice the Hurst parameter of
            fractional Brownian motion.
        method: Method used internally to compute the distance covariance.
        compile_mode: Compilation mode; by default the fastest available
            one is attempted.

    Returns:
        Value of the bias-corrected estimator of the squared distance
        correlation.

    See Also:
        distance_correlation
        distance_correlation_sqr

    Examples:
        >>> import numpy as np
        >>> import dcor
        >>> a = np.array([[1., 2., 3., 4.],
        ...               [5., 6., 7., 8.],
        ...               [9., 10., 11., 12.],
        ...               [13., 14., 15., 16.]])
        >>> b = np.array([[1.], [0.], [0.], [1.]])
        >>> dcor.u_distance_correlation_sqr(a, a)
        1.0
        >>> dcor.u_distance_correlation_sqr(a, b)
        -0.5
        >>> dcor.u_distance_correlation_sqr(b, b)
        1.0
        >>> dcor.u_distance_correlation_sqr(a, b, exponent=0.5)
        ... # doctest: +ELLIPSIS
        -0.4050479...
    """
    # Resolve string names to enum members, then delegate to the chosen
    # implementation bundle.
    algorithm = _to_algorithm(method)
    return algorithm.value.u_dcor_sqr(
        x,
        y,
        exponent=exponent,
        compile_mode=compile_mode,
    )
def distance_correlation(
    x: T,
    y: T,
    *,
    exponent: float = 1,
    method: DistanceCovarianceMethod | str = DistanceCovarianceMethod.AUTO,
    compile_mode: CompileMode = CompileMode.AUTO,
) -> T:
    """
    Usual (biased) estimator for the distance correlation.

    Args:
        x: First random vector. Columns are the individual random
            variables; rows are the observations.
        y: Second random vector. Columns are the individual random
            variables; rows are the observations.
        exponent: Exponent of the Euclidean distance, in the range
            :math:`(0, 2)`. Equivalently, twice the Hurst parameter of
            fractional Brownian motion.
        method: Method used internally to compute the distance covariance.
        compile_mode: Compilation mode; by default the fastest available
            one is attempted.

    Returns:
        Value of the biased estimator of the distance correlation.

    See Also:
        distance_correlation_sqr
        u_distance_correlation_sqr

    Examples:
        >>> import numpy as np
        >>> import dcor
        >>> a = np.array([[1., 2., 3., 4.],
        ...               [5., 6., 7., 8.],
        ...               [9., 10., 11., 12.],
        ...               [13., 14., 15., 16.]])
        >>> b = np.array([[1.], [0.], [0.], [1.]])
        >>> dcor.distance_correlation(a, a)
        1.0
        >>> dcor.distance_correlation(a, b) # doctest: +ELLIPSIS
        0.5266403...
        >>> dcor.distance_correlation(b, b)
        1.0
        >>> dcor.distance_correlation(a, b, exponent=0.5) # doctest: +ELLIPSIS
        0.6703214...
    """
    squared = distance_correlation_sqr(
        x,
        y,
        exponent=exponent,
        method=method,
        compile_mode=compile_mode,
    )
    return _sqrt(squared)
def distance_correlation_af_inv_sqr(
    x: T,
    y: T,
    method: DistanceCovarianceMethod | str = DistanceCovarianceMethod.AUTO,
    compile_mode: CompileMode = CompileMode.AUTO,
) -> T:
    """
    Square of the affinely invariant distance correlation.

    Computes the estimator for the square of the affinely invariant distance
    correlation between two random vectors.

    Warning:
        The return value of this function is undefined when the
        covariance matrix of :math:`x` or :math:`y` is singular.

    Args:
        x: First random vector. Columns are the individual random
            variables; rows are the observations.
        y: Second random vector. Columns are the individual random
            variables; rows are the observations.
        method: Method used internally to compute the distance covariance.
        compile_mode: Compilation mode; by default the fastest available
            one is attempted.

    Returns:
        Value of the estimator of the squared affinely invariant
        distance correlation.

    See Also:
        distance_correlation
        u_distance_correlation

    Examples:
        >>> import numpy as np
        >>> import dcor
        >>> a = np.array([[1., 3., 2., 5.],
        ...               [5., 7., 6., 8.],
        ...               [9., 10., 11., 12.],
        ...               [13., 15., 15., 16.]])
        >>> b = np.array([[1.], [0.], [0.], [1.]])
        >>> dcor.distance_correlation_af_inv_sqr(a, a)
        1.0
        >>> dcor.distance_correlation_af_inv_sqr(a, b) # doctest: +ELLIPSIS
        0.5773502...
        >>> dcor.distance_correlation_af_inv_sqr(b, b)
        1.0
    """
    # Rescale both samples so the result is invariant to affine maps.
    x = _af_inv_scaled(x)
    y = _af_inv_scaled(y)
    correlation = distance_correlation_sqr(
        x,
        y,
        method=method,
        compile_mode=compile_mode,
    )
    xp = get_namespace(x, y)
    # A degenerate (NaN) quotient is reported as zero correlation.
    if xp.isnan(correlation):
        return xp.asarray(0, dtype=correlation.dtype)
    return correlation
def distance_correlation_af_inv(
    x: T,
    y: T,
    method: DistanceCovarianceMethod | str = DistanceCovarianceMethod.AUTO,
    compile_mode: CompileMode = CompileMode.AUTO,
) -> T:
    """
    Affinely invariant distance correlation.

    Computes the estimator for the affinely invariant distance
    correlation between two random vectors.

    Warning:
        The return value of this function is undefined when the
        covariance matrix of :math:`x` or :math:`y` is singular.

    Args:
        x: First random vector. The columns correspond with the individual
            random variables while the rows are individual instances of the
            random vector.
        y: Second random vector. The columns correspond with the individual
            random variables while the rows are individual instances of the
            random vector.
        method: Method to use internally to compute the distance covariance.
        compile_mode: Compilation mode used. By default it tries to use the
            fastest available type of compilation.

    Returns:
        Value of the estimator of the affinely invariant
        distance correlation.

    See Also:
        distance_correlation
        u_distance_correlation

    Examples:
        >>> import numpy as np
        >>> import dcor
        >>> a = np.array([[1., 3., 2., 5.],
        ...               [5., 7., 6., 8.],
        ...               [9., 10., 11., 12.],
        ...               [13., 15., 15., 16.]])
        >>> b = np.array([[1.], [0.], [0.], [1.]])
        >>> dcor.distance_correlation_af_inv(a, a)
        1.0
        >>> dcor.distance_correlation_af_inv(a, b) # doctest: +ELLIPSIS
        0.7598356...
        >>> dcor.distance_correlation_af_inv(b, b)
        1.0
    """
    # Square root of the affinely invariant squared correlation.
    return _sqrt(
        distance_correlation_af_inv_sqr(
            x,
            y,
            method=method,
            compile_mode=compile_mode,
        ),
    )
from __future__ import annotations
from dataclasses import astuple, dataclass
from enum import Enum
from typing import TYPE_CHECKING, Any, Callable, Generic, Iterator, TypeVar
from dcor._dcor_internals import _af_inv_scaled
from ._dcor_internals import (
MatrixCentered,
_distance_matrix,
_u_distance_matrix,
mean_product,
u_product,
)
from ._fast_dcov_avl import _distance_covariance_sqr_avl_generic
from ._fast_dcov_mergesort import _distance_covariance_sqr_mergesort_generic
from ._utils import ArrayType, CompileMode, _sqrt, get_namespace
# Generic array type used throughout the module (any supported array backend).
T = TypeVar("T", bound=ArrayType)

if TYPE_CHECKING:
    # ``Protocol`` is only needed by static type checkers; fall back to
    # ``typing_extensions`` on Python versions where it is missing.
    try:
        from typing import Protocol
    except ImportError:
        from typing_extensions import Protocol
else:
    # At runtime a plain base class is enough.
    Protocol = object


class DCovFunction(Protocol):
    """Callback protocol for distance covariance functions."""

    def __call__(self, __x: T, __y: T, *, compile_mode: CompileMode) -> T:
        ...
class _DcovAlgorithmInternals():
def __init__(
self,
*,
dcov_sqr=None,
u_dcov_sqr=None,
dcor_sqr=None,
u_dcor_sqr=None,
stats_sqr=None,
u_stats_sqr=None,
dcov_generic=None,
stats_generic=None,
):
# Dcov and U-Dcov
if dcov_generic is not None:
self.dcov_sqr = (
lambda *args, **kwargs: dcov_generic(
*args,
**kwargs,
unbiased=False,
)
)
self.u_dcov_sqr = (
lambda *args, **kwargs: dcov_generic(
*args,
**kwargs,
unbiased=True,
)
)
else:
self.dcov_sqr = dcov_sqr
self.u_dcov_sqr = u_dcov_sqr
# Stats
if stats_sqr is not None:
self.stats_sqr = stats_sqr
else:
if stats_generic is None:
self.stats_sqr = (
lambda *args, **kwargs: _distance_stats_sqr_generic(
*args,
**kwargs,
dcov_function=self.dcov_sqr,
)
)
else:
self.stats_sqr = (
lambda *args, **kwargs: stats_generic(
*args,
**kwargs,
matrix_centered=_distance_matrix,
product=mean_product,
)
)
# U-Stats
if u_stats_sqr is not None:
self.u_stats_sqr = u_stats_sqr
else:
if stats_generic is None:
self.u_stats_sqr = (
lambda *args, **kwargs: _distance_stats_sqr_generic(
*args,
**kwargs,
dcov_function=self.u_dcov_sqr,
)
)
else:
self.u_stats_sqr = (
lambda *args, **kwargs: stats_generic(
*args,
**kwargs,
matrix_centered=_u_distance_matrix,
product=u_product,
)
)
# Dcor
if dcor_sqr is not None:
self.dcor_sqr = dcor_sqr
else:
self.dcor_sqr = lambda *args, **kwargs: self.stats_sqr(
*args,
**kwargs,
).correlation_xy
# U-Dcor
if u_dcor_sqr is not None:
self.u_dcor_sqr = u_dcor_sqr
else:
self.u_dcor_sqr = lambda *args, **kwargs: self.u_stats_sqr(
*args,
**kwargs,
).correlation_xy
class _DcovAlgorithmInternalsAuto():
def _dispatch(
self,
x: T,
y: T,
*,
method: str,
exponent: float,
**kwargs: Any,
) -> Any:
if _can_use_fast_algorithm(x, y, exponent):
return getattr(DistanceCovarianceMethod.AVL.value, method)(
x,
y,
exponent=exponent,
**kwargs,
)
else:
return getattr(
DistanceCovarianceMethod.NAIVE.value, method)(
x,
y,
exponent=exponent,
**kwargs,
)
def __getattr__(self, method: str) -> Any:
if method[0] != '_':
return lambda *args, **kwargs: self._dispatch(
*args,
**kwargs,
method=method,
)
else:
raise AttributeError(
f"{self.__class__.__name__!r} object has no "
f"attribute {method!r}",
)
@dataclass(frozen=True)
class Stats(Generic[T]):
    """
    Distance covariance related stats.

    Groups the four quantities that the ``*_stats`` functions compute
    together so that shared intermediate results are reused.
    """

    # Distance covariance between x and y (squared or not, depending on
    # the producing function).
    covariance_xy: T
    # Distance correlation between x and y.
    correlation_xy: T
    # Distance variance of x.
    variance_x: T
    # Distance variance of y.
    variance_y: T

    def __iter__(self) -> Iterator[T]:
        # Iterate the fields in declaration order (used e.g. by
        # ``distance_stats`` to apply the square root componentwise).
        return iter(astuple(self))
def _naive_check_compile_mode(compile_mode: CompileMode) -> None:
    """Raise ``NotImplementedError`` unless compile mode is AUTO or NO_COMPILE."""
    supported = (CompileMode.AUTO, CompileMode.NO_COMPILE)
    if compile_mode in supported:
        return
    raise NotImplementedError(
        f"Compile mode {compile_mode} not implemented.",
    )
def _distance_covariance_sqr_naive(
    x: T,
    y: T,
    exponent: float = 1,
    compile_mode: CompileMode = CompileMode.AUTO,
) -> T:
    """
    Naive biased estimator for distance covariance.

    Computes the biased estimator for distance covariance between two
    matrices, using an :math:`O(N^2)` algorithm.
    """
    _naive_check_compile_mode(compile_mode)
    # Double-centered distance matrices; their mean product is the biased
    # squared dcov estimate.
    a = _distance_matrix(x, exponent=exponent)
    b = _distance_matrix(y, exponent=exponent)
    return mean_product(a, b)
def _u_distance_covariance_sqr_naive(
    x: T,
    y: T,
    exponent: float = 1,
    compile_mode: CompileMode = CompileMode.AUTO,
) -> T:
    """
    Naive unbiased estimator for distance covariance.

    Uses U-centered distance matrices and an :math:`O(N^2)` algorithm.
    """
    _naive_check_compile_mode(compile_mode)
    return u_product(
        _u_distance_matrix(x, exponent=exponent),
        _u_distance_matrix(y, exponent=exponent),
    )
def _distance_sqr_stats_naive_generic(
    x: T,
    y: T,
    matrix_centered: MatrixCentered,
    product: Callable[[T, T], T],
    exponent: float = 1,
    compile_mode: CompileMode = CompileMode.AUTO,
) -> Stats[T]:
    """Compute the squared distance stats with the naive O(N^2) algorithm."""
    _naive_check_compile_mode(compile_mode)
    cent_x = matrix_centered(x, exponent=exponent)
    cent_y = matrix_centered(y, exponent=exponent)
    cov_xy = product(cent_x, cent_y)
    var_x = product(cent_x, cent_x)
    var_y = product(cent_y, cent_y)
    xp = get_namespace(x, y)
    denominator = _sqrt(xp.abs(var_x * var_y))
    # Exact zero check on purpose: comparing with a tolerance could change
    # results when the covariance has a similar order of magnitude.
    if denominator == 0.0:
        corr_xy = xp.asarray(0, dtype=cov_xy.dtype)
    else:
        corr_xy = cov_xy / denominator
    return Stats(
        covariance_xy=cov_xy,
        correlation_xy=corr_xy,
        variance_x=var_x,
        variance_y=var_y,
    )
def _distance_correlation_sqr_naive(
    x: T,
    y: T,
    exponent: float = 1,
    compile_mode: CompileMode = CompileMode.AUTO,
) -> T:
    """
    Biased distance correlation estimator between two matrices.

    Uses the naive :math:`O(N^2)` algorithm on double-centered distance
    matrices.
    """
    _naive_check_compile_mode(compile_mode)
    return _distance_sqr_stats_naive_generic(
        x,
        y,
        matrix_centered=_distance_matrix,
        product=mean_product,
        exponent=exponent,
        # Fix: forward compile_mode instead of silently dropping it, so the
        # generic helper validates the mode the caller actually requested.
        compile_mode=compile_mode,
    ).correlation_xy
def _u_distance_correlation_sqr_naive(
    x: T,
    y: T,
    exponent: float = 1,
    compile_mode: CompileMode = CompileMode.AUTO,
) -> T:
    """
    Bias-corrected distance correlation estimator between two matrices.

    Uses the naive :math:`O(N^2)` algorithm on U-centered distance
    matrices.
    """
    _naive_check_compile_mode(compile_mode)
    return _distance_sqr_stats_naive_generic(
        x,
        y,
        matrix_centered=_u_distance_matrix,
        product=u_product,
        exponent=exponent,
        # Fix: forward compile_mode instead of silently dropping it, so the
        # generic helper validates the mode the caller actually requested.
        compile_mode=compile_mode,
    ).correlation_xy
def _is_random_variable(x: T) -> bool:
"""
Check if the matrix x correspond to a random variable.
The matrix is considered a random variable if it is a vector
or a matrix corresponding to a column vector. Otherwise,
the matrix correspond to a random vector.
"""
return len(x.shape) == 1 or x.shape[1] == 1
def _can_use_fast_algorithm(x: T, y: T, exponent: float = 1) -> bool:
    """
    Check whether the fast O(n log n) distance stats algorithm applies.

    The fast path only supports univariate data (random variables, not
    random vectors) with more than 3 observations and Euclidean
    exponent 1; everything else falls back to the O(n^2) algorithm.
    """
    if exponent != 1:
        return False
    if not (_is_random_variable(x) and _is_random_variable(y)):
        return False
    return x.shape[0] > 3 and y.shape[0] > 3
def _distance_stats_sqr_generic(
    x: T,
    y: T,
    *,
    exponent: float = 1,
    dcov_function: DCovFunction,
    compile_mode: CompileMode = CompileMode.AUTO,
) -> Stats[T]:
    """Compute the squared distance stats from a dcov implementation."""
    if exponent != 1:
        raise ValueError(f"Exponent should be 1 but is {exponent} instead.")
    xp = get_namespace(x, y)
    cov_xy = dcov_function(x, y, compile_mode=compile_mode)
    var_x = dcov_function(x, x, compile_mode=compile_mode)
    var_y = dcov_function(y, y, compile_mode=compile_mode)
    denominator = _sqrt(xp.abs(var_x * var_y))
    # Exact zero check on purpose: comparing with a tolerance could change
    # results when the covariance has a similar order of magnitude.
    if denominator == 0.0:
        corr_xy = xp.asarray(0, dtype=cov_xy.dtype)
    else:
        corr_xy = cov_xy / denominator
    return Stats(
        covariance_xy=cov_xy,
        correlation_xy=corr_xy,
        variance_x=var_x,
        variance_y=var_y,
    )
class DistanceCovarianceMethod(Enum):
    """
    Method used for computing the distance covariance.
    """

    # Each member's value bundles the callables implementing that method.
    AUTO = _DcovAlgorithmInternalsAuto()
    """
    Try to select the best algorithm. It will try to use a fast
    algorithm if possible. Otherwise it will use the naive
    implementation.
    """
    NAIVE = _DcovAlgorithmInternals(
        dcov_sqr=_distance_covariance_sqr_naive,
        u_dcov_sqr=_u_distance_covariance_sqr_naive,
        dcor_sqr=_distance_correlation_sqr_naive,
        u_dcor_sqr=_u_distance_correlation_sqr_naive,
        stats_generic=_distance_sqr_stats_naive_generic,
    )
    r"""
    Use the usual estimator of the distance covariance, which is
    :math:`O(n^2)`
    """
    AVL = _DcovAlgorithmInternals(
        dcov_generic=_distance_covariance_sqr_avl_generic,
    )
    r"""
    Use the fast implementation from
    :cite:`b-fast_distance_correlation_avl` which is
    :math:`O(n\log n)`
    """
    MERGESORT = _DcovAlgorithmInternals(
        dcov_generic=_distance_covariance_sqr_mergesort_generic,
    )
    r"""
    Use the fast implementation from
    :cite:`b-fast_distance_correlation_mergesort` which is
    :math:`O(n\log n)`
    """

    def __repr__(self) -> str:
        # Display as e.g. ``DistanceCovarianceMethod.AVL``.
        return '%s.%s' % (self.__class__.__name__, self.name)
def _to_algorithm(
    algorithm: DistanceCovarianceMethod | str,
) -> DistanceCovarianceMethod:
    """Return the enum member, resolving string names case-insensitively."""
    return (
        algorithm
        if isinstance(algorithm, DistanceCovarianceMethod)
        else DistanceCovarianceMethod[algorithm.upper()]
    )
def distance_covariance_sqr(
    x: T,
    y: T,
    *,
    exponent: float = 1,
    method: DistanceCovarianceMethod | str = DistanceCovarianceMethod.AUTO,
    compile_mode: CompileMode = CompileMode.AUTO,
) -> T:
    """
    Usual (biased) estimator for the squared distance covariance.

    Args:
        x: First random vector. Columns are the individual random
            variables; rows are the observations.
        y: Second random vector. Columns are the individual random
            variables; rows are the observations.
        exponent: Exponent of the Euclidean distance, in the range
            :math:`(0, 2)`. Equivalently, twice the Hurst parameter of
            fractional Brownian motion.
        method: Method used internally to compute the distance covariance.
        compile_mode: Compilation mode; by default the fastest available
            one is attempted.

    Returns:
        Biased estimator of the squared distance covariance.

    See Also:
        distance_covariance
        u_distance_covariance_sqr

    Examples:
        >>> import numpy as np
        >>> import dcor
        >>> a = np.array([[1., 2., 3., 4.],
        ...               [5., 6., 7., 8.],
        ...               [9., 10., 11., 12.],
        ...               [13., 14., 15., 16.]])
        >>> b = np.array([[1.], [0.], [0.], [1.]])
        >>> dcor.distance_covariance_sqr(a, a)
        52.0
        >>> dcor.distance_covariance_sqr(a, b)
        1.0
        >>> dcor.distance_covariance_sqr(b, b)
        0.25
        >>> dcor.distance_covariance_sqr(a, b, exponent=0.5) # doctest: +ELLIPSIS
        0.3705904...
    """
    # Resolve string names to enum members, then delegate to the chosen
    # implementation bundle.
    algorithm = _to_algorithm(method)
    return algorithm.value.dcov_sqr(
        x,
        y,
        exponent=exponent,
        compile_mode=compile_mode,
    )
def u_distance_covariance_sqr(
    x: T,
    y: T,
    *,
    exponent: float = 1,
    method: DistanceCovarianceMethod | str = DistanceCovarianceMethod.AUTO,
    compile_mode: CompileMode = CompileMode.AUTO,
) -> T:
    """
    Unbiased estimator for the squared distance covariance.

    Args:
        x: First random vector. The columns correspond with the individual
            random variables while the rows are individual instances of the
            random vector.
        y: Second random vector. The columns correspond with the individual
            random variables while the rows are individual instances of the
            random vector.
        exponent: Exponent of the Euclidean distance, in the range
            :math:`(0, 2)`. Equivalently, it is twice the Hurst parameter of
            fractional Brownian motion.
        method: Method to use internally to compute the distance covariance.
        compile_mode: Compilation mode used. By default it tries to use the
            fastest available type of compilation.

    Returns:
        Value of the unbiased estimator of the squared distance covariance.

    See Also:
        distance_covariance
        distance_covariance_sqr

    Examples:
        >>> import numpy as np
        >>> import dcor
        >>> a = np.array([[1., 2., 3., 4.],
        ...               [5., 6., 7., 8.],
        ...               [9., 10., 11., 12.],
        ...               [13., 14., 15., 16.]])
        >>> b = np.array([[1.], [0.], [0.], [1.]])
        >>> dcor.u_distance_covariance_sqr(a, a) # doctest: +ELLIPSIS
        42.6666666...
        >>> dcor.u_distance_covariance_sqr(a, b) # doctest: +ELLIPSIS
        -2.6666666...
        >>> dcor.u_distance_covariance_sqr(b, b) # doctest: +ELLIPSIS
        0.6666666...
        >>> dcor.u_distance_covariance_sqr(a, b, exponent=0.5) # doctest: +ELLIPSIS
        -0.2996598...
    """
    # Fix: ``method`` is annotated ``DistanceCovarianceMethod | str`` like in
    # every sibling function, since ``_to_algorithm`` accepts string names.
    method = _to_algorithm(method)
    return method.value.u_dcov_sqr(
        x,
        y,
        exponent=exponent,
        compile_mode=compile_mode,
    )
def distance_covariance(
    x: T,
    y: T,
    *,
    exponent: float = 1,
    method: DistanceCovarianceMethod | str = DistanceCovarianceMethod.AUTO,
    compile_mode: CompileMode = CompileMode.AUTO,
) -> T:
    """
    Usual (biased) estimator for the distance covariance.
    Args:
        x: First random vector. The columns correspond with the individual
            random variables while the rows are individual instances of the
            random vector.
        y: Second random vector. The columns correspond with the individual
            random variables while the rows are individual instances of the
            random vector.
        exponent: Exponent of the Euclidean distance, in the range
            :math:`(0, 2)`. Equivalently, it is twice the Hurst parameter of
            fractional Brownian motion.
        method: Method to use internally to compute the distance covariance.
        compile_mode: Compilation mode used. By default it tries to use the
            fastest available type of compilation.
    Returns:
        Biased estimator of the distance covariance.
    See Also:
        distance_covariance_sqr
        u_distance_covariance_sqr
    Examples:
        >>> import numpy as np
        >>> import dcor
        >>> a = np.array([[1., 2., 3., 4.],
        ...               [5., 6., 7., 8.],
        ...               [9., 10., 11., 12.],
        ...               [13., 14., 15., 16.]])
        >>> b = np.array([[1.], [0.], [0.], [1.]])
        >>> dcor.distance_covariance(a, a) # doctest: +ELLIPSIS
        7.2111025...
        >>> dcor.distance_covariance(a, b)
        1.0
        >>> dcor.distance_covariance(b, b)
        0.5
        >>> dcor.distance_covariance(a, b, exponent=0.5) # doctest: +ELLIPSIS
        0.6087614...
    """
    # Simply the square root of the (biased) squared estimator.  ``_sqrt`` is
    # defined elsewhere in the package; presumably it handles array inputs.
    return _sqrt(
        distance_covariance_sqr(
            x,
            y,
            exponent=exponent,
            method=method,
            compile_mode=compile_mode,
        ),
    )
def distance_stats_sqr(
    x: T,
    y: T,
    *,
    exponent: float = 1,
    method: DistanceCovarianceMethod | str = DistanceCovarianceMethod.AUTO,
    compile_mode: CompileMode = CompileMode.AUTO,
) -> Stats[T]:
    """
    Usual (biased) statistics related with the squared distance covariance.
    Computes the usual (biased) estimators for the squared distance covariance
    and squared distance correlation between two random vectors, and the
    individual squared distance variances.
    Args:
        x: First random vector. The columns correspond with the individual
            random variables while the rows are individual instances of the
            random vector.
        y: Second random vector. The columns correspond with the individual
            random variables while the rows are individual instances of the
            random vector.
        exponent: Exponent of the Euclidean distance, in the range
            :math:`(0, 2)`. Equivalently, it is twice the Hurst parameter of
            fractional Brownian motion.
        method: Method to use internally to compute the distance covariance.
        compile_mode: Compilation mode used. By default it tries to use the
            fastest available type of compilation.
    Returns:
        Stats object containing squared distance covariance,
        squared distance correlation,
        squared distance variance of the first random vector and
        squared distance variance of the second random vector.
    See Also:
        distance_covariance_sqr
        distance_correlation_sqr
    Notes:
        It is less efficient to compute the statistics separately, rather than
        using this function, because some computations can be shared.
    Examples:
        >>> import numpy as np
        >>> import dcor
        >>> a = np.array([[1., 2., 3., 4.],
        ...               [5., 6., 7., 8.],
        ...               [9., 10., 11., 12.],
        ...               [13., 14., 15., 16.]])
        >>> b = np.array([[1.], [0.], [0.], [1.]])
        >>> dcor.distance_stats_sqr(a, a) # doctest: +NORMALIZE_WHITESPACE
        Stats(covariance_xy=52.0, correlation_xy=1.0, variance_x=52.0,
        variance_y=52.0)
        >>> dcor.distance_stats_sqr(a, b) # doctest: +NORMALIZE_WHITESPACE
        Stats(covariance_xy=1.0, correlation_xy=0.2773500...,
        variance_x=52.0, variance_y=0.25)
        >>> dcor.distance_stats_sqr(b, b) # doctest: +NORMALIZE_WHITESPACE
        Stats(covariance_xy=0.25, correlation_xy=1.0, variance_x=0.25,
        variance_y=0.25)
        >>> dcor.distance_stats_sqr(a, b, exponent=0.5) # doctest: +ELLIPSIS
        ...                             # doctest: +NORMALIZE_WHITESPACE
        Stats(covariance_xy=0.3705904..., correlation_xy=0.4493308...,
        variance_x=2.7209220..., variance_y=0.25)
    """
    # Normalize the method (enum member or name) and dispatch; the algorithm
    # computes all four statistics at once so shared work is not repeated.
    method = _to_algorithm(method)
    return method.value.stats_sqr(
        x,
        y,
        exponent=exponent,
        compile_mode=compile_mode,
    )
def u_distance_stats_sqr(
    x: T,
    y: T,
    *,
    exponent: float = 1,
    method: DistanceCovarianceMethod | str = DistanceCovarianceMethod.AUTO,
    compile_mode: CompileMode = CompileMode.AUTO,
) -> Stats[T]:
    # Return annotation corrected from ``T`` to ``Stats[T]``: the docstring
    # and the sibling ``distance_stats_sqr`` both document a Stats object.
    """
    Unbiased statistics related with the squared distance covariance.
    Computes the unbiased estimators for the squared distance covariance
    and squared distance correlation between two random vectors, and the
    individual squared distance variances.
    Args:
        x: First random vector. The columns correspond with the individual
            random variables while the rows are individual instances of the
            random vector.
        y: Second random vector. The columns correspond with the individual
            random variables while the rows are individual instances of the
            random vector.
        exponent: Exponent of the Euclidean distance, in the range
            :math:`(0, 2)`. Equivalently, it is twice the Hurst parameter of
            fractional Brownian motion.
        method: Method to use internally to compute the distance covariance.
        compile_mode: Compilation mode used. By default it tries to use the
            fastest available type of compilation.
    Returns:
        Stats object containing squared distance covariance,
        squared distance correlation,
        squared distance variance of the first random vector and
        squared distance variance of the second random vector.
    See Also:
        u_distance_covariance_sqr
        u_distance_correlation_sqr
    Notes:
        It is less efficient to compute the statistics separately, rather than
        using this function, because some computations can be shared.
    Examples:
        >>> import numpy as np
        >>> import dcor
        >>> a = np.array([[1., 2., 3., 4.],
        ...               [5., 6., 7., 8.],
        ...               [9., 10., 11., 12.],
        ...               [13., 14., 15., 16.]])
        >>> b = np.array([[1.], [0.], [0.], [1.]])
        >>> dcor.u_distance_stats_sqr(a, a) # doctest: +ELLIPSIS
        ...                         # doctest: +NORMALIZE_WHITESPACE
        Stats(covariance_xy=42.6666666..., correlation_xy=1.0,
        variance_x=42.6666666..., variance_y=42.6666666...)
        >>> dcor.u_distance_stats_sqr(a, b) # doctest: +ELLIPSIS
        ...                         # doctest: +NORMALIZE_WHITESPACE
        Stats(covariance_xy=-2.6666666..., correlation_xy=-0.5,
        variance_x=42.6666666..., variance_y=0.6666666...)
        >>> dcor.u_distance_stats_sqr(b, b) # doctest: +ELLIPSIS
        ...                         # doctest: +NORMALIZE_WHITESPACE
        Stats(covariance_xy=0.6666666..., correlation_xy=1.0,
        variance_x=0.6666666..., variance_y=0.6666666...)
        >>> dcor.u_distance_stats_sqr(a, b, exponent=0.5) # doctest: +ELLIPSIS
        ...                         # doctest: +NORMALIZE_WHITESPACE
        Stats(covariance_xy=-0.2996598..., correlation_xy=-0.4050479...,
        variance_x=0.8209855..., variance_y=0.6666666...)
    """
    # Normalize the method (enum member or name) and dispatch to the
    # unbiased ("u_") implementation of the chosen algorithm.
    method = _to_algorithm(method)
    return method.value.u_stats_sqr(
        x,
        y,
        exponent=exponent,
        compile_mode=compile_mode,
    )
def distance_stats(
    x: T,
    y: T,
    *,
    exponent: float = 1,
    method: DistanceCovarianceMethod | str = DistanceCovarianceMethod.AUTO,
    compile_mode: CompileMode = CompileMode.AUTO,
) -> Stats[T]:
    """
    Usual (biased) statistics related with the distance covariance.
    Computes the usual (biased) estimators for the distance covariance
    and distance correlation between two random vectors, and the
    individual distance variances.
    Args:
        x: First random vector. The columns correspond with the individual
            random variables while the rows are individual instances of the
            random vector.
        y: Second random vector. The columns correspond with the individual
            random variables while the rows are individual instances of the
            random vector.
        exponent: Exponent of the Euclidean distance, in the range
            :math:`(0, 2)`. Equivalently, it is twice the Hurst parameter of
            fractional Brownian motion.
        method: Method to use internally to compute the distance covariance.
        compile_mode: Compilation mode used. By default it tries to use the
            fastest available type of compilation.
    Returns:
        Stats object containing distance covariance,
        distance correlation,
        distance variance of the first random vector and
        distance variance of the second random vector.
    See Also:
        distance_covariance
        distance_correlation
    Notes:
        It is less efficient to compute the statistics separately, rather than
        using this function, because some computations can be shared.
    Examples:
        >>> import numpy as np
        >>> import dcor
        >>> a = np.array([[1., 2., 3., 4.],
        ...               [5., 6., 7., 8.],
        ...               [9., 10., 11., 12.],
        ...               [13., 14., 15., 16.]])
        >>> b = np.array([[1.], [0.], [0.], [1.]])
        >>> dcor.distance_stats(a, a) # doctest: +NORMALIZE_WHITESPACE
        Stats(covariance_xy=7.2111025..., correlation_xy=1.0,
        variance_x=7.2111025..., variance_y=7.2111025...)
        >>> dcor.distance_stats(a, b) # doctest: +NORMALIZE_WHITESPACE
        Stats(covariance_xy=1.0, correlation_xy=0.5266403...,
        variance_x=7.2111025..., variance_y=0.5)
        >>> dcor.distance_stats(b, b) # doctest: +NORMALIZE_WHITESPACE
        Stats(covariance_xy=0.5, correlation_xy=1.0, variance_x=0.5,
        variance_y=0.5)
        >>> dcor.distance_stats(a, b, exponent=0.5) # doctest: +ELLIPSIS
        ...                     # doctest: +NORMALIZE_WHITESPACE
        Stats(covariance_xy=0.6087614..., correlation_xy=0.6703214...,
        variance_x=1.6495217..., variance_y=0.5)
    """
    # Take the elementwise square root of the four squared statistics.
    # A generator is sufficient for star-unpacking; no intermediate list
    # needs to be materialized.
    return Stats(
        *(
            _sqrt(s) for s in astuple(
                distance_stats_sqr(
                    x,
                    y,
                    exponent=exponent,
                    method=method,
                    compile_mode=compile_mode,
                ),
            )
        ),
    )
def distance_correlation_sqr(
    x: T,
    y: T,
    *,
    exponent: float = 1,
    method: DistanceCovarianceMethod | str = DistanceCovarianceMethod.AUTO,
    compile_mode: CompileMode = CompileMode.AUTO,
) -> T:
    """
    Usual (biased) estimator for the squared distance correlation.
    Args:
        x: First random vector. The columns correspond with the individual
            random variables while the rows are individual instances of the
            random vector.
        y: Second random vector. The columns correspond with the individual
            random variables while the rows are individual instances of the
            random vector.
        exponent: Exponent of the Euclidean distance, in the range
            :math:`(0, 2)`. Equivalently, it is twice the Hurst parameter of
            fractional Brownian motion.
        method: Method to use internally to compute the distance covariance.
        compile_mode: Compilation mode used. By default it tries to use the
            fastest available type of compilation.
    Returns:
        Value of the biased estimator of the squared distance correlation.
    See Also:
        distance_correlation
        u_distance_correlation_sqr
    Examples:
        >>> import numpy as np
        >>> import dcor
        >>> a = np.array([[1., 2., 3., 4.],
        ...               [5., 6., 7., 8.],
        ...               [9., 10., 11., 12.],
        ...               [13., 14., 15., 16.]])
        >>> b = np.array([[1.], [0.], [0.], [1.]])
        >>> dcor.distance_correlation_sqr(a, a)
        1.0
        >>> dcor.distance_correlation_sqr(a, b) # doctest: +ELLIPSIS
        0.2773500...
        >>> dcor.distance_correlation_sqr(b, b)
        1.0
        >>> dcor.distance_correlation_sqr(a, b, exponent=0.5) # doctest: +ELLIPSIS
        0.4493308...
    """
    # Normalize the method (enum member or name) and dispatch to the chosen
    # algorithm's biased squared-correlation implementation.
    method = _to_algorithm(method)
    return method.value.dcor_sqr(
        x,
        y,
        exponent=exponent,
        compile_mode=compile_mode,
    )
def u_distance_correlation_sqr(
    x: T,
    y: T,
    *,
    exponent: float = 1,
    method: DistanceCovarianceMethod | str = DistanceCovarianceMethod.AUTO,
    compile_mode: CompileMode = CompileMode.AUTO,
) -> T:
    """
    Bias-corrected estimator for the squared distance correlation.
    Args:
        x: First random vector. The columns correspond with the individual
            random variables while the rows are individual instances of the
            random vector.
        y: Second random vector. The columns correspond with the individual
            random variables while the rows are individual instances of the
            random vector.
        exponent: Exponent of the Euclidean distance, in the range
            :math:`(0, 2)`. Equivalently, it is twice the Hurst parameter of
            fractional Brownian motion.
        method: Method to use internally to compute the distance covariance.
        compile_mode: Compilation mode used. By default it tries to use the
            fastest available type of compilation.
    Returns:
        Value of the bias-corrected estimator of the squared distance
        correlation.
    See Also:
        distance_correlation
        distance_correlation_sqr
    Examples:
        >>> import numpy as np
        >>> import dcor
        >>> a = np.array([[1., 2., 3., 4.],
        ...               [5., 6., 7., 8.],
        ...               [9., 10., 11., 12.],
        ...               [13., 14., 15., 16.]])
        >>> b = np.array([[1.], [0.], [0.], [1.]])
        >>> dcor.u_distance_correlation_sqr(a, a)
        1.0
        >>> dcor.u_distance_correlation_sqr(a, b)
        -0.5
        >>> dcor.u_distance_correlation_sqr(b, b)
        1.0
        >>> dcor.u_distance_correlation_sqr(a, b, exponent=0.5)
        ... # doctest: +ELLIPSIS
        -0.4050479...
    """
    # Normalize the method (enum member or name) and dispatch to the chosen
    # algorithm's bias-corrected squared-correlation implementation.
    method = _to_algorithm(method)
    return method.value.u_dcor_sqr(
        x,
        y,
        exponent=exponent,
        compile_mode=compile_mode,
    )
def distance_correlation(
    x: T,
    y: T,
    *,
    exponent: float = 1,
    method: DistanceCovarianceMethod | str = DistanceCovarianceMethod.AUTO,
    compile_mode: CompileMode = CompileMode.AUTO,
) -> T:
    """
    Usual (biased) estimator for the distance correlation.
    Args:
        x: First random vector. The columns correspond with the individual
            random variables while the rows are individual instances of the
            random vector.
        y: Second random vector. The columns correspond with the individual
            random variables while the rows are individual instances of the
            random vector.
        exponent: Exponent of the Euclidean distance, in the range
            :math:`(0, 2)`. Equivalently, it is twice the Hurst parameter of
            fractional Brownian motion.
        method: Method to use internally to compute the distance covariance.
        compile_mode: Compilation mode used. By default it tries to use the
            fastest available type of compilation.
    Returns:
        Value of the biased estimator of the distance correlation.
    See Also:
        distance_correlation_sqr
        u_distance_correlation_sqr
    Examples:
        >>> import numpy as np
        >>> import dcor
        >>> a = np.array([[1., 2., 3., 4.],
        ...               [5., 6., 7., 8.],
        ...               [9., 10., 11., 12.],
        ...               [13., 14., 15., 16.]])
        >>> b = np.array([[1.], [0.], [0.], [1.]])
        >>> dcor.distance_correlation(a, a)
        1.0
        >>> dcor.distance_correlation(a, b) # doctest: +ELLIPSIS
        0.5266403...
        >>> dcor.distance_correlation(b, b)
        1.0
        >>> dcor.distance_correlation(a, b, exponent=0.5) # doctest: +ELLIPSIS
        0.6703214...
    """
    # Square root of the (biased) squared correlation estimator.
    return _sqrt(
        distance_correlation_sqr(
            x,
            y,
            exponent=exponent,
            method=method,
            compile_mode=compile_mode,
        ),
    )
def distance_correlation_af_inv_sqr(
    x: T,
    y: T,
    # NOTE(review): unlike the sibling functions, ``method`` and
    # ``compile_mode`` are NOT keyword-only here -- confirm whether this
    # asymmetry is intentional before adding ``*``.
    method: DistanceCovarianceMethod | str = DistanceCovarianceMethod.AUTO,
    compile_mode: CompileMode = CompileMode.AUTO,
) -> T:
    """
    Square of the affinely invariant distance correlation.
    Computes the estimator for the square of the affinely invariant distance
    correlation between two random vectors.
    Warning:
        The return value of this function is undefined when the
        covariance matrix of :math:`x` or :math:`y` is singular.
    Args:
        x: First random vector. The columns correspond with the individual
            random variables while the rows are individual instances of the
            random vector.
        y: Second random vector. The columns correspond with the individual
            random variables while the rows are individual instances of the
            random vector.
        method: Method to use internally to compute the distance covariance.
        compile_mode: Compilation mode used. By default it tries to use the
            fastest available type of compilation.
    Returns:
        Value of the estimator of the squared affinely invariant
        distance correlation.
    See Also:
        distance_correlation
        u_distance_correlation
    Examples:
        >>> import numpy as np
        >>> import dcor
        >>> a = np.array([[1., 3., 2., 5.],
        ...               [5., 7., 6., 8.],
        ...               [9., 10., 11., 12.],
        ...               [13., 15., 15., 16.]])
        >>> b = np.array([[1.], [0.], [0.], [1.]])
        >>> dcor.distance_correlation_af_inv_sqr(a, a)
        1.0
        >>> dcor.distance_correlation_af_inv_sqr(a, b) # doctest: +ELLIPSIS
        0.5773502...
        >>> dcor.distance_correlation_af_inv_sqr(b, b)
        1.0
    """
    # Rescale both samples so the result is invariant to affine transforms.
    x = _af_inv_scaled(x)
    y = _af_inv_scaled(y)
    correlation = distance_correlation_sqr(
        x,
        y,
        method=method,
        compile_mode=compile_mode,
    )
    # NaN can arise in the undefined (singular-covariance) case described in
    # the Warning above; it is mapped to a 0 of matching dtype.
    xp = get_namespace(x, y)
    return (
        xp.asarray(0, dtype=correlation.dtype)
        if xp.isnan(correlation)
        else correlation
    )
def distance_correlation_af_inv(
    x: T,
    y: T,
    method: DistanceCovarianceMethod | str = DistanceCovarianceMethod.AUTO,
    compile_mode: CompileMode = CompileMode.AUTO,
) -> T:
    """
    Affinely invariant distance correlation.
    Computes the estimator for the affinely invariant distance
    correlation between two random vectors.
    Warning:
        The return value of this function is undefined when the
        covariance matrix of :math:`x` or :math:`y` is singular.
    Args:
        x: First random vector. The columns correspond with the individual
            random variables while the rows are individual instances of the
            random vector.
        y: Second random vector. The columns correspond with the individual
            random variables while the rows are individual instances of the
            random vector.
        method: Method to use internally to compute the distance covariance.
        compile_mode: Compilation mode used. By default it tries to use the
            fastest available type of compilation.
    Returns:
        Value of the estimator of the squared affinely invariant
        distance correlation.
    See Also:
        distance_correlation
        u_distance_correlation
    Examples:
        >>> import numpy as np
        >>> import dcor
        >>> a = np.array([[1., 3., 2., 5.],
        ...               [5., 7., 6., 8.],
        ...               [9., 10., 11., 12.],
        ...               [13., 15., 15., 16.]])
        >>> b = np.array([[1.], [0.], [0.], [1.]])
        >>> dcor.distance_correlation_af_inv(a, a)
        1.0
        >>> dcor.distance_correlation_af_inv(a, b) # doctest: +ELLIPSIS
        0.7598356...
        >>> dcor.distance_correlation_af_inv(b, b)
        1.0
    """
    # Square root of the affinely invariant squared correlation.
    # (Trailing dataset-export junk removed from the original final line.)
    return _sqrt(
        distance_correlation_af_inv_sqr(
            x,
            y,
            method=method,
            compile_mode=compile_mode,
        ),
    )
import argparse
import logging
import os
import pandas as pd
from vlne.eval.eval import eval_model
from vlne.data.data_generator import DataSmear
from vlne.plot.profile import plot_profile
from vlne.presets import PRESETS_EVAL
from vlne.utils import setup_logging
from vlne.utils.eval import standard_eval_prologue, parse_binning
from vlne.utils.parsers import (
add_basic_eval_args, add_concurrency_parser, add_hist_binning_parser
)
from vlne.data.data_generator.keras_sequence import KerasSequence
from vlne.plot.plot_spec import PlotSpec
def make_hist_specs(cmdargs, preset):
    """Construct one PlotSpec histogram binning per target energy label.

    The binning comes from the command line (``parse_binning`` with the
    ``_x`` suffix); every label in ``preset.name_map`` receives an
    identical spec.
    """
    binning = parse_binning(cmdargs, suffix = '_x')
    # Only the keys of name_map are needed; the display names were unused.
    return {
        label : PlotSpec(**binning)
        for label in preset.name_map
    }
def add_energy_resolution_parser(parser):
    """Attach resolution-fit options: gaussian fit margin and histogram binning."""
    parser.add_argument(
        '--fit_margin',
        dest    = 'fit_margin',
        type    = float,
        default = 0.5,
        help    = 'Fraction of resolution peak to fit gaussian',
    )
    add_hist_binning_parser(
        parser,
        default_bins     = 100,
        default_range_lo = -1,
        default_range_hi = 1,
    )
def parse_cmdargs():
    """Parse command line arguments"""
    # BUG FIX: the long text was passed positionally, which sets argparse's
    # ``prog`` (program name shown in usage), not the parser description.
    parser = argparse.ArgumentParser(
        description = (
            "Make input importance(based on output sensitivity to random"
            " input perturbations) profiles"
        )
    )
    add_basic_eval_args(parser, PRESETS_EVAL)
    add_concurrency_parser(parser)
    parser.add_argument(
        '--smear',
        help    = 'Smearing Value',
        default = 0.5,
        dest    = 'smear',
        type    = float,
    )
    parser.add_argument(
        '-a', '--annotate',
        action = 'store_true',
        dest   = 'annotate',
        help   = 'Show y-values for each point'
    )
    add_energy_resolution_parser(parser)
    return parser.parse_args()
def prologue(cmdargs):
    """Load dataset, model and initialize output directory"""
    # NOTE(review): the ``plotdir`` unpacked from standard_eval_prologue is
    # deliberately discarded and replaced by a smear-specific directory below.
    dgen, args, model, outdir, plotdir, preset = \
        standard_eval_prologue(cmdargs, PRESETS_EVAL)
    # One output directory per smear value, e.g. input_importance_perturb_5.000e-01
    plotdir = os.path.join(
        outdir, f'input_importance_perturb_{cmdargs.smear:.3e}'
    )
    os.makedirs(plotdir, exist_ok = True)
    return (args, model, dgen, plotdir, preset)
def get_stats_for_energy(stat_list, energy):
    """Collect the per-run stats for one energy label into a DataFrame."""
    records = [stats[energy] for stats in stat_list]
    return pd.DataFrame(records)
def plot_vars_profile(
    var_list, stat_list, label_x, annotate, preset, plotdir, ext,
    stats_to_plot = ( 'rms', 'sigma' ),
):
    """Plot one categorical profile per (energy label, statistic) pair.

    ``var_list`` holds the x-axis categories (smeared variable names) and
    ``stat_list`` the matching per-run stat dictionaries; one file per
    combination is written under ``plotdir``.
    """
    # Energy labels are taken from the first entry; all entries are assumed
    # to share the same keys.
    for energy in stat_list[0].keys():
        for stat in stats_to_plot:
            label_y = '%s(%s [%s])' % (
                stat, preset.name_map[energy], preset.units_map[energy]
            )
            # File name encodes stat, energy and x-label; "_ann" marks
            # annotated variants so they do not overwrite the plain ones.
            fname = f"{stat}_{energy}_vs_{label_x}"
            fname += "_ann" if annotate else ''
            fname = os.path.join(plotdir, fname)
            stats = get_stats_for_energy(stat_list, energy)
            plot_profile(
                var_list, stats, stat,
                base_stats  = None,
                label_x     = label_x,
                label_y     = label_y,
                sort_type   = 'y',
                annotate    = annotate,
                categorical = True,
                fname       = fname,
                ext         = ext
            )
def slice_var_generator(dgen, smear):
    """Yield ``(var_name, smeared_dgen)`` for each slice level input variable."""
    variables = dgen.vars_input_slice
    if variables is None:
        return None
    for name in variables:
        smeared = DataSmear(dgen, smear = smear, affected_vars_slice = [ name ])
        yield (name, smeared)
def png2d_var_generator(dgen, smear):
    """Yield ``(var_name, smeared_dgen)`` for each 2D prong level input variable."""
    variables = dgen.vars_input_png2d
    if variables is None:
        return None
    for name in variables:
        smeared = DataSmear(dgen, smear = smear, affected_vars_png2d = [ name ])
        yield (name, smeared)
def png3d_var_generator(dgen, smear):
    """Yield ``(var_name, smeared_dgen)`` for each 3D prong level input variable."""
    variables = dgen.vars_input_png3d
    if variables is None:
        return None
    for name in variables:
        smeared = DataSmear(dgen, smear = smear, affected_vars_png3d = [ name ])
        yield (name, smeared)
def save_stats(var_list, stat_list, label, plotdir):
    """Save input importance stats vs input variable.

    Writes ``stats_<label>.csv`` under ``plotdir``, one row per
    (variable, energy) pair with the stat values as columns.
    """
    result = []
    # zip instead of indexing: var_list and stat_list are parallel lists.
    for var, stats in zip(var_list, stat_list):
        for energy, values in stats.items():
            result.append({ 'var' : var, 'energy' : energy, **values })
    df = pd.DataFrame.from_records(result, index = ('var', 'energy'))
    # os.path.join for path construction, consistent with the rest of the file.
    df.to_csv(os.path.join(plotdir, 'stats_%s.csv' % label))
def make_perturb_profile(
    smeared_var_generator, var_list, stat_list, args, model, hist_specs,
    preset, plotdir, label, cmdargs
):
    """
    Evaluate performance for generators yielded by `smeared_var_generator`
    """
    # NOTE(review): calling a generator function always returns a generator
    # object (never None), so for the generators in this file this check can
    # only fire if a caller passes a literal None -- confirm it is needed.
    if smeared_var_generator is None:
        return
    # Shallow copies: the caller's baseline lists must not accumulate the
    # per-variable entries added below.
    var_list  = var_list[:]
    stat_list = stat_list[:]
    for (vname, dgen) in smeared_var_generator:
        logging.info("Evaluating '%s' var...", vname)
        stats, _ = eval_model(
            args, KerasSequence(dgen), model, hist_specs, cmdargs.fit_margin
        )
        # Category label records both the variable and the smear magnitude.
        var_list .append(f"{vname} : {cmdargs.smear}")
        stat_list.append(stats)
    plot_vars_profile(
        var_list, stat_list, label, cmdargs.annotate, preset, plotdir,
        cmdargs.ext
    )
    save_stats(var_list, stat_list, label, plotdir)
def main():
    """Entry point: baseline evaluation plus per-variable smeared profiles."""
    setup_logging()
    cmdargs = parse_cmdargs()
    args, model, dgen, plotdir, preset = prologue(cmdargs)
    hist_specs = make_hist_specs(cmdargs, preset)
    # Baseline (no smearing), labelled 'none'.
    # NOTE(review): unlike make_perturb_profile, this call neither wraps dgen
    # in KerasSequence nor passes cmdargs.fit_margin -- confirm this is
    # intentional and matches eval_model's defaults.
    var_list = [ 'none' ]
    stat_list = [ eval_model(args, dgen, model, hist_specs)[0] ]
    # One profile set per input-variable group.
    make_perturb_profile(
        slice_var_generator(dgen, cmdargs.smear), var_list, stat_list,
        args, model, hist_specs, preset, plotdir, 'slice', cmdargs
    )
    make_perturb_profile(
        png2d_var_generator(dgen, cmdargs.smear), var_list, stat_list,
        args, model, hist_specs, preset, plotdir, 'png2d', cmdargs
    )
    make_perturb_profile(
        png3d_var_generator(dgen, cmdargs.smear), var_list, stat_list,
        args, model, hist_specs, preset, plotdir, 'png3d', cmdargs
    )
# Trailing dataset-export junk removed from the original final line; the
# fused ``import argparse`` fragment is redundant (argparse is already
# imported at the top of this script section).
if __name__ == '__main__':
    main()
import logging
import os
import pandas as pd
from vlne.eval.eval import eval_model
from vlne.data.data_generator import DataSmear
from vlne.plot.profile import plot_profile
from vlne.presets import PRESETS_EVAL
from vlne.utils import setup_logging
from vlne.utils.eval import standard_eval_prologue, parse_binning
from vlne.utils.parsers import (
add_basic_eval_args, add_concurrency_parser, add_hist_binning_parser
)
from vlne.data.data_generator.keras_sequence import KerasSequence
from vlne.plot.plot_spec import PlotSpec
def make_hist_specs(cmdargs, preset):
    """Construct one PlotSpec histogram binning per target energy label.

    The binning comes from the command line (``parse_binning`` with the
    ``_x`` suffix); every label in ``preset.name_map`` receives an
    identical spec.
    """
    binning = parse_binning(cmdargs, suffix = '_x')
    # Only the keys of name_map are needed; the display names were unused.
    return {
        label : PlotSpec(**binning)
        for label in preset.name_map
    }
def add_energy_resolution_parser(parser):
    """Attach resolution-fit options: gaussian fit margin and histogram binning."""
    parser.add_argument(
        '--fit_margin',
        dest    = 'fit_margin',
        type    = float,
        default = 0.5,
        help    = 'Fraction of resolution peak to fit gaussian',
    )
    add_hist_binning_parser(
        parser,
        default_bins     = 100,
        default_range_lo = -1,
        default_range_hi = 1,
    )
def parse_cmdargs():
    """Parse command line arguments"""
    # BUG FIX: the long text was passed positionally, which sets argparse's
    # ``prog`` (program name shown in usage), not the parser description.
    parser = argparse.ArgumentParser(
        description = (
            "Make input importance(based on output sensitivity to random"
            " input perturbations) profiles"
        )
    )
    add_basic_eval_args(parser, PRESETS_EVAL)
    add_concurrency_parser(parser)
    parser.add_argument(
        '--smear',
        help    = 'Smearing Value',
        default = 0.5,
        dest    = 'smear',
        type    = float,
    )
    parser.add_argument(
        '-a', '--annotate',
        action = 'store_true',
        dest   = 'annotate',
        help   = 'Show y-values for each point'
    )
    add_energy_resolution_parser(parser)
    return parser.parse_args()
def prologue(cmdargs):
    """Load dataset, model and initialize output directory"""
    # NOTE(review): the ``plotdir`` unpacked from standard_eval_prologue is
    # deliberately discarded and replaced by a smear-specific directory below.
    dgen, args, model, outdir, plotdir, preset = \
        standard_eval_prologue(cmdargs, PRESETS_EVAL)
    # One output directory per smear value, e.g. input_importance_perturb_5.000e-01
    plotdir = os.path.join(
        outdir, f'input_importance_perturb_{cmdargs.smear:.3e}'
    )
    os.makedirs(plotdir, exist_ok = True)
    return (args, model, dgen, plotdir, preset)
def get_stats_for_energy(stat_list, energy):
    """Collect the per-run stats for one energy label into a DataFrame."""
    records = [stats[energy] for stats in stat_list]
    return pd.DataFrame(records)
def plot_vars_profile(
    var_list, stat_list, label_x, annotate, preset, plotdir, ext,
    stats_to_plot = ( 'rms', 'sigma' ),
):
    """Plot one categorical profile per (energy label, statistic) pair.

    ``var_list`` holds the x-axis categories (smeared variable names) and
    ``stat_list`` the matching per-run stat dictionaries; one file per
    combination is written under ``plotdir``.
    """
    # Energy labels are taken from the first entry; all entries are assumed
    # to share the same keys.
    for energy in stat_list[0].keys():
        for stat in stats_to_plot:
            label_y = '%s(%s [%s])' % (
                stat, preset.name_map[energy], preset.units_map[energy]
            )
            # File name encodes stat, energy and x-label; "_ann" marks
            # annotated variants so they do not overwrite the plain ones.
            fname = f"{stat}_{energy}_vs_{label_x}"
            fname += "_ann" if annotate else ''
            fname = os.path.join(plotdir, fname)
            stats = get_stats_for_energy(stat_list, energy)
            plot_profile(
                var_list, stats, stat,
                base_stats  = None,
                label_x     = label_x,
                label_y     = label_y,
                sort_type   = 'y',
                annotate    = annotate,
                categorical = True,
                fname       = fname,
                ext         = ext
            )
def slice_var_generator(dgen, smear):
    """Yield ``(var_name, smeared_dgen)`` for each slice level input variable."""
    variables = dgen.vars_input_slice
    if variables is None:
        return None
    for name in variables:
        smeared = DataSmear(dgen, smear = smear, affected_vars_slice = [ name ])
        yield (name, smeared)
def png2d_var_generator(dgen, smear):
    """Yield ``(var_name, smeared_dgen)`` for each 2D prong level input variable."""
    variables = dgen.vars_input_png2d
    if variables is None:
        return None
    for name in variables:
        smeared = DataSmear(dgen, smear = smear, affected_vars_png2d = [ name ])
        yield (name, smeared)
def png3d_var_generator(dgen, smear):
    """Yield ``(var_name, smeared_dgen)`` for each 3D prong level input variable."""
    variables = dgen.vars_input_png3d
    if variables is None:
        return None
    for name in variables:
        smeared = DataSmear(dgen, smear = smear, affected_vars_png3d = [ name ])
        yield (name, smeared)
def save_stats(var_list, stat_list, label, plotdir):
    """Save input importance stats vs input variable.

    Writes ``stats_<label>.csv`` under ``plotdir``, one row per
    (variable, energy) pair with the stat values as columns.
    """
    result = []
    # zip instead of indexing: var_list and stat_list are parallel lists.
    for var, stats in zip(var_list, stat_list):
        for energy, values in stats.items():
            result.append({ 'var' : var, 'energy' : energy, **values })
    df = pd.DataFrame.from_records(result, index = ('var', 'energy'))
    # os.path.join for path construction, consistent with the rest of the file.
    df.to_csv(os.path.join(plotdir, 'stats_%s.csv' % label))
def make_perturb_profile(
    smeared_var_generator, var_list, stat_list, args, model, hist_specs,
    preset, plotdir, label, cmdargs
):
    """
    Evaluate performance for generators yielded by `smeared_var_generator`
    """
    # NOTE(review): calling a generator function always returns a generator
    # object (never None), so for the generators in this file this check can
    # only fire if a caller passes a literal None -- confirm it is needed.
    if smeared_var_generator is None:
        return
    # Shallow copies: the caller's baseline lists must not accumulate the
    # per-variable entries added below.
    var_list  = var_list[:]
    stat_list = stat_list[:]
    for (vname, dgen) in smeared_var_generator:
        logging.info("Evaluating '%s' var...", vname)
        stats, _ = eval_model(
            args, KerasSequence(dgen), model, hist_specs, cmdargs.fit_margin
        )
        # Category label records both the variable and the smear magnitude.
        var_list .append(f"{vname} : {cmdargs.smear}")
        stat_list.append(stats)
    plot_vars_profile(
        var_list, stat_list, label, cmdargs.annotate, preset, plotdir,
        cmdargs.ext
    )
    save_stats(var_list, stat_list, label, plotdir)
def main():
    """Entry point: baseline evaluation plus per-variable smeared profiles."""
    setup_logging()
    cmdargs = parse_cmdargs()
    args, model, dgen, plotdir, preset = prologue(cmdargs)
    hist_specs = make_hist_specs(cmdargs, preset)
    # Baseline (no smearing), labelled 'none'.
    # NOTE(review): unlike make_perturb_profile, this call neither wraps dgen
    # in KerasSequence nor passes cmdargs.fit_margin -- confirm this is
    # intentional and matches eval_model's defaults.
    var_list = [ 'none' ]
    stat_list = [ eval_model(args, dgen, model, hist_specs)[0] ]
    # One profile set per input-variable group.
    make_perturb_profile(
        slice_var_generator(dgen, cmdargs.smear), var_list, stat_list,
        args, model, hist_specs, preset, plotdir, 'slice', cmdargs
    )
    make_perturb_profile(
        png2d_var_generator(dgen, cmdargs.smear), var_list, stat_list,
        args, model, hist_specs, preset, plotdir, 'png2d', cmdargs
    )
    make_perturb_profile(
        png3d_var_generator(dgen, cmdargs.smear), var_list, stat_list,
        args, model, hist_specs, preset, plotdir, 'png3d', cmdargs
    )
# Script entry point.
if __name__ == '__main__':
    main()
from creator.files.models import File
from creator.events.models import Event
from creator.studies.factories import StudyFactory
from django.contrib.auth import get_user_model
User = get_user_model()
UPDATE_FILE = """
mutation (
$kfId:String!,
$description: String!,
$name: String!,
$fileType: FileType!
) {
updateFile(
kfId: $kfId,
name: $name,
description:$description,
fileType: $fileType
) {
file { id kfId description name fileType }
}
}
"""
DELETE_FILE = """
mutation ($kfId: String!) {
deleteFile(kfId: $kfId) {
success
}
}
"""
def test_new_file_event(clients, db, upload_file):
    """
    Test that new file uploads create new events for both files and versions
    """
    client = clients.get("Administrators")
    assert Event.objects.count() == 0
    studies = StudyFactory.create_batch(1)
    study_id = studies[-1].kf_id
    resp = upload_file(study_id, "manifest.txt", client)
    file_id = resp.json()["data"]["createFile"]["file"]["kfId"]
    file = File.objects.get(kf_id=file_id)
    version = file.versions.first()
    # One upload emits exactly three events (presumably: study-file create,
    # file-version create, file-version update -- confirm against the
    # event-type definitions).
    assert Event.objects.count() == 3
    assert Event.objects.filter(event_type="SF_CRE").count() == 1
    assert Event.objects.filter(event_type="FV_CRE").count() == 1
    assert Event.objects.filter(event_type="FV_UPD").count() == 1
    # All events are attributed to the uploading admin user.
    user = User.objects.filter(groups__name="Administrators").first()
    sf_cre = Event.objects.filter(event_type="SF_CRE").first()
    assert sf_cre.user == user
    assert sf_cre.file == file
    assert sf_cre.study == studies[0]
    # The version-create event links the version but not the file.
    fv_cre = Event.objects.filter(event_type="FV_CRE").first()
    assert fv_cre.user == user
    assert fv_cre.file is None
    assert fv_cre.version == version
    assert fv_cre.study == studies[0]
    # The version-update event links both the file and the version.
    fv_upd = Event.objects.filter(event_type="FV_UPD").first()
    assert fv_upd.user == user
    assert fv_upd.file == file
    assert fv_upd.version == version
    assert fv_upd.study == studies[0]
def test_file_updated_event(db, clients, upload_file):
    """
    Test that file updates create events
    """
    client = clients.get("Administrators")
    study = StudyFactory()
    resp = upload_file(study.kf_id, "manifest.txt", client)
    file_id = resp.json()["data"]["createFile"]["file"]["kfId"]
    # Upload alone produces three events (see test_new_file_event).
    assert Event.objects.count() == 3
    variables = {
        "kfId": file_id,
        "name": "New name",
        "description": "New description",
        "fileType": "FAM",
    }
    resp = client.post(
        "/graphql",
        content_type="application/json",
        data={"query": UPDATE_FILE, "variables": variables},
    )
    # The update must add exactly one SF_UPD event for the right study/user.
    user = User.objects.first()
    assert Event.objects.count() == 4
    assert Event.objects.filter(event_type="SF_UPD").count() == 1
    assert Event.objects.filter(event_type="SF_UPD").first().user == user
    assert (
        Event.objects.filter(event_type="SF_UPD").first().study.kf_id
        == study.kf_id
    )
def test_file_deleted_event(db, clients, upload_file):
    """
    Test that file deletions create events
    """
    client = clients.get("Administrators")
    study = StudyFactory()
    resp = upload_file(study.kf_id, "manifest.txt", client)
    file_id = resp.json()["data"]["createFile"]["file"]["kfId"]
    # Upload alone produces three events (see test_new_file_event).
    assert Event.objects.count() == 3
    variables = {"kfId": file_id}
    resp = client.post(
        "/graphql",
        content_type="application/json",
        data={"query": DELETE_FILE, "variables": variables},
    )
    # The deletion must add exactly one SF_DEL event for the right study/user.
    # (Trailing dataset-export junk removed from the original final line.)
    user = User.objects.first()
    assert Event.objects.count() == 4
    assert Event.objects.filter(event_type="SF_DEL").count() == 1
    assert Event.objects.filter(event_type="SF_DEL").first().user == user
    assert (
        Event.objects.filter(event_type="SF_DEL").first().study.kf_id
        == study.kf_id
    )
from creator.events.models import Event
from creator.studies.factories import StudyFactory
from django.contrib.auth import get_user_model
User = get_user_model()
UPDATE_FILE = """
mutation (
$kfId:String!,
$description: String!,
$name: String!,
$fileType: FileType!
) {
updateFile(
kfId: $kfId,
name: $name,
description:$description,
fileType: $fileType
) {
file { id kfId description name fileType }
}
}
"""
DELETE_FILE = """
mutation ($kfId: String!) {
deleteFile(kfId: $kfId) {
success
}
}
"""
def test_new_file_event(clients, db, upload_file):
    """
    Test that new file uploads create new events for both files and versions
    """
    client = clients.get("Administrators")
    # Baseline: no events exist before the upload.
    assert Event.objects.count() == 0
    studies = StudyFactory.create_batch(1)
    study_id = studies[-1].kf_id
    resp = upload_file(study_id, "manifest.txt", client)
    file_id = resp.json()["data"]["createFile"]["file"]["kfId"]
    file = File.objects.get(kf_id=file_id)
    version = file.versions.first()
    # One upload yields three events: file create, version create, version update.
    assert Event.objects.count() == 3
    assert Event.objects.filter(event_type="SF_CRE").count() == 1
    assert Event.objects.filter(event_type="FV_CRE").count() == 1
    assert Event.objects.filter(event_type="FV_UPD").count() == 1
    user = User.objects.filter(groups__name="Administrators").first()
    sf_cre = Event.objects.filter(event_type="SF_CRE").first()
    assert sf_cre.user == user
    assert sf_cre.file == file
    assert sf_cre.study == studies[0]
    fv_cre = Event.objects.filter(event_type="FV_CRE").first()
    assert fv_cre.user == user
    # NOTE(review): the version-create event carries no file reference —
    # presumably emitted before the file row exists; confirm against the app.
    assert fv_cre.file is None
    assert fv_cre.version == version
    assert fv_cre.study == studies[0]
    fv_upd = Event.objects.filter(event_type="FV_UPD").first()
    assert fv_upd.user == user
    assert fv_upd.file == file
    assert fv_upd.version == version
    assert fv_upd.study == studies[0]
def test_file_updated_event(db, clients, upload_file):
    """
    Test that file updates create events
    """
    # Upload a file as an admin so there is something to update.
    client = clients.get("Administrators")
    study = StudyFactory()
    resp = upload_file(study.kf_id, "manifest.txt", client)
    file_id = resp.json()["data"]["createFile"]["file"]["kfId"]
    # The upload itself produces three events (see test_new_file_event).
    assert Event.objects.count() == 3
    variables = {
        "kfId": file_id,
        "name": "New name",
        "description": "New description",
        "fileType": "FAM",
    }
    resp = client.post(
        "/graphql",
        content_type="application/json",
        data={"query": UPDATE_FILE, "variables": variables},
    )
    user = User.objects.first()
    # Exactly one new SF_UPD event, attributed to the acting user and study.
    assert Event.objects.count() == 4
    assert Event.objects.filter(event_type="SF_UPD").count() == 1
    assert Event.objects.filter(event_type="SF_UPD").first().user == user
    assert (
        Event.objects.filter(event_type="SF_UPD").first().study.kf_id
        == study.kf_id
    )
def test_file_deleted_event(db, clients, upload_file):
    """
    Test that file deletions create events
    """
    # Upload a file as an admin so there is something to delete.
    client = clients.get("Administrators")
    study = StudyFactory()
    resp = upload_file(study.kf_id, "manifest.txt", client)
    file_id = resp.json()["data"]["createFile"]["file"]["kfId"]
    # The upload itself produces three events (see test_new_file_event).
    assert Event.objects.count() == 3
    variables = {"kfId": file_id}
    resp = client.post(
        "/graphql",
        content_type="application/json",
        data={"query": DELETE_FILE, "variables": variables},
    )
    user = User.objects.first()
    # Exactly one new SF_DEL event, attributed to the acting user and study.
    assert Event.objects.count() == 4
    assert Event.objects.filter(event_type="SF_DEL").count() == 1
    assert Event.objects.filter(event_type="SF_DEL").first().user == user
    assert (
        Event.objects.filter(event_type="SF_DEL").first().study.kf_id
        == study.kf_id
    ) | 0.475118 | 0.328785 |
from django.db import models
# Creating Database models for Market Indicies:
# The SPY 500 Index Composition:
class SPYIndexComposition(models.Model):
    """A data model representing a database table containing information
    on the S&P500 market index composition.
    It is built for the velkozz API with the ETL pipeline API in mind for POST
    requests. There is no primary key declared, the model makes use of django's
    automatic primary key pk.
    Attributes:
        symbol (models.CharField): The ticker symbol of the stock in the index.
        security_name (models.CharField): The full name of the stock in the index.
        gics_sector (models.CharField): The Global Industry Classification Standard
            category that the stock belongs to.
        gics_sub_industry (models.CharField): The Global Industry Classification Standard
            sub industry category that the stock belongs to.
        headquarters_location (models.CharField): The city where the company's headquarters
            are located.
        date_added (models.CharField): The date that the stock was added to the index
            (stored as free text, not as a date type).
        cik (models.IntegerField): The SEC CIK identification number for the stock.
        founded (models.CharField): The year that the company was founded.
    """
    symbol = models.CharField(max_length=10, unique=True)
    security_name = models.CharField(max_length=100, unique=True)
    gics_sector = models.CharField(max_length=100, null=True)
    gics_sub_industry = models.CharField(max_length=100, null=True)
    headquarters_location = models.CharField(max_length=100, null=True)
    # Stored as text rather than a DateTimeField.
    date_added = models.CharField(max_length=25, null=True)
    cik = models.IntegerField(unique=True)
    founded = models.CharField(max_length=50)
    def __str__(self):
        # Symbols are unique, so they identify a row.
        return self.symbol
    class Meta:
        verbose_name_plural = "SPY Market Index Composition"
# The Dow Jones Industrial Average Index Composition:
class DJIAIndexComposition(models.Model):
    """A data model representing a database table containing information
    on the Dow Jones Industrial Average Index.
    It is built for the velkozz API with the ETL pipeline API in mind for POST
    requests. There is no primary key declared, the model makes use of django's
    automatic primary key pk.
    Attributes:
        company (models.CharField): The company name.
        exchange (models.CharField): The exchange that the stock is a part of.
        symbol (models.CharField): The ticker symbol of the company.
        industry (models.CharField): The industry category that the company is a part of.
        date_added (models.DateTimeField): The date that the company was added to the DJIA.
        notes (models.CharField): Any additional notes for the company.
        weighting (models.CharField): What percentage of the index is made up of the company.
    """
    company = models.CharField(max_length=150, unique=True)
    exchange = models.CharField(max_length=15)
    symbol = models.CharField(max_length=10, unique=True)
    industry = models.CharField(max_length=150)
    date_added = models.DateTimeField()
    notes = models.CharField(max_length=200, null=True)
    weighting = models.CharField(max_length=20, null=True)
    def __str__(self):
        # Symbols are unique, so they identify a row.
        return self.symbol
    class Meta:
        verbose_name_plural = "DJIA Market Index Composition"
# S&P/TSX Index Composition:
class SPTSXIndexComposition(models.Model):
    """A data model representing a database table containing information
    on the S&P/TSX Composite Index.
    It is built for the velkozz API with the ETL pipeline API in mind for POST
    requests. There is no primary key declared, the model makes use of django's
    automatic primary key pk.
    Attributes:
        symbol (models.CharField): The ticker symbol of the company.
        company (models.CharField): The company name.
        sector (models.CharField): The sector category that the company is a part of.
        industry (models.CharField): The industry category that the company is a part of.
    """
    symbol = models.CharField(max_length=20, unique=True)
    company = models.CharField(max_length=200)
    sector = models.CharField(max_length=100)
    industry = models.CharField(max_length=200)
    def __str__(self):
        # Symbols are unique, so they identify a row.
        return self.symbol
    class Meta:
        verbose_name_plural = "S&P/TSX Market Index Composition"
# Financial Times Stock Exchange 100 Index Composition:
class FTSE100IndexComposition(models.Model):
    """A data model representing a database table containing information
    on the Financial Times Stock Exchange 100 Index.
    It is built for the velkozz API with the ETL pipeline API in mind for POST
    requests. There is no primary key declared, the model makes use of django's
    automatic primary key pk.
    Attributes:
        company (models.CharField): The company name.
        symbol (models.CharField): The ticker symbol of the company.
        industry (models.CharField): The industry category that the company is a part of.
    """
    company = models.CharField(max_length=150, unique=True)
    symbol = models.CharField(max_length=10, unique=True)
    industry = models.CharField(max_length=200)
    def __str__(self):
        # Symbols are unique, so they identify a row.
        return self.symbol
    class Meta:
        verbose_name_plural = "FTSE 100 Market Index Composition"
# Swiss Market Index Composition:
class SMIComposition(models.Model):
    """A data model representing a database table containing information
    on the Swiss Market Index.
    It is built for the velkozz API with the ETL pipeline API in mind for POST
    requests. There is no primary key declared, the model makes use of django's
    automatic primary key pk.
    Attributes:
        rank (models.IntegerField): The rank of the company in the index.
        company (models.CharField): The company name.
        industry (models.CharField): The industry category that the company is a part of.
        symbol (models.CharField): The ticker symbol of the company.
        canton (models.CharField): The canton that the company is located.
        weighting (models.CharField): What percentage of the index is made up of the company.
    """
    rank = models.IntegerField()
    company = models.CharField(max_length=100, unique=True)
    industry = models.CharField(max_length=100)
    symbol = models.CharField(max_length=10, unique=True)
    canton = models.CharField(max_length=100)
    weighting = models.CharField(max_length=20)
    def __str__(self):
        # Symbols are unique, so they identify a row.
        return self.symbol
    class Meta:
        verbose_name_plural = "Swiss Market Index Composition"
# Swiss Performance Index Composition:
class SPIComposition(models.Model):
    """A data model representing a database table containing information
    on the Swiss Performance Index.
    It is built for the velkozz API with the ETL pipeline API in mind for POST
    requests. There is no primary key declared, the model makes use of django's
    automatic primary key pk.
    Attributes:
        symbol (models.CharField): The ticker symbol of the company.
        company (models.CharField): The company name.
        smi_family (models.CharField): The swiss market index that the company is part of.
        date_added (models.CharField): The year that the company was added to the index.
        notes (models.CharField): Any additional notes associated to the company.
    """
    symbol = models.CharField(max_length=10, unique=True)
    company = models.CharField(max_length=100)
    smi_family = models.CharField(max_length=100, null=True)
    date_added = models.CharField(max_length=10, null=True)
    notes = models.CharField(max_length=200, null=True)
    def __str__(self):
        # Symbols are unique, so they identify a row.
        return self.symbol
    class Meta:
        verbose_name_plural = "Swiss Performance Index Composition"
# The NYSE Market Index Composition:
class NYSEComposition(models.Model):
    """A data model representing a database table containing information
    on the New-York Stock Exchange.
    It is built for the velkozz API with the ETL pipeline API in mind for POST
    requests. There is no primary key declared, the model makes use of django's
    automatic primary key pk. The model contains all the fields to directly
    interact with the data provided from the nyse index composition csv.
    Attributes:
        symbol (models.CharField): The ticker symbol of the company.
        company (models.CharField): The company name.
        market_cap (models.FloatField): The total market cap of the company.
        country (models.CharField): The country in which the company is headquartered.
        ipo_year (models.PositiveSmallIntegerField): The year when the company went public
        sector (models.CharField): The broad sector of the industry
        industry (models.CharField): The more specific industry that the company is a part
            of.
    """
    symbol = models.CharField(max_length=10, unique=True)
    company = models.CharField(max_length=300)
    market_cap = models.FloatField(null=True)
    country = models.CharField(max_length=150, null=True)
    ipo_year = models.PositiveSmallIntegerField(null=True)
    sector = models.CharField(max_length=150, null=True)
    industry = models.CharField(max_length=150, null=True)
    def __str__(self):
        # Company names are not declared unique; __str__ is display-only here.
        return self.company
    class Meta:
        verbose_name_plural = "NYSE Index Composition"
# NASDAQ Market Index composition:
class NASDAQComposition(models.Model):
    """A data model representing a database table containing information
    on the NASDAQ.
    It is built for the velkozz API with the ETL pipeline API in mind for POST
    requests. There is no primary key declared, the model makes use of django's
    automatic primary key pk. The model contains all the fields to directly
    interact with the data provided from the nasdaq index composition csv.
    Attributes:
        symbol (models.CharField): The ticker symbol of the company.
        company (models.CharField): The company name.
        market_cap (models.FloatField): The total market cap of the company.
        country (models.CharField): The country in which the company is headquartered.
        ipo_year (models.PositiveSmallIntegerField): The year when the company went public
        sector (models.CharField): The broad sector of the industry
        industry (models.CharField): The more specific industry that the company is a part
            of.
    """
    symbol = models.CharField(max_length=10, unique=True)
    company = models.CharField(max_length=300)
    market_cap = models.FloatField(null=True)
    country = models.CharField(max_length=150, null=True)
    ipo_year = models.PositiveSmallIntegerField(null=True)
    sector = models.CharField(max_length=150, null=True)
    industry = models.CharField(max_length=150, null=True)
    def __str__(self):
        # Company names are not declared unique; __str__ is display-only here.
        return self.company
    class Meta:
        verbose_name_plural = "NASDAQ Index Composition" | velkozz_web_api/apps/finance_api/models/market_indicies/market_indicies_models.py | from django.db import models
# Creating Database models for Market Indicies:
# The SPY 500 Index Composition:
class SPYIndexComposition(models.Model):
    """A data model representing a database table containing information
    on the S&P500 market index composition.
    It is built for the velkozz API with the ETL pipeline API in mind for POST
    requests. There is no primary key declared, the model makes use of django's
    automatic primary key pk.
    Attributes:
        symbol (models.CharField): The ticker symbol of the stock in the index.
        security_name (models.CharField): The full name of the stock in the index.
        gics_sector (models.CharField): The Global Industry Classification Standard
            category that the stock belongs to.
        gics_sub_industry (models.CharField): The Global Industry Classification Standard
            sub industry category that the stock belongs to.
        headquarters_location (models.CharField): The city where the company's headquarters
            are located.
        date_added (models.CharField): The date that the stock was added to the index
            (stored as free text, not as a date type).
        cik (models.IntegerField): The SEC CIK identification number for the stock.
        founded (models.CharField): The year that the company was founded.
    """
    symbol = models.CharField(max_length=10, unique=True)
    security_name = models.CharField(max_length=100, unique=True)
    gics_sector = models.CharField(max_length=100, null=True)
    gics_sub_industry = models.CharField(max_length=100, null=True)
    headquarters_location = models.CharField(max_length=100, null=True)
    # Stored as text rather than a DateTimeField.
    date_added = models.CharField(max_length=25, null=True)
    cik = models.IntegerField(unique=True)
    founded = models.CharField(max_length=50)
    def __str__(self):
        # Symbols are unique, so they identify a row.
        return self.symbol
    class Meta:
        verbose_name_plural = "SPY Market Index Composition"
# The Dow Jones Industrial Average Index Composition:
class DJIAIndexComposition(models.Model):
    """A data model representing a database table containing information
    on the Dow Jones Industrial Average Index.
    It is built for the velkozz API with the ETL pipeline API in mind for POST
    requests. There is no primary key declared, the model makes use of django's
    automatic primary key pk.
    Attributes:
        company (models.CharField): The company name.
        exchange (models.CharField): The exchange that the stock is a part of.
        symbol (models.CharField): The ticker symbol of the company.
        industry (models.CharField): The industry category that the company is a part of.
        date_added (models.DateTimeField): The date that the company was added to the DJIA.
        notes (models.CharField): Any additional notes for the company.
        weighting (models.CharField): What percentage of the index is made up of the company.
    """
    company = models.CharField(max_length=150, unique=True)
    exchange = models.CharField(max_length=15)
    symbol = models.CharField(max_length=10, unique=True)
    industry = models.CharField(max_length=150)
    date_added = models.DateTimeField()
    notes = models.CharField(max_length=200, null=True)
    weighting = models.CharField(max_length=20, null=True)
    def __str__(self):
        # Symbols are unique, so they identify a row.
        return self.symbol
    class Meta:
        verbose_name_plural = "DJIA Market Index Composition"
# S&P/TSX Index Composition:
class SPTSXIndexComposition(models.Model):
    """A data model representing a database table containing information
    on the S&P/TSX Composite Index.
    It is built for the velkozz API with the ETL pipeline API in mind for POST
    requests. There is no primary key declared, the model makes use of django's
    automatic primary key pk.
    Attributes:
        symbol (models.CharField): The ticker symbol of the company.
        company (models.CharField): The company name.
        sector (models.CharField): The sector category that the company is a part of.
        industry (models.CharField): The industry category that the company is a part of.
    """
    symbol = models.CharField(max_length=20, unique=True)
    company = models.CharField(max_length=200)
    sector = models.CharField(max_length=100)
    industry = models.CharField(max_length=200)
    def __str__(self):
        # Symbols are unique, so they identify a row.
        return self.symbol
    class Meta:
        verbose_name_plural = "S&P/TSX Market Index Composition"
# Financial Times Stock Exchange 100 Index Composition:
class FTSE100IndexComposition(models.Model):
    """A data model representing a database table containing information
    on the Financial Times Stock Exchange 100 Index.
    It is built for the velkozz API with the ETL pipeline API in mind for POST
    requests. There is no primary key declared, the model makes use of django's
    automatic primary key pk.
    Attributes:
        company (models.CharField): The company name.
        symbol (models.CharField): The ticker symbol of the company.
        industry (models.CharField): The industry category that the company is a part of.
    """
    company = models.CharField(max_length=150, unique=True)
    symbol = models.CharField(max_length=10, unique=True)
    industry = models.CharField(max_length=200)
    def __str__(self):
        # Symbols are unique, so they identify a row.
        return self.symbol
    class Meta:
        verbose_name_plural = "FTSE 100 Market Index Composition"
# Swiss Market Index Composition:
class SMIComposition(models.Model):
    """A data model representing a database table containing information
    on the Swiss Market Index.
    It is built for the velkozz API with the ETL pipeline API in mind for POST
    requests. There is no primary key declared, the model makes use of django's
    automatic primary key pk.
    Attributes:
        rank (models.IntegerField): The rank of the company in the index.
        company (models.CharField): The company name.
        industry (models.CharField): The industry category that the company is a part of.
        symbol (models.CharField): The ticker symbol of the company.
        canton (models.CharField): The canton that the company is located.
        weighting (models.CharField): What percentage of the index is made up of the company.
    """
    rank = models.IntegerField()
    company = models.CharField(max_length=100, unique=True)
    industry = models.CharField(max_length=100)
    symbol = models.CharField(max_length=10, unique=True)
    canton = models.CharField(max_length=100)
    weighting = models.CharField(max_length=20)
    def __str__(self):
        # Symbols are unique, so they identify a row.
        return self.symbol
    class Meta:
        verbose_name_plural = "Swiss Market Index Composition"
# Swiss Performance Index Composition:
class SPIComposition(models.Model):
    """A data model representing a database table containing information
    on the Swiss Performance Index.
    It is built for the velkozz API with the ETL pipeline API in mind for POST
    requests. There is no primary key declared, the model makes use of django's
    automatic primary key pk.
    Attributes:
        symbol (models.CharField): The ticker symbol of the company.
        company (models.CharField): The company name.
        smi_family (models.CharField): The swiss market index that the company is part of.
        date_added (models.CharField): The year that the company was added to the index.
        notes (models.CharField): Any additional notes associated to the company.
    """
    symbol = models.CharField(max_length=10, unique=True)
    company = models.CharField(max_length=100)
    smi_family = models.CharField(max_length=100, null=True)
    date_added = models.CharField(max_length=10, null=True)
    notes = models.CharField(max_length=200, null=True)
    def __str__(self):
        # Symbols are unique, so they identify a row.
        return self.symbol
    class Meta:
        verbose_name_plural = "Swiss Performance Index Composition"
# The NYSE Market Index Composition:
class NYSEComposition(models.Model):
    """A data model representing a database table containing information
    on the New-York Stock Exchange.
    It is built for the velkozz API with the ETL pipeline API in mind for POST
    requests. There is no primary key declared, the model makes use of django's
    automatic primary key pk. The model contains all the fields to directly
    interact with the data provided from the nyse index composition csv.
    Attributes:
        symbol (models.CharField): The ticker symbol of the company.
        company (models.CharField): The company name.
        market_cap (models.FloatField): The total market cap of the company.
        country (models.CharField): The country in which the company is headquartered.
        ipo_year (models.PositiveSmallIntegerField): The year when the company went public
        sector (models.CharField): The broad sector of the industry
        industry (models.CharField): The more specific industry that the company is a part
            of.
    """
    symbol = models.CharField(max_length=10, unique=True)
    company = models.CharField(max_length=300)
    market_cap = models.FloatField(null=True)
    country = models.CharField(max_length=150, null=True)
    ipo_year = models.PositiveSmallIntegerField(null=True)
    sector = models.CharField(max_length=150, null=True)
    industry = models.CharField(max_length=150, null=True)
    def __str__(self):
        # Company names are not declared unique; __str__ is display-only here.
        return self.company
    class Meta:
        verbose_name_plural = "NYSE Index Composition"
# NASDAQ Market Index composition:
class NASDAQComposition(models.Model):
    """A data model representing a database table containing information
    on the NASDAQ.
    It is built for the velkozz API with the ETL pipeline API in mind for POST
    requests. There is no primary key declared, the model makes use of django's
    automatic primary key pk. The model contains all the fields to directly
    interact with the data provided from the nasdaq index composition csv.
    Attributes:
        symbol (models.CharField): The ticker symbol of the company.
        company (models.CharField): The company name.
        market_cap (models.FloatField): The total market cap of the company.
        country (models.CharField): The country in which the company is headquartered.
        ipo_year (models.PositiveSmallIntegerField): The year when the company went public
        sector (models.CharField): The broad sector of the industry
        industry (models.CharField): The more specific industry that the company is a part
            of.
    """
    symbol = models.CharField(max_length=10, unique=True)
    company = models.CharField(max_length=300)
    market_cap = models.FloatField(null=True)
    country = models.CharField(max_length=150, null=True)
    ipo_year = models.PositiveSmallIntegerField(null=True)
    sector = models.CharField(max_length=150, null=True)
    industry = models.CharField(max_length=150, null=True)
    def __str__(self):
        # Company names are not declared unique; __str__ is display-only here.
        return self.company
    class Meta:
        verbose_name_plural = "NASDAQ Index Composition" | 0.820829 | 0.50238 |
import json
import os
import uuid
from typing import Any, Dict
import boto3 # type: ignore
from aws_lambda_powertools import Logger, Tracer
from aws_lambda_powertools.logging.correlation_paths import API_GATEWAY_HTTP
# Powertools structured logger and X-Ray tracer shared by all handlers below.
logger = Logger()
tracer = Tracer()
# Pull out the DynamoDB table name from the environment
table_name = os.environ["TABLE_NAME"]
# Module-level handles so the resource/table are reused across warm invocations.
ddb = boto3.resource("dynamodb")
table = ddb.Table(table_name)
def text_response(message: str, code: int = 200) -> Dict[str, Any]:
    """Build a plain-text API Gateway response.

    Parameters
    ----------
    message : str
        Body text of the response.
    code : int
        HTTP status code (defaults to 200).

    Returns
    -------
    Dict
        Response structure understood by API Gateway.
    """
    response: Dict[str, Any] = {"statusCode": code}
    response["headers"] = {"Content-Type": "text/plain"}
    response["body"] = message
    return response
@tracer.capture_method
def create_short_url(event: Dict[str, Any]) -> Dict[str, Any]:
    """Create a shortened URL for the request's targetUrl query parameter.

    Parameters
    ----------
    event : Dict
        API Gateway event

    Returns
    -------
    Dict
        Plain-text API Gateway response announcing the new short URL.
    """
    destination = event["queryStringParameters"]["targetUrl"]
    # Eight characters of a UUID4 serve as the short slug.
    slug = str(uuid.uuid4())[0:8]
    # Persist the slug -> destination mapping.
    table.put_item(Item={"id": slug, "target_url": destination})
    # Assemble the public redirect URL from the request's own domain and path.
    request_ctx = event["requestContext"]
    url = "".join(
        ["https://", request_ctx["domainName"], request_ctx["path"], slug]
    )
    return text_response("Created URL: %s" % url)
@tracer.capture_method
def read_short_url(event: Dict[str, Any]) -> Dict[str, Any]:
    """Resolve a slug from the request path and redirect to its target.

    Parameters
    ----------
    event : Dict
        API Gateway event

    Returns
    -------
    Dict
        301 redirect when the slug exists, otherwise a 400 text response.
    """
    slug = event["pathParameters"]["proxy"]
    # Look the slug up in DynamoDB.
    record = table.get_item(Key={"id": slug})
    logger.debug("RESPONSE: " + json.dumps(record))
    item = record.get("Item", None)
    if item is not None:
        # Browser-level redirect to the stored destination.
        return {"statusCode": 301, "headers": {"Location": item.get("target_url")}}
    return text_response("No redirect found for " + slug, 400)
@tracer.capture_lambda_handler
@logger.inject_lambda_context(correlation_id_path=API_GATEWAY_HTTP)
def lambda_handler(event: Dict[str, Any], _):
"""Lambda event handler
Parameters
----------
event : Dict
API Gateway Response
_ : Any
Lambda Context
Returns
-------
Dict
API Gateway Response
"""
logger.info("EVENT: " + json.dumps(event))
query_string_params = event.get("queryStringParameters")
if query_string_params is not None:
target_url = query_string_params["targetUrl"]
if target_url is not None:
return create_short_url(event)
path_parameters = event.get("pathParameters")
if path_parameters is not None and path_parameters["proxy"] is not None:
return read_short_url(event)
return text_response("usage: ?targetUrl=URL") | src/app.py | import json
import os
import uuid
from typing import Any, Dict
import boto3 # type: ignore
from aws_lambda_powertools import Logger, Tracer
from aws_lambda_powertools.logging.correlation_paths import API_GATEWAY_HTTP
# Powertools structured logger and X-Ray tracer shared by all handlers below.
logger = Logger()
tracer = Tracer()
# Pull out the DynamoDB table name from the environment
table_name = os.environ["TABLE_NAME"]
# Module-level handles so the resource/table are reused across warm invocations.
ddb = boto3.resource("dynamodb")
table = ddb.Table(table_name)
def text_response(message: str, code: int = 200) -> Dict[str, Any]:
    """Build a plain-text API Gateway response.

    Parameters
    ----------
    message : str
        Body text of the response.
    code : int
        HTTP status code (defaults to 200).

    Returns
    -------
    Dict
        Response structure understood by API Gateway.
    """
    response: Dict[str, Any] = {"statusCode": code}
    response["headers"] = {"Content-Type": "text/plain"}
    response["body"] = message
    return response
@tracer.capture_method
def create_short_url(event: Dict[str, Any]) -> Dict[str, Any]:
    """Create a shortened URL for the request's targetUrl query parameter.

    Parameters
    ----------
    event : Dict
        API Gateway event

    Returns
    -------
    Dict
        Plain-text API Gateway response announcing the new short URL.
    """
    destination = event["queryStringParameters"]["targetUrl"]
    # Eight characters of a UUID4 serve as the short slug.
    slug = str(uuid.uuid4())[0:8]
    # Persist the slug -> destination mapping.
    table.put_item(Item={"id": slug, "target_url": destination})
    # Assemble the public redirect URL from the request's own domain and path.
    request_ctx = event["requestContext"]
    url = "".join(
        ["https://", request_ctx["domainName"], request_ctx["path"], slug]
    )
    return text_response("Created URL: %s" % url)
@tracer.capture_method
def read_short_url(event: Dict[str, Any]) -> Dict[str, Any]:
    """Resolve a slug from the request path and redirect to its target.

    Parameters
    ----------
    event : Dict
        API Gateway event

    Returns
    -------
    Dict
        301 redirect when the slug exists, otherwise a 400 text response.
    """
    slug = event["pathParameters"]["proxy"]
    # Look the slug up in DynamoDB.
    record = table.get_item(Key={"id": slug})
    logger.debug("RESPONSE: " + json.dumps(record))
    item = record.get("Item", None)
    if item is not None:
        # Browser-level redirect to the stored destination.
        return {"statusCode": 301, "headers": {"Location": item.get("target_url")}}
    return text_response("No redirect found for " + slug, 400)
@tracer.capture_lambda_handler
@logger.inject_lambda_context(correlation_id_path=API_GATEWAY_HTTP)
def lambda_handler(event: Dict[str, Any], _):
"""Lambda event handler
Parameters
----------
event : Dict
API Gateway Response
_ : Any
Lambda Context
Returns
-------
Dict
API Gateway Response
"""
logger.info("EVENT: " + json.dumps(event))
query_string_params = event.get("queryStringParameters")
if query_string_params is not None:
target_url = query_string_params["targetUrl"]
if target_url is not None:
return create_short_url(event)
path_parameters = event.get("pathParameters")
if path_parameters is not None and path_parameters["proxy"] is not None:
return read_short_url(event)
return text_response("usage: ?targetUrl=URL") | 0.760517 | 0.116814 |
import pytest
from marshmallow import Schema, fields, RAISE, INCLUDE, EXCLUDE
from marshmallow_jsonschema import UnsupportedValueError, JSONSchema
from . import validate_and_dump
def test_additional_properties_default():
    """additionalProperties defaults to False for a plain schema."""
    class TestSchema(Schema):
        foo = fields.Integer()
    schema = TestSchema()
    dumped = validate_and_dump(schema)
    assert not dumped["definitions"]["TestSchema"]["additionalProperties"]
@pytest.mark.parametrize("additional_properties_value", (False, True))
def test_additional_properties_from_meta(additional_properties_value):
    """Meta.additional_properties is copied into the generated JSON schema."""
    class TestSchema(Schema):
        class Meta:
            additional_properties = additional_properties_value
        foo = fields.Integer()
    schema = TestSchema()
    dumped = validate_and_dump(schema)
    assert (
        dumped["definitions"]["TestSchema"]["additionalProperties"]
        == additional_properties_value
    )
def test_additional_properties_invalid_value():
    """A non-boolean Meta.additional_properties raises UnsupportedValueError."""
    class TestSchema(Schema):
        class Meta:
            additional_properties = "foo"
        foo = fields.Integer()
    schema = TestSchema()
    json_schema = JSONSchema()
    with pytest.raises(UnsupportedValueError):
        json_schema.dump(schema)
def test_additional_properties_nested_default():
    """A schema with a nested field still gets the False default at top level."""
    class TestNestedSchema(Schema):
        foo = fields.Integer()
    class TestSchema(Schema):
        nested = fields.Nested(TestNestedSchema())
    schema = TestSchema()
    dumped = validate_and_dump(schema)
    assert not dumped["definitions"]["TestSchema"]["additionalProperties"]
@pytest.mark.parametrize("additional_properties_value", (False, True))
def test_additional_properties_from_nested_meta(additional_properties_value):
    """Meta.additional_properties on a nested schema lands in its definition."""
    class TestNestedSchema(Schema):
        class Meta:
            additional_properties = additional_properties_value
        foo = fields.Integer()
    class TestSchema(Schema):
        nested = fields.Nested(TestNestedSchema())
    schema = TestSchema()
    dumped = validate_and_dump(schema)
    assert (
        dumped["definitions"]["TestNestedSchema"]["additionalProperties"]
        == additional_properties_value
    )
@pytest.mark.parametrize(
    "unknown_value, additional_properties",
    ((RAISE, False), (INCLUDE, True), (EXCLUDE, False)),
)
def test_additional_properties_deduced(unknown_value, additional_properties):
    """Meta.unknown deduces additionalProperties: only INCLUDE maps to True."""
    class TestSchema(Schema):
        class Meta:
            unknown = unknown_value
        foo = fields.Integer()
    schema = TestSchema()
    dumped = validate_and_dump(schema)
    assert (
        dumped["definitions"]["TestSchema"]["additionalProperties"]
        == additional_properties
    )
def test_additional_properties_unknown_invalid_value():
    """An invalid Meta.unknown value raises UnsupportedValueError on dump."""
    class TestSchema(Schema):
        class Meta:
            unknown = "foo"
        foo = fields.Integer()
    schema = TestSchema()
    json_schema = JSONSchema()
    with pytest.raises(UnsupportedValueError):
        json_schema.dump(schema) | tests/test_additional_properties.py | import pytest
from marshmallow import Schema, fields, RAISE, INCLUDE, EXCLUDE
from marshmallow_jsonschema import UnsupportedValueError, JSONSchema
from . import validate_and_dump
def test_additional_properties_default():
    """additionalProperties defaults to False for a plain schema."""
    class TestSchema(Schema):
        foo = fields.Integer()
    schema = TestSchema()
    dumped = validate_and_dump(schema)
    assert not dumped["definitions"]["TestSchema"]["additionalProperties"]
@pytest.mark.parametrize("additional_properties_value", (False, True))
def test_additional_properties_from_meta(additional_properties_value):
    """Meta.additional_properties is copied into the generated JSON schema."""
    class TestSchema(Schema):
        class Meta:
            additional_properties = additional_properties_value
        foo = fields.Integer()
    schema = TestSchema()
    dumped = validate_and_dump(schema)
    assert (
        dumped["definitions"]["TestSchema"]["additionalProperties"]
        == additional_properties_value
    )
def test_additional_properties_invalid_value():
    """A non-boolean Meta.additional_properties raises UnsupportedValueError."""
    class TestSchema(Schema):
        class Meta:
            additional_properties = "foo"
        foo = fields.Integer()
    schema = TestSchema()
    json_schema = JSONSchema()
    with pytest.raises(UnsupportedValueError):
        json_schema.dump(schema)
def test_additional_properties_nested_default():
    """A schema with a nested field still gets the False default at top level."""
    class TestNestedSchema(Schema):
        foo = fields.Integer()
    class TestSchema(Schema):
        nested = fields.Nested(TestNestedSchema())
    schema = TestSchema()
    dumped = validate_and_dump(schema)
    assert not dumped["definitions"]["TestSchema"]["additionalProperties"]
@pytest.mark.parametrize("additional_properties_value", (False, True))
def test_additional_properties_from_nested_meta(additional_properties_value):
    """A nested schema's ``Meta.additional_properties`` shows up in its own definition."""

    class TestNestedSchema(Schema):
        class Meta:
            additional_properties = additional_properties_value

        foo = fields.Integer()

    class TestSchema(Schema):
        nested = fields.Nested(TestNestedSchema())

    result = validate_and_dump(TestSchema())

    assert (
        result["definitions"]["TestNestedSchema"]["additionalProperties"]
        == additional_properties_value
    )
@pytest.mark.parametrize(
    "unknown_value, additional_properties",
    ((RAISE, False), (INCLUDE, True), (EXCLUDE, False)),
)
def test_additional_properties_deduced(unknown_value, additional_properties):
    """additionalProperties is deduced from ``Meta.unknown`` when not set explicitly."""

    class TestSchema(Schema):
        class Meta:
            unknown = unknown_value

        foo = fields.Integer()

    result = validate_and_dump(TestSchema())

    assert result["definitions"]["TestSchema"]["additionalProperties"] == additional_properties
def test_additional_properties_unknown_invalid_value():
    """An unsupported ``Meta.unknown`` value makes ``JSONSchema.dump`` raise."""

    class TestSchema(Schema):
        class Meta:
            unknown = "foo"

        foo = fields.Integer()

    with pytest.raises(UnsupportedValueError):
        JSONSchema().dump(TestSchema())
import numpy as _np
import os as _os
import hyperopt as _hyperopt
import time as _time
import functools as _functools
import warnings as _warnings
import matplotlib.pyplot as _plt
import sklearn.model_selection as _sklearn_model_selection
from .. import NeuralNet as _NeuralNet
from ... import file_utils as _file_utils
from hyperopt import base as _base
_base.have_bson = False
class GridSearchCV():
    """Hyperparameter grid search CV across heterogeneous model types.

    Dispatches to sklearn/xgboost, dask_ml, or the in-house NeuralNet grid
    search depending on the model/data types, and persists each model's
    results under ``path_GridSearchCV_dir``.
    """

    def __init__(self,
                 models_dict,
                 cv=4,
                 scoring=None,
                 metrics=None,
                 retrain=True,
                 path_GridSearchCV_dir='GridSearchCV',
                 n_jobs=-1,
                 verbose=2,
                 **kwargs):
        """
        hyperparameter GridSearchCV across different types of models

        Arguments:
        ----------
        models_dict: dictionary containing all models and their param_grid.
            - Dictionary Format: {'model name':{'model':model object,
                                                'param_grid': {parameter name, parameter list}}
        cv: cross-validation index.
        scoring: Default: None (interpreted as {'metric':None,'maximize':True}).
            - If scoring['metric'] = None, use default score for given sklearn model, or use 'loss' for neural network.
            - For custom scoring functions, pass 'scoring = {'metric':function or key-word string,
                                                             'maximize':True/False}
            - for sklearn/dask_ml GridSearchCV, a list of valid metrics can be printed via 'sklearn.metrics.SCORERS.keys()'
        metrics: dictionary of the form {metric name (str): metric function (sklearn.metrics...)}. The metric will be evaluated after CV on the test set
        retrain: Boolean. whether or not you want to retrain the model if it has already been saved in the path_GridSearchCV_dir folder
        path_GridSearchCV_dir: root directory where the GridSearchCV outputs will be saved.
        n_jobs: int. Default: -1. number of parallel jobs to run. If -1, all available threads will be used
            - Note: parallel computing is not supported for Neural Net models
        verbose: verbosity of prints.
        """
        # Use None sentinels instead of mutable default arguments so the
        # default dicts are not shared across instances.
        if scoring is None:
            scoring = {'metric': None, 'maximize': True}
        if metrics is None:
            metrics = {None: None}
        self.models_dict = models_dict
        self.cv = cv
        self.scoring = scoring
        self.metrics = metrics
        self.retrain = retrain
        self.path_GridSearchCV_dir = path_GridSearchCV_dir
        self.n_jobs = n_jobs
        self.verbose = verbose
        self.kwargs = kwargs
        # file persistence helpers (dill-based save/load)
        self.save = _file_utils.save
        self.load = _file_utils.load

    def load_NeuralNet(self, path_model_dir, X_train, y_train, epochs):
        """
        Load a previously saved model_dict for the Neural Net case.

        Note: X_train, y_train and epochs are accepted for interface
        compatibility but are not used when reloading.
        """
        # fetch best params
        best_params_ = self.load('best_params_', 'dill', path_model_dir)
        # rebuild model_dict
        model_dict = _NeuralNet.DenseNet.model_dict(**best_params_)
        model_dict['best_model'] = _NeuralNet.utils.load_model(
            _os.path.join(path_model_dir, 'best_estimator_.h5'))
        model_dict['best_params'] = best_params_
        # the CV score is not persisted for neural nets, so flag it as missing
        model_dict['best_cv_score'] = _np.nan
        return model_dict

    def _single_model_GridSearchCV(self,
                                   model_dict_,
                                   X_train, y_train, X_test, y_test,
                                   path_model_dir):
        """
        Run Grid Search CV on a single model and return the updated model_dict_
        with 'best_model', 'best_params' and 'best_cv_score' filled in.
        """
        type_model = str(type(model_dict_['model']))
        type_X_train = str(type(X_train))
        if ('sklearn' in type_model or 'xgboost' in type_model) and 'dask' not in type_X_train:
            GridSearchCV = _sklearn_model_selection.GridSearchCV(
                model_dict_['model'],
                model_dict_['param_grid'],
                n_jobs=self.n_jobs,
                cv=self.cv,
                scoring=self.scoring['metric'],
                verbose=_np.max((0, self.verbose - 1)))
            # sklearn expects 1D targets for single-output problems
            # (assumes y_train is 2D here -- TODO confirm for all callers)
            if y_train.shape[1] == 1:
                y_train = _np.array(y_train).reshape(-1,)
            GridSearchCV.fit(X_train, y_train)
        elif 'dask' in type_X_train:
            from ..dask_ml_extend import model_selection as dask_ml_model_selection
            GridSearchCV = dask_ml_model_selection.GridSearchCV(
                model_dict_['model'],
                model_dict_['param_grid'],
                n_jobs=self.n_jobs,
                cv=self.cv,
                scoring=self.scoring['metric'],
                )
            GridSearchCV.fit(X_train, y_train)
        else:  # run gridsearch using the neural net implementation
            if self.scoring['metric'] is None:
                # neural nets default to minimizing the loss
                self.scoring = {'metric': 'loss', 'maximize': False}
            # check kwargs for epochs
            epochs = 100
            for item in self.kwargs.items():
                if 'epochs' in item[0]: epochs = item[1]
            GridSearchCV = _NeuralNet.search.GridSearchCV(
                model_dict_['model'],
                model_dict_['param_grid'],
                cv=self.cv,
                scoring=self.scoring,
                epochs=epochs,
                path_report_folder=path_model_dir,
                verbose=_np.max((0, self.verbose - 1)))
            GridSearchCV.fit(X_train, y_train, X_test, y_test)
        model_dict_['best_model'] = GridSearchCV.best_estimator_
        model_dict_['best_params'] = GridSearchCV.best_params_
        model_dict_['best_cv_score'] = GridSearchCV.best_score_
        if 'sklearn' in str(type(model_dict_['model'])):
            self.save(model_dict_, 'model_dict', 'dill', path_model_dir)
        return model_dict_

    def fit(self,
            X_train,
            y_train,
            X_test,
            y_test):
        """
        Fit the X_train, y_train dataset & evaluate metrics on X_test, y_test for each of the best models found in each individual models GridSearchCV

        Arguments:
        ---------
        X_train, y_train, X_test, y_test: train & test datasets (pandas or dask dataframes)
        """
        # instantiate path_model_dirs dictionary so we can know where the models are saved
        self.path_model_dirs = {}
        for key in self.models_dict.keys():
            if self.verbose >= 1: print('\n----', key, '----')

            # define model directory
            path_model_dir = _os.path.join(self.path_GridSearchCV_dir, key)
            self.path_model_dirs[key] = path_model_dir
            if self.verbose >= 1: print('path_model_dir:', path_model_dir)

            model_type = type(self.models_dict[key]['model'])
            if 'sklearn' in str(model_type) or 'xgboost' in str(model_type):
                path_file = _os.path.join(path_model_dir, 'model_dict.dill')
            elif 'Net' in key:
                path_file = _os.path.join(path_model_dir, 'best_params_.dill')
            # NOTE(review): if a model is neither sklearn/xgboost nor named
            # '*Net*', path_file is unbound below when retrain is False --
            # TODO confirm the two cases above cover all supported models.

            if self.retrain or not _os.path.isfile(path_file):
                self.models_dict[key] = self._single_model_GridSearchCV(self.models_dict[key],
                                                                        X_train, y_train,
                                                                        X_test, y_test,
                                                                        path_model_dir)
            else:  # reload previously trained model
                if 'sklearn' in str(type(self.models_dict[key]['model'])):
                    self.models_dict[key] = self.load('model_dict', 'dill', path_model_dir)
                elif 'Net' in key:
                    # check kwargs for epochs
                    epochs = 100
                    for item in self.kwargs.items():
                        if 'epochs' in item[0]: epochs = item[1]
                    self.models_dict[key] = self.load_NeuralNet(path_model_dir,
                                                                X_train, y_train,
                                                                epochs)

            y_pred = self.models_dict[key]['best_model'].predict(X_test)

            # hold-out score using the estimator's native scorer/loss
            if 'Net' not in key:
                self.models_dict[key]['best_pred_score'] = self.models_dict[key]['best_model'].score(X_test, y_test)
            else:
                self.models_dict[key]['best_pred_score'] = self.models_dict[key]['best_model'].evaluate(X_test, y_test, verbose=0)

            if self.verbose >= 1:
                print('\tbest_cv_score:', self.models_dict[key]['best_cv_score'])
                print('\tbest_pred_score:', self.models_dict[key]['best_pred_score'])

            # evaluate the user-supplied metrics on the test set
            for metric_key in self.metrics.keys():
                if self.metrics[metric_key] is not None:
                    try:
                        self.models_dict[key][metric_key] = self.metrics[metric_key](y_test, y_pred)
                        print('\t', metric_key, ':', self.models_dict[key][metric_key])
                    except Exception as e:
                        print('Exception occured for', metric_key, ':', str(e))

            if 'sklearn' in str(type(self.models_dict[key]['model'])):
                self.save(self.models_dict[key], 'model_dict', 'dill', path_model_dir)
            elif 'Net' in key:
                # BUGFIX: the inner loop variable previously shadowed the outer
                # model-name 'key'; renamed to 'subset_key'.
                # NOTE(review): model_dict_subset is built but never saved or
                # returned -- TODO confirm whether it should be persisted.
                model_dict_subset = self.models_dict[key].copy()
                for subset_key in self.models_dict[key].keys():
                    if subset_key not in ['y_test', 'y_pred', 'best_pred_score'] + list(self.metrics.keys()):
                        model_dict_subset.pop(subset_key)
class BayesianSearchCV():
    """Bayesian hyperparameter search CV across heterogeneous model types.

    Uses the hyperopt API (tree-structured Parzen estimator) to optimize each
    model's hyperparameters via cross-validation, then refits the best model
    and evaluates it on the test set.
    """

    def __init__(self,
                 models_dict,
                 cv=4,
                 scoring=None,
                 metrics=None,
                 retrain=True,
                 path_BayesianSearchCV_dir='BayesianSearchCV',
                 n_jobs=-1,
                 verbose=2,
                 **kwargs):
        """
        Hyperparameter BayesianSearchCV across different types of models. This class leverages the hyperopt API.

        Arguments:
        ----------
        models_dict: dictionary containing all models and their param_grid.
            - Dictionary Format: {'model name':{'model':model object,
                                                'param_grid': {parameter name, parameter list}}
        cv: cross-validation index.
        scoring: Default: None (interpreted as {'metric':None,'maximize':True}).
            - If scoring['metric'] = None, use default score for given sklearn model, or use 'loss' for neural network.
            - For custom scoring functions, pass 'scoring = {'metric':function or key-word string,
                                                             'maximize':True/False}
            - for sklearn/xgboost/dask_ml GridSearchCV, a list of valid metrics can be printed via 'sklearn.metrics.SCORERS.keys()'
        metrics: dictionary of the form {metric name (str): metric function (sklearn.metrics...)}. The metric will be evaluated after CV on the test set
        retrain: Boolean. whether or not you want to retrain the model if it has already been saved in the path_BayesianSearchCV_dir folder
        path_BayesianSearchCV_dir: root directory where the BayesianSearchCV outputs will be saved.
        n_jobs: int. Default: -1. number of parallel jobs to run. If -1, all available threads will be used
            - Note: parallel computing is not supported for Neural Net models
        verbose: print-out verbosity

        Notes:
        ------
        Technically, the optimization is performed using the tree-structured Parzen estimator approach, not a pure bayesian estimator. This approach is more efficient handling hyperparameter optimization tasks with high dimensions and small fitness evaluation budgets. See more details in the paper linked below
        https://papers.nips.cc/paper/4443-algorithms-for-hyper-parameter-optimization.pdf
        """
        # Use None sentinels instead of mutable default arguments so the
        # default dicts are not shared across instances.
        if scoring is None:
            scoring = {'metric': None, 'maximize': True}
        if metrics is None:
            metrics = {None: None}
        self.models_dict = models_dict
        self.cv = cv
        self.scoring = scoring
        self.metrics = metrics
        self.retrain = retrain
        self.path_BayesianSearchCV_dir = path_BayesianSearchCV_dir
        self.n_jobs = n_jobs
        self.verbose = verbose
        self.kwargs = kwargs

        # file persistence helpers (dill-based save/load)
        self.save = _file_utils.save
        self.load = _file_utils.load

        # define model directory for each model up front
        self.path_model_dirs = {}
        for key in self.models_dict.keys():
            self.path_model_dirs[key] = _os.path.join(self.path_BayesianSearchCV_dir, key)

    def _build_space(self, param_grid):
        """
        Build the hyperparameter space for input into hyperopt.fmin() function.

        Arguments:
        ----------
        param_grid: hyperparameter dictionary with key-list pairs.

        Returns:
        --------
        space: dictionary with key-hyperopt.hp... pairs

        Notes:
        ------
        For each hyperparameter of interest, the max and min in the list of possible values in the param_grid[key] element is evaluated. If the difference between the number of decades between the min and max value is greater than 1, a uniform probability distribution will be sampled between log10(min) and log10(max). This will result in the prefix 'log10.' being pre-pended to the key in the 'space' dict for the given hyperparameter under consideration.
        For the case of non-numeric hyperparameters, the space[key] value will be assigned using the hyperopt.hp.choice() function, with the choices being in integer form (index), rather than their raw string value.
        To convert the hyperparameters from hyperopts 'space' back to the parameters required by the model under evaluation, we run the function '_update_model_params()' in each instance of the 'objective' function evaluation.
        """
        if self.verbose > 9:
            # BUGFIX: this message was previously a no-op bare string literal
            print('Building param space...')

        _warnings.filterwarnings('ignore')

        param_grid = param_grid.copy()
        space = {}
        for key in param_grid.keys():
            params = param_grid[key]
            if self.verbose > 9:
                print('\tinput:', key, params)
            type_str = str(type(params[0]))
            if 'float' in type_str or 'int' in type_str:
                min_ = min(params)
                max_ = max(params)
                log10_min_ = _np.log10(min_)
                log10_max_ = _np.log10(max_)
                # span of more than one decade -> sample uniformly in log10 space
                if round(log10_max_) - round(log10_min_) > 1 and round(log10_max_) - round(log10_min_) != _np.inf:
                    space['log10.' + key] = _hyperopt.hp.uniform(key, log10_min_, log10_max_)
                    if self.verbose > 9:
                        print('\toutput:', 'log10.' + key, 'uniform', log10_min_, log10_max_)
                else:
                    if 'int' in type_str:
                        space[key] = _hyperopt.hp.quniform(key, min_, max_, 1)
                        if self.verbose > 9:
                            print('\toutput:', key, 'quniform', min_, max_)
                    elif 'float' in type_str:
                        space[key] = _hyperopt.hp.uniform(key, min_, max_)
                        if self.verbose > 9:
                            print('\toutput:', key, 'uniform', min_, max_)
            elif 'str' in type_str:
                # categorical parameters are encoded by index (see _update_model_params)
                space[key] = _hyperopt.hp.choice(key, [i for i in range(len(params))])
                if self.verbose > 9:
                    print('\toutput:', key, 'choice', [i for i in range(len(params))])
            else:
                raise Exception('type(params[0]) is ' + type_str + '. This type of hyperparameter is not yet supported.')
        assert (len(space.keys()) == len(param_grid.keys())), 'len(space.keys())=' + str(len(space.keys())) + ', which is not equal to len(param_grid.keys())=' + str(len(param_grid.keys()))
        if self.verbose > 9:
            print('...finished building space')
        # NOTE(review): this resets the process-wide warning filters to the
        # 'default' state rather than restoring the previous state -- TODO
        # confirm this global side effect is intended.
        _warnings.filterwarnings('default')
        return space

    def _plot_space(self, space):
        '''
        Generate plots to visualize the probability distribution for the parameter space being evaluated.

        Arguments:
        ----------
        space: dictionary of form {<parameter ID>: hyperopt.hp... object} generated from the '_build_space()' function

        Returns:
        -------
        None. displays histograms showing the probability space
        '''
        n_samples = 5000
        for title, space_slice in space.items():
            # Monte-Carlo sample the hyperopt distribution to visualize it
            evaluated = [_hyperopt.pyll.stochastic.sample(space_slice) for _ in range(n_samples)]
            _plt.title(title)
            _plt.hist(evaluated)
            _plt.grid(which='both', visible=False)
            _plt.show()

    def _update_model_params(self, params, model_ID, model, param_grid):
        """
        Iterate through the params and update the models arguments/params, ensuring the type of each parameter does not change after updating and transforming log10 distributions back to their base value

        Arguments:
        ----------
        params: hyperparameter dictionary being evaluated by hyperopt
        model_ID: name of the model under evaluation; 'densenet'-style models
            are rebuilt from the params rather than mutated in place
        model: model being evaluated
        param_grid: original parameter grid under evaluation

        Returns
        -------
        params_transform: dictionary similar to params, but transformed to match the inputs required by the model
        model: Updated model object with the params under evaluation applied to the models arguments by updating the model.__dict__ values.
        """
        params = params.copy()
        param_grid = param_grid.copy()
        params_transform = {}
        for key in params.keys():
            if 'log10.' in key:
                log10_transform = True
            else:
                log10_transform = False
            key = key.replace('log10.', '')

            # cast back to the type of the original grid entries
            type_str = str(type(param_grid[key][0]))
            if 'int' in type_str:
                if log10_transform:
                    params_transform[key] = int(10 ** params['log10.' + key])
                else:
                    params_transform[key] = int(params[key])
            elif 'float' in type_str:
                if log10_transform:
                    params_transform[key] = float(10 ** params['log10.' + key])
                else:
                    params_transform[key] = float(params[key])
            elif 'str' in type_str:  # index the param grid for hyperparams using 'choice'
                params_transform[key] = param_grid[key][params[key]]

            if 'densenet' not in model_ID.lower():
                model.__dict__[key] = params_transform[key]

            assert (type_str == str(type(params_transform[key]))), 'type(param_grid[key][0]) changed from ' + type_str + ' to ' + str(type(param_grid[key][0])) + ' after updating params for key:' + str(key)
            if 'str' in type_str:
                assert (params_transform[key] in param_grid[key]), 'params_transform[' + key + ']=' + str(params_transform[key]) + ' is not in the list of valid parameter choices:' + str(param_grid[key])
            else:
                assert (params_transform[key] <= max(param_grid[key]) and params_transform[key] >= min(param_grid[key])), 'params_transform[' + key + ']=' + str(params_transform[key]) + ' does not lie in the range of valid values:' + str([min(param_grid[key]), max(param_grid[key])])

        if 'densenet' in model_ID.lower():
            # DenseNet-style entries store a builder; construct the model now
            model = model(**params_transform)
        return params_transform, model

    def _objective(self, params, model_ID, model_dict, X, y, **kwargs):
        """
        Objective function for hyperopt fmin. Note hyperopt assumes the only argument required is the params argument, thus before passing this objective as an argument into the hyperopt.fmin() function, we specify the other arguments using the functools.partial() function (see the _single_model_BayesianSearchCV() function code for more details)

        Arguments:
        ----------
        params: hyperparameter dictionary for an individual evaluation
        model_dict: dictionary of form {'model': estimator/model object,
                                        'param_grid': dictionary defining the hyperparameter bounds}
        X: dataframe of features on which the cv_score will be evaluated
        y: dataframe of labels on which the cv_score will be evaluated

        Returns:
        -------
        objective: dictionary of form {'loss': cv_score,
                                       'params': hyperparameters used in the evaluation,
                                       'status': hyperopt.STATUS_OK,
                                       'eval_time': evaluation time}

        Notes:
        ------
        sklearn-style models try to maximize their score by default, while hyperopt assumes we are trying to minimize our loss, thus if a scoring metric is not defined, or if a metric is specified with a maximize boolean==True, the cv_score will be transformed by cv_score=1/cv_score before being output to the hyperopt fmin optimizer.
        In contrast, in Neural Net models, the default scorer is the loss function, thus the cv_score will only be transformed to 1/cv_score if scoring['maximize']=True and scoring['metric']!=None
        """
        model = model_dict['model']
        param_grid = model_dict['param_grid'].copy()
        params = params.copy()
        obj_verbose = max(0, self.verbose - 2)

        type_X = str(type(X))
        if 'dask' in type_X:
            # materialize dask collections before cross-validation
            X = X.compute()
            y = y.compute()

        if obj_verbose >= 2:
            print('params', params)

        params_transform, model = self._update_model_params(params,
                                                            model_ID,
                                                            model,
                                                            param_grid)
        type_model = str(type(model))
        if obj_verbose >= 2:
            print('params_transform', params_transform)

        if 'sklearn' in type_model or 'xgboost' in type_model:
            cv_scores = _sklearn_model_selection.cross_val_score(model, X, y,
                                                                 scoring=self.scoring['metric'],
                                                                 cv=self.cv,
                                                                 n_jobs=self.n_jobs,
                                                                 verbose=obj_verbose)
        else:  # using neural net function
            import tensorflow as _tf
            # check for kwargs
            epochs = 100
            batch_size = 32
            callbacks = [_tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=10)]
            for item in kwargs.items():
                if 'epochs' in item[0]:
                    epochs = item[1]
                elif 'batch_size' in item[0]:
                    batch_size = item[1]
                elif 'callbacks' in item[0]:
                    callbacks = item[1]
            cv_scores = _NeuralNet.cross_val_score(model,
                                                   batch_size,
                                                   epochs,
                                                   X, y,
                                                   callbacks,
                                                   scoring=self.scoring['metric'],
                                                   cv=self.cv,
                                                   verbose=obj_verbose)
        cv_score = _np.mean(cv_scores)

        # hyperopt minimizes, so invert scores that are meant to be maximized.
        # NOTE(review): 1/score (rather than -score) fails for score == 0 and
        # flips ordering for negative scores -- TODO confirm intended.
        if 'sklearn' in type_model or 'xgboost' in type_model:
            if self.scoring['maximize'] == True or self.scoring['metric'] is None:
                cv_score = 1 / cv_score
        else:
            if self.scoring['maximize'] == True and self.scoring['metric'] is not None:
                cv_score = 1 / cv_score

        objective = {'loss': cv_score,
                     'params': params,
                     'status': _hyperopt.STATUS_OK,
                     'eval_time': _time.time()}
        return objective

    def _single_model_BayesianSearchCV(self,
                                       model_ID,
                                       model_dict,
                                       X_train, y_train,
                                       X_test, y_test,
                                       path_model_dir,
                                       refit=True,
                                       **kwargs):
        """
        Run BayesianSearchCV on a single model of interest, save the results, and return the updated model_dict

        Arguments:
        ----------
        model_ID: name/key of the model under evaluation
        model_dict: dictionary of form {'model': estimator/model object,
                                        'param_grid': dictionary defining the hyperparameter bounds}
        X_train, y_train, X_test, y_test: training and test sets under evaluation
        path_model_dir: path to directory where the model results will be saved. For non-NeuralNet models, the model_dict will be saved as model_dict.dill. For NeuralNets, the model and other relevant parameters will be saved using keras-based saving methods.
        refit: boolean. whether or not to refit the model on the full training set using the best_params

        Returns:
        --------
        model_dict: the passed model_dict, but with key-value pairs for: 'best_params', 'best_model', 'best_cv_score'
        """
        if self.verbose >= 1:
            print('Fitting', self.cv, 'folds for each of', self.max_evals, 'candidates, totalling', self.cv * self.max_evals, 'fits')

        model_dict = model_dict.copy()
        model = model_dict['model']
        type_model = str(type(model))
        model_type = str(type(model_dict['model']))
        param_grid = model_dict['param_grid'].copy()

        # bind the fixed arguments so hyperopt sees a params-only objective
        objective = _functools.partial(self._objective,
                                       model_ID=model_ID,
                                       model_dict=model_dict,
                                       X=X_train, y=y_train,
                                       **kwargs)

        space = self._build_space(param_grid)
        if self.verbose >= 4:
            self._plot_space(space)

        best_params_bad_keys = _hyperopt.fmin(fn=objective,
                                              space=space,
                                              algo=_hyperopt.tpe.suggest,
                                              max_evals=self.max_evals,
                                              trials=_hyperopt.Trials(),
                                              verbose=self.verbose)
        # hyperopt doesn't return the best params dict with keys matching the 'space' keys.
        # This breaks handling of 'log10.' transformed parameters. Fix is implemented below
        best_params_ = {}
        for key in space.keys():
            best_params_[key] = best_params_bad_keys[key.replace('log10.', '')]
        if self.verbose >= 3:
            print('hyperopt_input_best_params_:', best_params_)

        # re-evaluate the objective once to recover the best CV loss
        best_score_ = self._objective(best_params_,
                                      model_ID,
                                      model_dict=model_dict,
                                      X=X_train, y=y_train)['loss']

        # transform params back to original model values
        best_params_, best_model_ = self._update_model_params(best_params_, model_ID, model, param_grid)
        if self.verbose >= 3:
            print('model_input_best_params_:', best_params_)

        if refit:
            if 'sklearn' in type_model or 'xgboost' in type_model:
                # sklearn expects 1D targets for single-output problems
                if y_train.shape[1] == 1:
                    y_train = _np.array(y_train).reshape(-1,)
                best_model_.fit(X_train, y_train)
            else:  # using neural net function
                import tensorflow as _tf
                if 'dataframe' in str(type(X_train)).lower():
                    X_train = _np.array(X_train)
                    X_test = _np.array(X_test)
                if 'dataframe' in str(type(y_train)).lower():
                    y_train = _np.array(y_train)
                    y_test = _np.array(y_test)
                # check for kwargs
                epochs = 100
                batch_size = 32
                callbacks = [_tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=10)]
                for item in kwargs.items():
                    if 'epochs' in item[0]:
                        epochs = item[1]
                    elif 'batch_size' in item[0]:
                        batch_size = item[1]
                    elif 'callbacks' in item[0]:
                        callbacks = item[1]
                history = best_model_.fit(x=X_train,
                                          y=y_train,
                                          validation_data=(X_test, y_test),
                                          batch_size=batch_size,
                                          epochs=epochs,
                                          verbose=max(0, self.verbose - 2),
                                          callbacks=callbacks)

        model_dict['best_params'] = best_params_
        model_dict['best_model'] = best_model_
        model_dict['best_cv_score'] = best_score_

        if 'sklearn' in model_type or 'xgboost' in model_type:
            self.save(model_dict, 'model_dict', 'dill', path_model_dir)
        else:
            # keras models cannot be dill-pickled; save weights + params instead
            if not _os.path.isdir(path_model_dir):
                _os.makedirs(path_model_dir)
            best_model_.save(_os.path.join(path_model_dir, 'best_model.h5'))
            self.save(model_dict['best_params'], 'best_params', 'dill', path_model_dir)
        return model_dict

    def fit(self,
            X_train,
            y_train,
            X_test,
            y_test,
            max_evals,
            **kwargs):
        """
        Fit the X_train, y_train dataset & evaluate metrics on X_test, y_test for each of the best models found in each individual models BayesianSearchCV

        Arguments:
        ---------
        X_train, y_train, X_test, y_test: train & test datasets (pandas or dask dataframes)
        max_evals: Max number of evaluations to perform during the BayesianSearchCV procedure for each model.
        kwargs: For use in neural network hyperopts: epochs, batch_size, callbacks

        Returns:
        -------
        None. The models_dict dictionary will be updated for each model to include key-value pairs for: 'best_params', 'best_model', 'best_cv_score', 'best_pred_score', and a key-value pair for each of the metrics in the metrics dictionary, where the 'best_pred_score' and the metrics are evaluated on the test set passed
        """
        self.max_evals = max_evals

        for key in self.models_dict.keys():
            path_model_dir = self.path_model_dirs[key]
            if self.verbose >= 1:
                print('\n----', key, '----')
                print('path_model_dir:', path_model_dir)

            model_dict = self.models_dict[key]
            model_type = str(type(model_dict['model']))
            if 'sklearn' in model_type or 'xgboost' in model_type:
                path_file = _os.path.join(path_model_dir, 'model_dict.dill')
            elif 'Net' in key:
                path_file = _os.path.join(path_model_dir, 'best_model.h5')
            # NOTE(review): path_file is unbound below (when retrain is False)
            # for models matching neither case -- TODO confirm coverage.

            if self.retrain or not _os.path.isfile(path_file):
                model_dict = self._single_model_BayesianSearchCV(key,
                                                                 model_dict,
                                                                 X_train, y_train,
                                                                 X_test, y_test,
                                                                 path_model_dir,
                                                                 **kwargs)
                self.models_dict[key] = model_dict
            else:  # reload previously trained model
                if 'sklearn' in str(type(self.models_dict[key]['model'])):
                    self.models_dict[key] = self.load('model_dict', 'dill', path_model_dir)
                elif 'Net' in key:
                    # check kwargs for epochs
                    epochs = 100
                    for item in self.kwargs.items():
                        if 'epochs' in item[0]: epochs = item[1]
                    self.models_dict[key]['best_model'] = _NeuralNet.utils.load_model(
                        _os.path.join(path_model_dir, 'best_model.h5'))
                    self.models_dict[key]['best_params'] = self.load('best_params', 'dill', path_model_dir)

            if 'Net' in key:
                y_pred = self.models_dict[key]['best_model'].predict(_np.array(X_test))
            else:
                y_pred = self.models_dict[key]['best_model'].predict(X_test)

            if 'Net' not in key:
                self.models_dict[key]['best_pred_score'] = self.models_dict[key]['best_model'].score(X_test, y_test)
                y_pred_proba = self.models_dict[key]['best_model'].predict_proba(X_test)[:, 1]
            else:
                if 'crossentropy' in self.models_dict[key]['best_model'].loss:
                    y_pred_proba = y_pred
                    # BUGFIX: assign the positive class when the predicted
                    # probability is >= 0.5 (the comparison was previously
                    # inverted: '< 0.5').
                    y_pred = (y_pred >= 0.5).astype(int)
                self.models_dict[key]['best_pred_score'] = self.models_dict[key]['best_model'].evaluate(_np.array(X_test),
                                                                                                        _np.array(y_test),
                                                                                                        verbose=0)

            if self.verbose >= 1:
                try:
                    print('\tbest_cv_score:', self.models_dict[key]['best_cv_score'])
                except Exception as e:
                    print('Exception occured for:' + str(e))
                try:
                    print('\tbest_pred_score:', self.models_dict[key]['best_pred_score'])
                except Exception as e:
                    print('Exception occured for:' + str(e))

            # evaluate the user-supplied metrics on the test set
            for metric_key in self.metrics.keys():
                if self.metrics[metric_key] is not None:
                    try:
                        # ROC-style metrics need probabilities, not hard labels
                        if 'roc' in metric_key:
                            self.models_dict[key][metric_key] = self.metrics[metric_key](y_test, y_pred_proba)
                        else:
                            self.models_dict[key][metric_key] = self.metrics[metric_key](y_test, y_pred)
                        print('\t', metric_key, ':', self.models_dict[key][metric_key])
                    except Exception as e:
                        print('Exception occured for', metric_key, ':', str(e))

            if 'sklearn' in str(type(self.models_dict[key]['model'])):
                self.save(self.models_dict[key], 'model_dict', 'dill', path_model_dir)
            elif 'Net' in key:
                # BUGFIX: the inner loop variable previously shadowed the outer
                # model-name 'key'; renamed to 'subset_key'.
                # NOTE(review): model_dict_subset is built but never saved or
                # returned -- TODO confirm whether it should be persisted.
                model_dict_subset = self.models_dict[key].copy()
                for subset_key in self.models_dict[key].keys():
                    if subset_key not in ['y_test', 'y_pred', 'best_pred_score'] + list(self.metrics.keys()):
                        model_dict_subset.pop(subset_key)
import os as _os
import hyperopt as _hyperopt
import time as _time
import functools as _functools
import warnings as _warnings
import matplotlib.pyplot as _plt
import sklearn.model_selection as _sklearn_model_selection
from .. import NeuralNet as _NeuralNet
from ... import file_utils as _file_utils
from hyperopt import base as _base
_base.have_bson = False
class GridSearchCV():
    """Hyperparameter grid search CV across heterogeneous model types.

    Dispatches to sklearn/xgboost, dask_ml, or the in-house NeuralNet grid
    search depending on the model/data types, and persists each model's
    results under ``path_GridSearchCV_dir``.
    """

    def __init__(self,
                 models_dict,
                 cv=4,
                 scoring=None,
                 metrics=None,
                 retrain=True,
                 path_GridSearchCV_dir='GridSearchCV',
                 n_jobs=-1,
                 verbose=2,
                 **kwargs):
        """
        hyperparameter GridSearchCV across different types of models

        Arguments:
        ----------
        models_dict: dictionary containing all models and their param_grid.
            - Dictionary Format: {'model name':{'model':model object,
                                                'param_grid': {parameter name, parameter list}}
        cv: cross-validation index.
        scoring: Default: None (interpreted as {'metric':None,'maximize':True}).
            - If scoring['metric'] = None, use default score for given sklearn model, or use 'loss' for neural network.
            - For custom scoring functions, pass 'scoring = {'metric':function or key-word string,
                                                             'maximize':True/False}
            - for sklearn/dask_ml GridSearchCV, a list of valid metrics can be printed via 'sklearn.metrics.SCORERS.keys()'
        metrics: dictionary of the form {metric name (str): metric function (sklearn.metrics...)}. The metric will be evaluated after CV on the test set
        retrain: Boolean. whether or not you want to retrain the model if it has already been saved in the path_GridSearchCV_dir folder
        path_GridSearchCV_dir: root directory where the GridSearchCV outputs will be saved.
        n_jobs: int. Default: -1. number of parallel jobs to run. If -1, all available threads will be used
            - Note: parallel computing is not supported for Neural Net models
        verbose: verbosity of prints.
        """
        # Use None sentinels instead of mutable default arguments so the
        # default dicts are not shared across instances.
        if scoring is None:
            scoring = {'metric': None, 'maximize': True}
        if metrics is None:
            metrics = {None: None}
        self.models_dict = models_dict
        self.cv = cv
        self.scoring = scoring
        self.metrics = metrics
        self.retrain = retrain
        self.path_GridSearchCV_dir = path_GridSearchCV_dir
        self.n_jobs = n_jobs
        self.verbose = verbose
        self.kwargs = kwargs
        # file persistence helpers (dill-based save/load)
        self.save = _file_utils.save
        self.load = _file_utils.load

    def load_NeuralNet(self, path_model_dir, X_train, y_train, epochs):
        """
        Load a previously saved model_dict for the Neural Net case.

        Note: X_train, y_train and epochs are accepted for interface
        compatibility but are not used when reloading.
        """
        # fetch best params
        best_params_ = self.load('best_params_', 'dill', path_model_dir)
        # rebuild model_dict
        model_dict = _NeuralNet.DenseNet.model_dict(**best_params_)
        model_dict['best_model'] = _NeuralNet.utils.load_model(
            _os.path.join(path_model_dir, 'best_estimator_.h5'))
        model_dict['best_params'] = best_params_
        # the CV score is not persisted for neural nets, so flag it as missing
        model_dict['best_cv_score'] = _np.nan
        return model_dict

    def _single_model_GridSearchCV(self,
                                   model_dict_,
                                   X_train, y_train, X_test, y_test,
                                   path_model_dir):
        """
        Run Grid Search CV on a single model and return the updated model_dict_
        with 'best_model', 'best_params' and 'best_cv_score' filled in.
        """
        type_model = str(type(model_dict_['model']))
        type_X_train = str(type(X_train))
        if ('sklearn' in type_model or 'xgboost' in type_model) and 'dask' not in type_X_train:
            GridSearchCV = _sklearn_model_selection.GridSearchCV(
                model_dict_['model'],
                model_dict_['param_grid'],
                n_jobs=self.n_jobs,
                cv=self.cv,
                scoring=self.scoring['metric'],
                verbose=_np.max((0, self.verbose - 1)))
            # sklearn expects 1D targets for single-output problems
            # (assumes y_train is 2D here -- TODO confirm for all callers)
            if y_train.shape[1] == 1:
                y_train = _np.array(y_train).reshape(-1,)
            GridSearchCV.fit(X_train, y_train)
        elif 'dask' in type_X_train:
            from ..dask_ml_extend import model_selection as dask_ml_model_selection
            GridSearchCV = dask_ml_model_selection.GridSearchCV(
                model_dict_['model'],
                model_dict_['param_grid'],
                n_jobs=self.n_jobs,
                cv=self.cv,
                scoring=self.scoring['metric'],
                )
            GridSearchCV.fit(X_train, y_train)
        else:  # run gridsearch using the neural net implementation
            if self.scoring['metric'] is None:
                # neural nets default to minimizing the loss
                self.scoring = {'metric': 'loss', 'maximize': False}
            # check kwargs for epochs
            epochs = 100
            for item in self.kwargs.items():
                if 'epochs' in item[0]: epochs = item[1]
            GridSearchCV = _NeuralNet.search.GridSearchCV(
                model_dict_['model'],
                model_dict_['param_grid'],
                cv=self.cv,
                scoring=self.scoring,
                epochs=epochs,
                path_report_folder=path_model_dir,
                verbose=_np.max((0, self.verbose - 1)))
            GridSearchCV.fit(X_train, y_train, X_test, y_test)
        model_dict_['best_model'] = GridSearchCV.best_estimator_
        model_dict_['best_params'] = GridSearchCV.best_params_
        model_dict_['best_cv_score'] = GridSearchCV.best_score_
        if 'sklearn' in str(type(model_dict_['model'])):
            self.save(model_dict_, 'model_dict', 'dill', path_model_dir)
        return model_dict_

    def fit(self,
            X_train,
            y_train,
            X_test,
            y_test):
        """
        Fit the X_train, y_train dataset & evaluate metrics on X_test, y_test for each of the best models found in each individual models GridSearchCV

        Arguments:
        ---------
        X_train, y_train, X_test, y_test: train & test datasets (pandas or dask dataframes)
        """
        # instantiate path_model_dirs dictionary so we can know where the models are saved
        self.path_model_dirs = {}
        for key in self.models_dict.keys():
            if self.verbose >= 1: print('\n----', key, '----')

            # define model directory
            path_model_dir = _os.path.join(self.path_GridSearchCV_dir, key)
            self.path_model_dirs[key] = path_model_dir
            if self.verbose >= 1: print('path_model_dir:', path_model_dir)

            model_type = type(self.models_dict[key]['model'])
            if 'sklearn' in str(model_type) or 'xgboost' in str(model_type):
                path_file = _os.path.join(path_model_dir, 'model_dict.dill')
            elif 'Net' in key:
                path_file = _os.path.join(path_model_dir, 'best_params_.dill')
            # NOTE(review): if a model is neither sklearn/xgboost nor named
            # '*Net*', path_file is unbound below when retrain is False --
            # TODO confirm the two cases above cover all supported models.

            if self.retrain or not _os.path.isfile(path_file):
                self.models_dict[key] = self._single_model_GridSearchCV(self.models_dict[key],
                                                                        X_train, y_train,
                                                                        X_test, y_test,
                                                                        path_model_dir)
            else:  # reload previously trained model
                if 'sklearn' in str(type(self.models_dict[key]['model'])):
                    self.models_dict[key] = self.load('model_dict', 'dill', path_model_dir)
                elif 'Net' in key:
                    # check kwargs for epochs
                    epochs = 100
                    for item in self.kwargs.items():
                        if 'epochs' in item[0]: epochs = item[1]
                    self.models_dict[key] = self.load_NeuralNet(path_model_dir,
                                                                X_train, y_train,
                                                                epochs)

            y_pred = self.models_dict[key]['best_model'].predict(X_test)

            # hold-out score using the estimator's native scorer/loss
            if 'Net' not in key:
                self.models_dict[key]['best_pred_score'] = self.models_dict[key]['best_model'].score(X_test, y_test)
            else:
                self.models_dict[key]['best_pred_score'] = self.models_dict[key]['best_model'].evaluate(X_test, y_test, verbose=0)

            if self.verbose >= 1:
                print('\tbest_cv_score:', self.models_dict[key]['best_cv_score'])
                print('\tbest_pred_score:', self.models_dict[key]['best_pred_score'])

            # evaluate the user-supplied metrics on the test set
            for metric_key in self.metrics.keys():
                if self.metrics[metric_key] is not None:
                    try:
                        self.models_dict[key][metric_key] = self.metrics[metric_key](y_test, y_pred)
                        print('\t', metric_key, ':', self.models_dict[key][metric_key])
                    except Exception as e:
                        print('Exception occured for', metric_key, ':', str(e))

            if 'sklearn' in str(type(self.models_dict[key]['model'])):
                self.save(self.models_dict[key], 'model_dict', 'dill', path_model_dir)
            elif 'Net' in key:
                # BUGFIX: the inner loop variable previously shadowed the outer
                # model-name 'key'; renamed to 'subset_key'.
                # NOTE(review): model_dict_subset is built but never saved or
                # returned -- TODO confirm whether it should be persisted.
                model_dict_subset = self.models_dict[key].copy()
                for subset_key in self.models_dict[key].keys():
                    if subset_key not in ['y_test', 'y_pred', 'best_pred_score'] + list(self.metrics.keys()):
                        model_dict_subset.pop(subset_key)
class BayesianSearchCV():
def __init__(self,
models_dict,
cv = 4,
scoring= {'metric':None,'maximize':True},
metrics = {None:None},
retrain = True,
path_BayesianSearchCV_dir = 'BayesianSearchCV',
n_jobs = -1,
verbose = 2,
**kwargs):
"""
Hyperparameter BayesianSearchCV across different types of models. This class leverages the hyperopt API.
Arguments:
----------
models_dict: dictionary containing all models and their param_grid.
- Dictionary Format: {'model name':{'model':model object,
'param_grid': {parameter name, parameter list}]
cv: cross-validation index.
scoring: Default: None.
- If scoring['metric'] = None, use default score for given sklearn model, or use 'loss' for neural network.
- For custom scoring functions, pass 'scoring = {'metric':function or key-word string,
'maximize':True/False}
- for sklearn/xgboost/dask_ml GridSearchCV, a list of valid metrics can be printed via 'sklearn.metrics.SCORERS.keys()'
metrics: dictionary of the form {metric name (str): metric function (sklearn.metrics...)}. The metric will be evaluated after CV on the test set
retrain: Boolean. whether or not you want to retrain the model if it is already been saved in the path_GridSearchCV_dir folder
path_BayesianSearchCV_dir: root directory where the BayesianSearchCV outputs will be saved.
n_jobs: int. Defualt: -1. number of parallel jobs to run. If -1, all available threads will be used
- Note: parallel computing is not supported for Neural Net models
verbose: print-out verbosity
Notes:
------
Technically, the optimization is performed using the tree-structured parzeen estimator approach, not a pure bayesian estimator. This approach is more efficient handling hyperparameter optimization tasks with high dimensions and small fitness evaluation budgets. See more details in the paper linked below
https://papers.nips.cc/paper/4443-algorithms-for-hyper-parameter-optimization.pdf
"""
self.models_dict = models_dict
self.cv = cv
self.scoring = scoring
self.metrics = metrics
self.retrain = retrain
self.path_BayesianSearchCV_dir = path_BayesianSearchCV_dir
self.n_jobs = n_jobs
self.verbose = verbose
self.kwargs = kwargs
self.save = _file_utils.save
self.load = _file_utils.load
#define model directory
self.path_model_dirs = {}
for key in self.models_dict.keys():
self.path_model_dirs[key] = _os.path.join(self.path_BayesianSearchCV_dir, key)
def _build_space(self, param_grid):
"""
Build the hyperparameter space for input into hyperopt.fmin() function.
Arguments:
----------
param_grid: hyperparameter dictionary with key-list pairs.
Returns:
--------
space: dictionary with key-hyperopt.hp... pairs
Notes:
------
For each hyperparameter of interest, the max and min in the list of possible values in the param_grid[key] element is evaluated. If the difference between the number of decades between the min and max value is greater than 1, a uniform probability distribution will be sampled between log10(min) and log10(max). This will result in the prefixe 'log10.' being pre-pended to the key in the 'space' dict for the given hyperparameter under consideration.
For the case of non-numeric hyperparameters, the space[key] value will be assigned using the hyperopt.hp.choice() function, with the choices being in integer form (index), rather than their raw string value.
To convert the hyperparameters from hyperopts 'space' back to the parameters required by the model under evaluation, we run the function '_update_model_params()' in each instance of the 'objective' function evaluation.
"""
if self.verbose>9:
'Building param space...'
_warnings.filterwarnings('ignore')
param_grid = param_grid.copy()
space = {}
for key in param_grid.keys():
params = param_grid[key]
if self.verbose>9:
print('\tinput:',key, params)
type_str = str(type(params[0]))
if 'float' in type_str or 'int' in type_str:
min_ = min(params)
max_ = max(params)
log10_min_ = _np.log10(min_)
log10_max_ = _np.log10(max_)
if round(log10_max_)-round(log10_min_)>1 and round(log10_max_)-round(log10_min_)!=_np.inf: # use uniform distribution on log spacing
space['log10.'+key] = _hyperopt.hp.uniform(key, log10_min_, log10_max_)
if self.verbose>9:
print('\toutput:','log10.'+key, 'uniform', log10_min_, log10_max_)
else:
if 'int' in type_str:
space[key] = _hyperopt.hp.quniform(key, min_, max_, 1)
if self.verbose>9:
print('\toutput:',key, 'quniform', min_, max_)
elif 'float' in type_str:
space[key] = _hyperopt.hp.uniform(key, min_, max_)
if self.verbose>9:
print('\toutput:',key, 'uniform', min_, max_)
elif 'str' in type_str:
space[key] = _hyperopt.hp.choice(key, [i for i in range(len(params))])
if self.verbose>9:
print('\toutput:',key, 'choice', [i for i in range(len(params))])
else:
raise Exception('type(params[0]) is '+type_str+'. This type of hyperparameter is not yet supported.')
assert(len(space.keys())==len(param_grid.keys())), 'len(space.keys())='+str(len(space.keys()))+', which is not equal to len(param_grid.keys())='+str(len(param_grid.keys()))
if self.verbose>9:
print('...finished building space')
_warnings.filterwarnings('default')
return space
def _plot_space(self, space):
'''
Generate plots to visualize the probability distribution for the parameter space being evaluated.
Arguments:
----------
space: dictionary of form {<parameter ID>: hyperopt.hp... object} generated from the '_build_space()' function
Returns:
-------
None. displays histograms showing the probability space
'''
n_samples = 5000
for title, space_slice in space.items():
evaluated = [_hyperopt.pyll.stochastic.sample(space_slice) for _ in range(n_samples)]
_plt.title(title)
_plt.hist(evaluated)
_plt.grid(which='both',visible=False)
_plt.show()
def _update_model_params(self, params, model_ID, model, param_grid):
"""
Iterate through the params and update the models arguments/params, ensuring the type of each parameter does not change after updating and transforming log10 distributions back to their base value
Arguments:
----------
params: hyperparameter dictionary being evaluated by hyperopt
model: model being evaluated
param_grid: original parameter grid under evaluation
Returns
-------
params_transform: dictionary similar to params, but transformed to match the inputs required by the model
model: Updated model object with the params under evaluation applied to the models arguments by updating the model.__dict__ values.
"""
params = params.copy()
param_grid = param_grid.copy()
params_transform = {}
for key in params.keys():
if 'log10.' in key:
log10_transform = True
else:
log10_transform = False
key = key.replace('log10.','')
type_str = str(type(param_grid[key][0]))
if 'int' in type_str:
if log10_transform:
params_transform[key] = int(10**params['log10.'+key])
else:
params_transform[key] = int(params[key])
elif 'float' in type_str:
if log10_transform:
params_transform[key] = float(10**params['log10.'+key])
else:
params_transform[key] = float(params[key])
elif 'str' in type_str: #index the param grid for hyperparams using 'choice'
params_transform[key] = param_grid[key][params[key]]
if 'densenet' not in model_ID.lower():
model.__dict__[key] = params_transform[key]
assert(type_str == str(type(params_transform[key]))), 'type(param_grid[key][0]) changed from '+type_str+' to '+str(type(param_grid[key][0]))+' after updating params for key:'+str(key)
if 'str' in type_str:
assert(params_transform[key] in param_grid[key]), 'params_transform['+key+']='+str(params_transform[key])+' is not in the list of valid parameter choices:'+str(param_grid[key])
else:
assert(params_transform[key]<=max(param_grid[key]) and params_transform[key]>=min(param_grid[key])), 'params_transform['+key+']='+str(params_transform[key])+' does not lie in the range of valid values:'+str([min(param_grid[key]),max(param_grid[key])] )
if 'densenet' in model_ID.lower():
model = model(**params_transform)
return params_transform, model
def _objective(self, params, model_ID, model_dict, X, y, **kwargs):
"""
Objective function for hyperopt fmin. Note hyperopt assumes the only argument required is the params argument, thus before passing this objective as an argument into the hyperopt.fmin() function, we specify the other arguments using the functools.partial() function (see the _single_model_BayesianSearchCV() function code for more details)
Arguments:
----------
params: hyperparameter dictionary for an individual evaluation
model_dict: dictionary of form {'model': estimator/model object,
'param_grid':dictionary defining the hyperparameter bounds}
X: dataframe of features on which the cv_score will be evaluated
y: dataframe of labels on which the cv_score will be evaluated
Returns:
-------
objective: dictionary of form {'loss': cv_score,
'params': hyperparameters using the the evaluation,
'status': hyperopt.STATUS_OK,
'eval_time': evaluation time}
Notes:
------
sklearn-style models try to maximize their score by default, while hyperopt assumes we are trying to minimize our loss, thus if a scoring metric is not defined, or if a metric is specified with a maximize boolean==True, the cv_score will be transformed by cv_score=1/cv_score before being output to the hyperopt fmin optimizer.
In contrast, in Neural Net models, the default scorer is the loss function, thus if the cv_score will only be transformed to 1/cv_score if scoring['maximize']=True and scoring['metric']!=None
"""
model = model_dict['model']
param_grid = model_dict['param_grid'].copy()
params = params.copy()
obj_verbose = max(0,self.verbose-2)
type_X = str(type(X))
if 'dask' in type_X:
X = X.compute()
y = y.compute()
if obj_verbose>=2:
print('params',params)
params_transform, model = self._update_model_params(params,
model_ID,
model,
param_grid)
type_model = str(type(model))
if obj_verbose>=2:
print('params_transform',params_transform)
if 'sklearn' in type_model or 'xgboost' in type_model:
cv_scores = _sklearn_model_selection.cross_val_score(model, X, y,
scoring= self.scoring['metric'],
cv = self.cv,
n_jobs= self.n_jobs,
verbose = obj_verbose
)
else: #using neural net function
import tensorflow as _tf
#check for kwargs
epochs = 100
batch_size = 32
callbacks = [_tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience =10)]
for item in kwargs.items():
if 'epochs' in item[0]:
epochs = item[1]
elif 'batch_size' in item[0]:
batch_size = item[1]
elif 'callbacks' in item[0]:
callbacks = item[1]
cv_scores = _NeuralNet.cross_val_score(model,
batch_size,
epochs,
X, y,
callbacks,
scoring = self.scoring['metric'],
cv = self.cv,
verbose= obj_verbose)
cv_score = _np.mean(cv_scores)
if 'sklearn' in type_model or 'xgboost' in type_model:
if self.scoring['maximize']==True or self.scoring['metric']==None:
cv_score = 1/cv_score
else:
if self.scoring['maximize']==True and self.scoring['metric']!=None :
cv_score = 1/cv_score
objective = {'loss': cv_score,
'params': params,
'status': _hyperopt.STATUS_OK,
'eval_time': _time.time()}
return objective
def _single_model_BayesianSearchCV(self,
model_ID,
model_dict,
X_train, y_train,
X_test, y_test,
path_model_dir,
refit=True,
**kwargs):
"""
Run BayesianSearchCV on a single model of interest, save the results, and return the updated model_dict
Arguments:
----------
model_dict: dictionary of form {'model': estimator/model object,
'param_grid':dictionary defining the hyperparameter bounds}
X_train, y_train, X_test, y_test: training and test sets under evaluation
path_model_dir: path to directory where the model results will be saved. For none-NeuralNet models, the model_dict will be saved as model_dict.dill. For NeuralNets, the model and othere relevant parameters will be saved using keras-based saving methods.
refit: boolean. whether or not to refit the model on the full training set using the best_params
Returns:
--------
model_dict: the passed model_dict, but with key-value pairs for: 'best_params', 'best_model', 'best_cv_score'
"""
if self.verbose>=1:
print('Fitting',self.cv,'folds for each of',self.max_evals,'candidates, totalling',self.cv*self.max_evals,'fits')
model_dict = model_dict.copy()
model = model_dict['model']
type_model = str(type(model))
model_type = str(type(model_dict['model']))
param_grid = model_dict['param_grid'].copy()
objective = _functools.partial(self._objective,
model_ID = model_ID,
model_dict = model_dict,
X = X_train, y=y_train,
**kwargs)
space = self._build_space(param_grid)
if self.verbose>=4:
self._plot_space(space)
best_params_bad_keys = _hyperopt.fmin(fn = objective,
space = space,
algo = _hyperopt.tpe.suggest,
max_evals = self.max_evals,
trials = _hyperopt.Trials(),
verbose = self.verbose)
# hyperopt doesn't return the best params dict with keys matching the 'space' keys.
# This breaks handling of 'log10.' transformed parameters. Fix is implemented below
best_params_ = {}
for key in space.keys():
best_params_[key] = best_params_bad_keys[key.replace('log10.','')]
if self.verbose>=3:
print('hyperopt_input_best_params_:',best_params_)
best_score_ = self._objective(best_params_,
model_ID,
model_dict = model_dict,
X = X_train, y=y_train)['loss']
#transform params back to original model values
best_params_, best_model_ = self._update_model_params(best_params_, model_ID, model, param_grid)
if self.verbose>=3:
print('model_input_best_params_:',best_params_)
if refit:
if 'sklearn' in type_model or 'xgboost' in type_model:
if y_train.shape[1]==1:
y_train = _np.array(y_train).reshape(-1,)
best_model_.fit(X_train, y_train)
else: #using neural net function
import tensorflow as _tf
if 'dataframe' in str(type(X_train)).lower():
X_train = _np.array(X_train)
X_test = _np.array(X_test)
if 'dataframe' in str(type(y_train)).lower():
y_train = _np.array(y_train)
y_test = _np.array(y_test)
#check for kwargs
epochs = 100
batch_size = 32
callbacks = [_tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience =10)]
for item in kwargs.items():
if 'epochs' in item[0]:
epochs = item[1]
elif 'batch_size' in item[0]:
batch_size = item[1]
elif 'callbacks' in item[0]:
callbacks = item[1]
history = best_model_.fit(x= X_train,
y= y_train,
validation_data=(X_test, y_test),
batch_size=batch_size,
epochs = epochs,
verbose= max(0,self.verbose-2),
callbacks = callbacks)
model_dict['best_params'] = best_params_
model_dict['best_model'] = best_model_
model_dict['best_cv_score'] = best_score_
if 'sklearn' in model_type or 'xgboost' in model_type:
self.save(model_dict, 'model_dict', 'dill', path_model_dir)
else:
if _os.path.isdir(path_model_dir)==False:
_os.makedirs(path_model_dir)
best_model_.save(_os.path.join(path_model_dir, 'best_model.h5'))
self.save(model_dict['best_params'], 'best_params', 'dill', path_model_dir)
return model_dict
def fit(self,
X_train,
y_train,
X_test,
y_test,
max_evals,
**kwargs,
):
"""
Fit the X_train, y_train dataset & evaluate metrics on X_test, y_test for each of the best models found in each individual models GridSearchCV
Arguments:
---------
X_train, y_train, X_test, y_test: train & test datasets (pandas or dask dataframes)
max_evals: Max number of evaluations to perform during the BayesianSearchCV procedure for each model.
kwargs: For use in neural network hyperopts: epochs, batch_size, callbacks
Returns:
-------
None. The models_dict dictionary will be updated for each model to include key-value pairs for: 'best_params', 'best_model', 'best_cv_score', 'best_pred_score', and a key-value pair for each of the metrics in the metrics dictionary, where the 'best_pred_score' and the metrics are evaluated on the test set passed
"""
self.max_evals = max_evals
for key in self.models_dict.keys():
path_model_dir = self.path_model_dirs[key]
if self.verbose >=1:
print('\n----',key,'----')
print('path_model_dir:',path_model_dir)
model_dict = self.models_dict[key]
model_type = str(type(model_dict['model']))
if 'sklearn' in model_type or 'xgboost' in model_type:
path_file = _os.path.join(path_model_dir,'model_dict.dill')
elif 'Net' in key:
path_file = _os.path.join(path_model_dir,'best_model.h5')
if self.retrain or _os.path.isfile(path_file)==False:
model_dict = self._single_model_BayesianSearchCV(key,
model_dict,
X_train, y_train,
X_test, y_test,
path_model_dir,
**kwargs)
self.models_dict[key] = model_dict
else: #reload previously trained model
if 'sklearn' in str(type(self.models_dict[key]['model'])):
self.models_dict[key] = self.load('model_dict', 'dill', path_model_dir)
elif 'Net' in key:
#check kwargs for epochs
epochs = 100
for item in self.kwargs.items():
if 'epochs' in item[0]: epochs = item[1]
self.models_dict[key]['best_model'] = _NeuralNet.utils.load_model(
_os.path.join(path_model_dir,'best_model.h5'))
self.models_dict[key]['best_params'] = self.load('best_params', 'dill', path_model_dir)
if 'Net' in key:
y_pred = self.models_dict[key]['best_model'].predict(_np.array(X_test))
else:
y_pred = self.models_dict[key]['best_model'].predict(X_test)
if 'Net' not in key:
self.models_dict[key]['best_pred_score'] = self.models_dict[key]['best_model'].score(X_test, y_test)
y_pred_proba = self.models_dict[key]['best_model'].predict_proba(X_test)[:,1]
else:
if 'crossentropy' in self.models_dict[key]['best_model'].loss:
y_pred_proba = y_pred
y_pred = (y_pred < 0.5).astype(int)
self.models_dict[key]['best_pred_score'] = self.models_dict[key]['best_model'].evaluate(_np.array(X_test),
_np.array(y_test),
verbose =0)
if self.verbose >=1:
try:
print('\tbest_cv_score:',self.models_dict[key]['best_cv_score'])
except Exception as e:
print('Exception occured for:'+str(e))
try:
print('\tbest_pred_score:',self.models_dict[key]['best_pred_score'])
except Exception as e:
print('Exception occured for:'+str(e))
for metric_key in self.metrics.keys():
if self.metrics[metric_key] !=None:
try:
if 'roc' in metric_key:
self.models_dict[key][metric_key] = self.metrics[metric_key](y_test, y_pred_proba)
else:
self.models_dict[key][metric_key] = self.metrics[metric_key](y_test, y_pred)
print('\t',metric_key,':',self.models_dict[key][metric_key])
except Exception as e:
print('Exception occured for',metric_key,':',str(e))
if 'sklearn' in str(type(self.models_dict[key]['model'])):
self.save(self.models_dict[key], 'model_dict', 'dill', path_model_dir)
elif 'Net' in key:
model_dict_subset = self.models_dict[key].copy()
for key in self.models_dict[key].keys():
if key not in ['y_test','y_pred','best_pred_score'] +list(self.metrics.keys()):
model_dict_subset.pop(key) | 0.596316 | 0.209955 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensor2tensor.layers import gaussian_process
from tensor2tensor.utils import test_utils
import tensorflow as tf
tf.compat.v1.enable_eager_execution()
class GaussianProcessTest(tf.test.TestCase):
  """Unit tests for the Gaussian-process layers in `gaussian_process`."""

  @test_utils.run_in_graph_and_eager_modes()
  def testGaussianProcessPosterior(self):
    # Condition the GP layer on a small random training set, then score a
    # random test set under the posterior predictive distribution.
    num_train = 3
    num_test = 2
    num_inputs = 4
    num_outputs = 5
    train_x = tf.to_float(np.random.rand(num_train, num_inputs))
    train_y = tf.to_float(np.random.rand(num_train, num_outputs))
    gp_layer = gaussian_process.GaussianProcess(num_outputs,
                                                conditional_inputs=train_x,
                                                conditional_outputs=train_y)
    test_x = tf.to_float(np.random.rand(num_test, num_inputs))
    test_y = tf.to_float(np.random.rand(num_test, num_outputs))
    posterior_outputs = gp_layer(test_x)
    nats = -posterior_outputs.distribution.log_prob(test_y)
    self.evaluate(tf.global_variables_initializer())
    nats_val, posterior_val = self.evaluate([nats, posterior_outputs])
    # Negative log-likelihood reduces to a non-negative scalar.
    self.assertEqual(nats_val.shape, ())
    self.assertGreaterEqual(nats_val, 0.)
    self.assertEqual(posterior_val.shape, (num_test, num_outputs))

  @test_utils.run_in_graph_and_eager_modes()
  def testGaussianProcessPrior(self):
    # Without conditioning data, the layer acts as a GP prior on the
    # outputs of the preceding Dense layer.
    num_examples = 3
    num_inputs = 4
    num_outputs = 5
    inputs = tf.to_float(np.random.rand(num_examples, num_inputs))
    targets = tf.to_float(np.random.rand(num_examples, num_outputs))
    net = tf.keras.Sequential([
        tf.keras.layers.Dense(2, activation=None),
        gaussian_process.GaussianProcess(num_outputs),
    ])
    prior_outputs = net(inputs)
    log_likelihood = prior_outputs.distribution.log_prob(targets)
    self.evaluate(tf.global_variables_initializer())
    log_likelihood_val, outputs_val = self.evaluate(
        [log_likelihood, prior_outputs])
    # Log-probability of a density can be at most 0 here per the original
    # test's expectation; outputs keep the (batch, output_dim) shape.
    self.assertEqual(log_likelihood_val.shape, ())
    self.assertLessEqual(log_likelihood_val, 0.)
    self.assertEqual(outputs_val.shape, (num_examples, num_outputs))

  @test_utils.run_in_graph_and_eager_modes()
  def testSparseGaussianProcess(self):
    num_data = 10
    num_examples = 3
    num_inputs = 4
    num_outputs = 5
    inputs = tf.to_float(np.random.rand(num_examples, num_inputs))
    targets = tf.to_float(np.random.rand(num_examples, num_outputs))
    sparse_gp = gaussian_process.SparseGaussianProcess(num_outputs,
                                                       num_inducing=2)
    with tf.GradientTape() as tape:
      predictions = sparse_gp(inputs)
      nll = -tf.reduce_mean(predictions.distribution.log_prob(targets))
      # `sparse_gp.losses` carries the variational KL penalty, scaled by
      # the (assumed) full dataset size.
      kl_penalty = sum(sparse_gp.losses) / num_data
      total_loss = nll + kl_penalty
    self.evaluate(tf.global_variables_initializer())
    # Every model variable should receive a gradient from the NLL term.
    for grad in tape.gradient(nll, sparse_gp.variables):
      self.assertIsNotNone(grad)
    total_loss_val, predictions_val = self.evaluate([total_loss, predictions])
    self.assertEqual(total_loss_val.shape, ())
    self.assertGreaterEqual(total_loss_val, 0.)
    self.assertEqual(predictions_val.shape, (num_examples, num_outputs))

  @test_utils.run_in_graph_and_eager_modes()
  def testBayesianLinearModel(self):
    """Tests that model makes reasonable predictions."""
    np.random.seed(42)
    num_train = 5
    num_test = 2
    num_features = 3
    noise_variance = 0.01
    coeffs = tf.range(num_features, dtype=tf.float32)
    # Noisy linear data: y = x . coeffs + noise.
    train_x = tf.to_float(np.random.randn(num_train, num_features))
    train_y = (tf.tensordot(train_x, coeffs, [[-1], [0]])
               + noise_variance * tf.to_float(np.random.randn(num_train)))
    model = gaussian_process.BayesianLinearModel(noise_variance=noise_variance)
    model.fit(train_x, train_y)
    test_x = tf.to_float(np.random.randn(num_test, num_features))
    test_y = tf.tensordot(test_x, coeffs, [[-1], [0]])
    predictive = model(test_x)
    mean = predictive.distribution.mean()
    variance = predictive.distribution.variance()
    test_y_val, mean_val, variance_val = self.evaluate(
        [test_y, mean, variance])
    self.assertEqual(mean_val.shape, (num_test,))
    self.assertEqual(variance_val.shape, (num_test,))
    # Posterior mean should recover the noiseless targets closely, and the
    # predictive variance should not exceed the observation noise.
    self.assertAllClose(mean_val, test_y_val, atol=0.1)
    self.assertAllLessEqual(variance_val, noise_variance)
def train_neural_process(model,
                         train_data,
                         valid_data,
                         num_epochs,
                         batch_size,
                         learning_rate=1e-4):
  """Trains the NeuralProcess model.

  The validation set is scored after every epoch with the same ELBO-style
  objective used for training, and the lowest validation loss seen so far is
  returned (i.e. the loss of the early-stopped model). Note the model's
  weights are NOT restored to the best epoch; only the loss is tracked.

  Args:
    model: A NeuralProcess Model subclassing Keras model.
    train_data: (4-tuple of tensors) Values of x and y for contexts and targets.
    valid_data: (4-tuple of tensors) Values of x and y for contexts and targets.
    num_epochs: (int) Number of epochs to train the model for.
    batch_size: (int) Size of batch.
    learning_rate: (float) Learning rate for Adam optimizer.

  Returns:
    best_loss: (scalar) Lowest average validation loss observed across epochs.
  """
  optimizer = tf.keras.optimizers.Adam(learning_rate)
  context_x, context_y, target_x, target_y = train_data
  valid_context_x, valid_context_y, valid_target_x, valid_target_y = valid_data
  train_data_size = target_x.shape[0]
  num_updates_per_epoch = train_data_size // batch_size
  best_loss = np.inf
  valid_query = (valid_context_x, valid_context_y), valid_target_x
  for _ in range(num_epochs):
    for i in range(num_updates_per_epoch):
      start_idx, end_idx = batch_size * i, batch_size * (i + 1)
      batch_query = ((context_x[start_idx:end_idx],
                      context_y[start_idx:end_idx]),
                     target_x[start_idx:end_idx])
      batch_target_y = target_y[start_idx:end_idx]
      num_targets = tf.shape(batch_target_y)[1]
      with tf.GradientTape() as tape:
        predictive_dist = model(batch_query, batch_target_y)
        log_p = predictive_dist.log_prob(batch_target_y)
        # model.losses[-1] holds the per-problem KL term; tile it across
        # targets and normalize so the objective is a per-target average.
        kl = tf.tile(model.losses[-1], [1, num_targets])
        loss = -tf.reduce_mean(log_p - kl / tf.cast(num_targets, tf.float32))
      gradients = tape.gradient(loss, model.trainable_variables)
      optimizer.apply_gradients(zip(gradients, model.trainable_variables))
    # Epoch-end validation with the same objective.
    predictive_dist = model(valid_query, valid_target_y)
    log_p = predictive_dist.log_prob(valid_target_y)
    # BUG FIX: normalize the validation KL by the number of *validation*
    # targets rather than by `num_targets` left over from the last training
    # batch (the adjacent tf.tile already used the validation count).
    num_valid_targets = tf.shape(valid_target_y)[1]
    kl = tf.tile(model.losses[-1], [1, num_valid_targets])
    valid_loss = -tf.reduce_mean(
        log_p - kl / tf.cast(num_valid_targets, tf.float32))
    if valid_loss < best_loss:
      best_loss = valid_loss
  return best_loss
class NeuralProcessTest(tf.test.TestCase):
  """Smoke tests for the (Attentive) Neural Process models.

  Two model variants are exercised: a vanilla Neural Process (uniform
  attention over an identity representation) and an Attentive Neural
  Process (multihead attention over an MLP representation).
  """

  def setUp(self):
    # Create a dummy multi-task fake dataset
    num_train_problems = 32
    num_valid_problems = 32
    num_targets = 50
    num_contexts = 10
    input_dim = 5

    def _create_fake_dataset(num_problems):
      # Random targets of shape (problems, targets, input_dim)/(..., 1);
      # contexts are the first `num_contexts` target points of each problem.
      target_x = tf.cast(np.random.rand(num_problems,
                                        num_targets,
                                        input_dim),
                         tf.float32)
      target_y = tf.cast(np.random.rand(num_problems, num_targets, 1),
                         tf.float32)
      context_x, context_y = (target_x[:, :num_contexts, :],
                              target_y[:, :num_contexts, :])
      return (context_x, context_y, target_x, target_y)

    self.train_data = _create_fake_dataset(num_train_problems)
    self.valid_data = _create_fake_dataset(num_valid_problems)
    hidden_size = 128
    num_latents = 16
    # Vanilla NP: uniform attention over the raw (identity) inputs.
    np_attention_wrapper = gaussian_process.Attention(
        rep='identity', output_sizes=None, att_type='uniform')
    self.np_model = gaussian_process.NeuralProcess(
        latent_encoder_sizes=[hidden_size]*4,
        num_latents=num_latents,
        decoder_sizes=[hidden_size]*2 + [2],
        use_deterministic_path=True,
        deterministic_encoder_sizes=[hidden_size]*4,
        attention_wrapper=np_attention_wrapper)
    # Attentive NP: multihead attention over an MLP representation.
    anp_attention_wrapper = gaussian_process.Attention(
        rep='mlp', output_sizes=[hidden_size]*2, att_type='multihead')
    self.anp_model = gaussian_process.NeuralProcess(
        latent_encoder_sizes=[hidden_size]*4,
        num_latents=num_latents,
        decoder_sizes=[hidden_size]*2 + [2],
        use_deterministic_path=True,
        deterministic_encoder_sizes=[hidden_size]*4,
        attention_wrapper=anp_attention_wrapper)
    self.models = [self.np_model, self.anp_model]
    # Keep sizes around for shape assertions in the individual tests.
    self.num_latents, self.hidden_size, self.num_targets = (num_latents,
                                                            hidden_size,
                                                            num_targets)
    super(NeuralProcessTest, self).setUp()

  def test_termination(self):
    """Training for a couple of epochs completes and yields a finite loss."""
    for model in self.models:
      validation_loss = train_neural_process(
          model,
          self.train_data,
          self.valid_data,
          num_epochs=2,
          batch_size=16,
          learning_rate=1e-4)
      # NOTE(review): asserts the validation loss is non-negative; with a
      # random-data ELBO this presumably holds, but it is a property of the
      # fake dataset rather than a guarantee of the objective.
      self.assertGreaterEqual(validation_loss, 0.)

  def test_latent_encoder(self):
    """Latent encoder emits a per-problem distribution over the latents."""
    valid_context_x, valid_context_y, _, _ = self.valid_data
    batch_size = valid_context_x.shape[0]
    for model in self.models:
      dist = model.latent_encoder(valid_context_x, valid_context_y).distribution
      # The assertions expect a (latents,) location and a full
      # (latents x latents) scale matrix per problem.
      self.assertEqual(dist.loc.shape, (batch_size, self.num_latents))
      self.assertEqual(dist.scale.shape,
                       (batch_size, self.num_latents, self.num_latents))

  def test_deterministic_encoder(self):
    """Deterministic path produces one hidden vector per target point."""
    valid_context_x, valid_context_y, valid_target_x, _ = self.valid_data
    batch_size = valid_context_x.shape[0]
    for model in self.models:
      embedding = model.deterministic_encoder(
          valid_context_x, valid_context_y, valid_target_x)
      self.assertEqual(embedding.shape, (batch_size, self.num_targets,
                                         self.hidden_size))

  def test_call(self):
    """Calling the model yields a per-target predictive distribution."""
    valid_context_x, valid_context_y, valid_target_x, valid_target_y = self.valid_data
    batch_size = valid_context_x.shape[0]
    for model in self.models:
      query = (valid_context_x, valid_context_y), valid_target_x
      # test 'training' when target_y is available
      predictive_dist = model(query, valid_target_y)
      self.assertEqual(predictive_dist.loc.shape, (batch_size, self.num_targets,
                                                   1))
      self.assertEqual(predictive_dist.scale.shape,
                       (batch_size, self.num_targets, 1, 1))
      # KL losses registered during the training-mode call are non-negative.
      self.assertAllGreaterEqual(model.losses, 0.)
      # test 'testing' when target_y is unavailable
      predictive_dist = model(query)
      self.assertEqual(predictive_dist.loc.shape, (batch_size, self.num_targets,
                                                   1))
      self.assertEqual(predictive_dist.scale.shape,
                       (batch_size, self.num_targets, 1, 1))
if __name__ == '__main__':
  # Run all TestCase classes in this module. (Removed dataset-dump residue
  # that had been concatenated onto this line.)
  tf.test.main()
from __future__ import division
from __future__ import print_function
import numpy as np
from tensor2tensor.layers import gaussian_process
from tensor2tensor.utils import test_utils
import tensorflow as tf
tf.compat.v1.enable_eager_execution()
# NOTE(review): this class is a byte-for-byte duplicate of the
# GaussianProcessTest definition earlier in this file (the file content was
# concatenated twice); in Python this second definition shadows the first.
# Consider deleting one copy after confirming with the file's owner.
class GaussianProcessTest(tf.test.TestCase):
  """Unit tests for the Gaussian-process layers in `gaussian_process`."""

  @test_utils.run_in_graph_and_eager_modes()
  def testGaussianProcessPosterior(self):
    # Condition the GP on random training data, then score random test data
    # under the posterior predictive distribution.
    train_batch_size = 3
    test_batch_size = 2
    input_dim = 4
    output_dim = 5
    features = tf.to_float(np.random.rand(train_batch_size, input_dim))
    labels = tf.to_float(np.random.rand(train_batch_size, output_dim))
    layer = gaussian_process.GaussianProcess(output_dim,
                                             conditional_inputs=features,
                                             conditional_outputs=labels)
    test_features = tf.to_float(np.random.rand(test_batch_size, input_dim))
    test_labels = tf.to_float(np.random.rand(test_batch_size, output_dim))
    test_outputs = layer(test_features)
    test_nats = -test_outputs.distribution.log_prob(test_labels)
    self.evaluate(tf.global_variables_initializer())
    test_nats_val, outputs_val = self.evaluate([test_nats, test_outputs])
    # NLL is a non-negative scalar; outputs keep (batch, output_dim) shape.
    self.assertEqual(test_nats_val.shape, ())
    self.assertGreaterEqual(test_nats_val, 0.)
    self.assertEqual(outputs_val.shape, (test_batch_size, output_dim))

  @test_utils.run_in_graph_and_eager_modes()
  def testGaussianProcessPrior(self):
    # Without conditioning data the layer acts as a GP prior on the outputs
    # of the preceding Dense layer.
    batch_size = 3
    input_dim = 4
    output_dim = 5
    features = tf.to_float(np.random.rand(batch_size, input_dim))
    labels = tf.to_float(np.random.rand(batch_size, output_dim))
    model = tf.keras.Sequential([
        tf.keras.layers.Dense(2, activation=None),
        gaussian_process.GaussianProcess(output_dim),
    ])
    outputs = model(features)
    log_prob = outputs.distribution.log_prob(labels)
    self.evaluate(tf.global_variables_initializer())
    log_prob_val, outputs_val = self.evaluate([log_prob, outputs])
    self.assertEqual(log_prob_val.shape, ())
    self.assertLessEqual(log_prob_val, 0.)
    self.assertEqual(outputs_val.shape, (batch_size, output_dim))

  @test_utils.run_in_graph_and_eager_modes()
  def testSparseGaussianProcess(self):
    dataset_size = 10
    batch_size = 3
    input_dim = 4
    output_dim = 5
    features = tf.to_float(np.random.rand(batch_size, input_dim))
    labels = tf.to_float(np.random.rand(batch_size, output_dim))
    model = gaussian_process.SparseGaussianProcess(output_dim, num_inducing=2)
    with tf.GradientTape() as tape:
      predictions = model(features)
      nll = -tf.reduce_mean(predictions.distribution.log_prob(labels))
      # model.losses carries the variational KL penalty, scaled by the
      # (assumed) full dataset size.
      kl = sum(model.losses) / dataset_size
      loss = nll + kl
    self.evaluate(tf.global_variables_initializer())
    # Every variable should receive a gradient from the likelihood term.
    grads = tape.gradient(nll, model.variables)
    for grad in grads:
      self.assertIsNotNone(grad)
    loss_val, predictions_val = self.evaluate([loss, predictions])
    self.assertEqual(loss_val.shape, ())
    self.assertGreaterEqual(loss_val, 0.)
    self.assertEqual(predictions_val.shape, (batch_size, output_dim))

  @test_utils.run_in_graph_and_eager_modes()
  def testBayesianLinearModel(self):
    """Tests that model makes reasonable predictions."""
    np.random.seed(42)
    train_batch_size = 5
    test_batch_size = 2
    num_features = 3
    noise_variance = 0.01
    coeffs = tf.range(num_features, dtype=tf.float32)
    # Noisy linear data: y = x . coeffs + noise.
    features = tf.to_float(np.random.randn(train_batch_size, num_features))
    labels = (tf.tensordot(features, coeffs, [[-1], [0]])
              + noise_variance * tf.to_float(np.random.randn(train_batch_size)))
    model = gaussian_process.BayesianLinearModel(noise_variance=noise_variance)
    model.fit(features, labels)
    test_features = tf.to_float(np.random.randn(test_batch_size, num_features))
    test_labels = tf.tensordot(test_features, coeffs, [[-1], [0]])
    outputs = model(test_features)
    test_predictions = outputs.distribution.mean()
    test_predictions_variance = outputs.distribution.variance()
    [
        test_labels_val, test_predictions_val, test_predictions_variance_val,
    ] = self.evaluate(
        [test_labels, test_predictions, test_predictions_variance])
    self.assertEqual(test_predictions_val.shape, (test_batch_size,))
    self.assertEqual(test_predictions_variance_val.shape, (test_batch_size,))
    # Posterior mean should recover the noiseless labels; predictive
    # variance should not exceed the observation noise.
    self.assertAllClose(test_predictions_val, test_labels_val, atol=0.1)
    self.assertAllLessEqual(test_predictions_variance_val, noise_variance)
def train_neural_process(model,
train_data,
valid_data,
num_epochs,
batch_size,
learning_rate=1e-4):
"""Trains the NeuralProcess model.
Validation data is used for early stopping,
Args:
model: A NeuralProcess Model subclassing Keras model.
train_data: (4-tuple of tensors) Values of x and y for contexts and targets.
valid_data: 4-tuple of tensors) Values of x and y for contexts and targets.
num_epochs: (int) Number of epochs to train the model for.
batch_size: (int) Size of batch.
learning_rate: (float) Learning rate for Adam optimizer.
Returns:
best_loss: (float) Average validation loss of best early-stopped model.
"""
optimizer = tf.keras.optimizers.Adam(learning_rate)
context_x, context_y, target_x, target_y = train_data
valid_context_x, valid_context_y, valid_target_x, valid_target_y = valid_data
train_data_size = target_x.shape[0]
num_updates_per_epoch = train_data_size//batch_size
best_loss = np.inf
valid_query = (valid_context_x, valid_context_y), valid_target_x
for _ in range(num_epochs):
for i in range(num_updates_per_epoch):
start_idx, end_idx = batch_size*i, batch_size*(i+1)
batch_query = ((context_x[start_idx:end_idx],
context_y[start_idx:end_idx]),
target_x[start_idx:end_idx])
batch_target_y = target_y[start_idx:end_idx]
num_targets = tf.shape(batch_target_y)[1]
with tf.GradientTape() as tape:
predictive_dist = model(batch_query, batch_target_y)
log_p = predictive_dist.log_prob(batch_target_y)
kl = tf.tile(model.losses[-1], [1, num_targets])
loss = -tf.reduce_mean(log_p - kl/tf.cast(num_targets, tf.float32))
gradients = tape.gradient(loss, model.trainable_variables)
optimizer.apply_gradients(zip(gradients, model.trainable_variables))
predictive_dist = model(valid_query, valid_target_y)
log_p = predictive_dist.log_prob(valid_target_y)
kl = tf.tile(model.losses[-1], [1, tf.shape(valid_target_y)[1]])
valid_loss = -tf.reduce_mean(log_p - kl/tf.cast(num_targets, tf.float32))
if valid_loss < best_loss:
best_loss = valid_loss
return best_loss
class NeuralProcessTest(tf.test.TestCase):
def setUp(self):
# Create a dummy multi-task fake dataset
num_train_problems = 32
num_valid_problems = 32
num_targets = 50
num_contexts = 10
input_dim = 5
def _create_fake_dataset(num_problems):
target_x = tf.cast(np.random.rand(num_problems,
num_targets,
input_dim),
tf.float32)
target_y = tf.cast(np.random.rand(num_problems, num_targets, 1),
tf.float32)
context_x, context_y = (target_x[:, :num_contexts, :],
target_y[:, :num_contexts, :])
return (context_x, context_y, target_x, target_y)
self.train_data = _create_fake_dataset(num_train_problems)
self.valid_data = _create_fake_dataset(num_valid_problems)
hidden_size = 128
num_latents = 16
np_attention_wrapper = gaussian_process.Attention(
rep='identity', output_sizes=None, att_type='uniform')
self.np_model = gaussian_process.NeuralProcess(
latent_encoder_sizes=[hidden_size]*4,
num_latents=num_latents,
decoder_sizes=[hidden_size]*2 + [2],
use_deterministic_path=True,
deterministic_encoder_sizes=[hidden_size]*4,
attention_wrapper=np_attention_wrapper)
anp_attention_wrapper = gaussian_process.Attention(
rep='mlp', output_sizes=[hidden_size]*2, att_type='multihead')
self.anp_model = gaussian_process.NeuralProcess(
latent_encoder_sizes=[hidden_size]*4,
num_latents=num_latents,
decoder_sizes=[hidden_size]*2 + [2],
use_deterministic_path=True,
deterministic_encoder_sizes=[hidden_size]*4,
attention_wrapper=anp_attention_wrapper)
self.models = [self.np_model, self.anp_model]
self.num_latents, self.hidden_size, self.num_targets = (num_latents,
hidden_size,
num_targets)
super(NeuralProcessTest, self).setUp()
def test_termination(self):
for model in self.models:
validation_loss = train_neural_process(
model,
self.train_data,
self.valid_data,
num_epochs=2,
batch_size=16,
learning_rate=1e-4)
self.assertGreaterEqual(validation_loss, 0.)
def test_latent_encoder(self):
valid_context_x, valid_context_y, _, _ = self.valid_data
batch_size = valid_context_x.shape[0]
for model in self.models:
dist = model.latent_encoder(valid_context_x, valid_context_y).distribution
self.assertEqual(dist.loc.shape, (batch_size, self.num_latents))
self.assertEqual(dist.scale.shape,
(batch_size, self.num_latents, self.num_latents))
def test_deterministic_encoder(self):
valid_context_x, valid_context_y, valid_target_x, _ = self.valid_data
batch_size = valid_context_x.shape[0]
for model in self.models:
embedding = model.deterministic_encoder(
valid_context_x, valid_context_y, valid_target_x)
self.assertEqual(embedding.shape, (batch_size, self.num_targets,
self.hidden_size))
def test_call(self):
valid_context_x, valid_context_y, valid_target_x, valid_target_y = self.valid_data
batch_size = valid_context_x.shape[0]
for model in self.models:
query = (valid_context_x, valid_context_y), valid_target_x
# test 'training' when target_y is available
predictive_dist = model(query, valid_target_y)
self.assertEqual(predictive_dist.loc.shape, (batch_size, self.num_targets,
1))
self.assertEqual(predictive_dist.scale.shape,
(batch_size, self.num_targets, 1, 1))
self.assertAllGreaterEqual(model.losses, 0.)
# test 'testing' when target_y is unavailable
predictive_dist = model(query)
self.assertEqual(predictive_dist.loc.shape, (batch_size, self.num_targets,
1))
self.assertEqual(predictive_dist.scale.shape,
(batch_size, self.num_targets, 1, 1))
if __name__ == '__main__':
tf.test.main() | 0.931936 | 0.505066 |
from pandas import json_normalize
def activities_to_frame(data):
'''
Takes a data drilling activity object which is a dict from the Collabor8 response and
flattens it into a normalized data fram table to be used for further processing
Structure coming in from the Collabor8 drilling activity object
{
"data": {
"drilling": {
"drillingActivity": [
{
"created": "2020-03-26T11:01:52Z",
"modified": "2020-03-26T11:01:52Z",
.....
.....
Returned as a normalized datafram in the form of
created modified endTime ... measuredHoleStart.value trueVerticalDepth.unitOfMeasurement trueVerticalDepth.value
0 2020-03-19T13:11:31Z 2020-03-19T13:11:31Z 2020-03-17T01:45:00Z ... 0.0 0.0
'''
result=json_normalize(data,['data','drilling','drillingActivity'])
return result
def status_info_to_frame(data):
'''
Takes a data drilling status info object which is a dict from the Collabor8 response and
flattens it into a normalized data fram table to be used for further processing.
Parameters
------------
data: the dict containing the status info objects from GraphQL
Structure coming in from the Collabor8 drilling status info object
{
"data": {
"drilling": {
"statusInfo": [
{
"dataStartTime": "2020-03-24T23:00:00Z",
"dataEndTime": "2020-03-25T23:00:00Z",
"dataEntity": {
"name": "34/4-M-2 H",
"type": "wellbore"
.....
},
Returned as a normalized datafram in the form of
. dataStartTime dataEndTime sourceSystemReportName ... trueVerticalDepthKickoff.value wellheadElevation.unitOfMeasurement wellheadElevation.value
0 2020-03-16T23:00:00Z 2020-03-17T23:00:00Z NO 34/4-M-4 H Daily Drilling Report ... 0.0 m 0.7
'''
result=json_normalize(data,['data','drilling','statusInfo'])
return result
def lithology_info_to_frame(data):
"""
Takes a data drilling lithology object which is a dict from the Collabor8 response
and flattens it into a normalized Pandas dataframe to be used for further processing.
Parameters
----------
data : the dict containing the lithology object from GraphQL
Structure coming in from the Collabor8 drilling lithology object
{
"data": {
"drilling": {
"lithology": [
{
"dataEntity": {
"name": "34/4-M-2 H",
"uid": "8837"
},
"dataStartTime": "2020-03-27T23:00:00Z",
"dataEndTime": "2020-03-28T23:00:00Z",
"endTime": null,
"startTime": null,
Returned as a normalized dataframe in the form of
. dataStartTime dataEndTime endTime ... trueVerticalDepthTop.value trueVerticalDepthBottom.unitOfMeasurement trueVerticalDepthBottom.value
0 2015-01-01T00:00:00Z 2015-01-01T23:59:59Z None ... 1421.74 m 1814.91
1 2015-01-01T00:00:00Z 2015-01-01T23:59:59Z None ... 1415.82 m 1859.49
"""
result=json_normalize(data,['data','drilling','lithology'])
return result | subsurfaceCollabor8/drilling_frames.py | from pandas import json_normalize
def activities_to_frame(data):
'''
Takes a data drilling activity object which is a dict from the Collabor8 response and
flattens it into a normalized data fram table to be used for further processing
Structure coming in from the Collabor8 drilling activity object
{
"data": {
"drilling": {
"drillingActivity": [
{
"created": "2020-03-26T11:01:52Z",
"modified": "2020-03-26T11:01:52Z",
.....
.....
Returned as a normalized datafram in the form of
created modified endTime ... measuredHoleStart.value trueVerticalDepth.unitOfMeasurement trueVerticalDepth.value
0 2020-03-19T13:11:31Z 2020-03-19T13:11:31Z 2020-03-17T01:45:00Z ... 0.0 0.0
'''
result=json_normalize(data,['data','drilling','drillingActivity'])
return result
def status_info_to_frame(data):
'''
Takes a data drilling status info object which is a dict from the Collabor8 response and
flattens it into a normalized data fram table to be used for further processing.
Parameters
------------
data: the dict containing the status info objects from GraphQL
Structure coming in from the Collabor8 drilling status info object
{
"data": {
"drilling": {
"statusInfo": [
{
"dataStartTime": "2020-03-24T23:00:00Z",
"dataEndTime": "2020-03-25T23:00:00Z",
"dataEntity": {
"name": "34/4-M-2 H",
"type": "wellbore"
.....
},
Returned as a normalized datafram in the form of
. dataStartTime dataEndTime sourceSystemReportName ... trueVerticalDepthKickoff.value wellheadElevation.unitOfMeasurement wellheadElevation.value
0 2020-03-16T23:00:00Z 2020-03-17T23:00:00Z NO 34/4-M-4 H Daily Drilling Report ... 0.0 m 0.7
'''
result=json_normalize(data,['data','drilling','statusInfo'])
return result
def lithology_info_to_frame(data):
"""
Takes a data drilling lithology object which is a dict from the Collabor8 response
and flattens it into a normalized Pandas dataframe to be used for further processing.
Parameters
----------
data : the dict containing the lithology object from GraphQL
Structure coming in from the Collabor8 drilling lithology object
{
"data": {
"drilling": {
"lithology": [
{
"dataEntity": {
"name": "34/4-M-2 H",
"uid": "8837"
},
"dataStartTime": "2020-03-27T23:00:00Z",
"dataEndTime": "2020-03-28T23:00:00Z",
"endTime": null,
"startTime": null,
Returned as a normalized dataframe in the form of
. dataStartTime dataEndTime endTime ... trueVerticalDepthTop.value trueVerticalDepthBottom.unitOfMeasurement trueVerticalDepthBottom.value
0 2015-01-01T00:00:00Z 2015-01-01T23:59:59Z None ... 1421.74 m 1814.91
1 2015-01-01T00:00:00Z 2015-01-01T23:59:59Z None ... 1415.82 m 1859.49
"""
result=json_normalize(data,['data','drilling','lithology'])
return result | 0.821939 | 0.662514 |
import grpc
from google.cloud.devtools.cloudbuild_v1.proto import (
cloudbuild_pb2 as google_dot_devtools_dot_cloudbuild__v1_dot_proto_dot_cloudbuild__pb2,
)
from google.longrunning import (
operations_pb2 as google_dot_longrunning_dot_operations__pb2,
)
from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2
class CloudBuildStub(object):
"""Creates and manages builds on Google Cloud Platform.
The main concept used by this API is a `Build`, which describes the location
of the source to build, how to build the source, and where to store the
built artifacts, if any.
A user can list previously-requested builds or get builds by their ID to
determine the status of the build.
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.CreateBuild = channel.unary_unary(
"/google.devtools.cloudbuild.v1.CloudBuild/CreateBuild",
request_serializer=google_dot_devtools_dot_cloudbuild__v1_dot_proto_dot_cloudbuild__pb2.CreateBuildRequest.SerializeToString,
response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString,
)
self.GetBuild = channel.unary_unary(
"/google.devtools.cloudbuild.v1.CloudBuild/GetBuild",
request_serializer=google_dot_devtools_dot_cloudbuild__v1_dot_proto_dot_cloudbuild__pb2.GetBuildRequest.SerializeToString,
response_deserializer=google_dot_devtools_dot_cloudbuild__v1_dot_proto_dot_cloudbuild__pb2.Build.FromString,
)
self.ListBuilds = channel.unary_unary(
"/google.devtools.cloudbuild.v1.CloudBuild/ListBuilds",
request_serializer=google_dot_devtools_dot_cloudbuild__v1_dot_proto_dot_cloudbuild__pb2.ListBuildsRequest.SerializeToString,
response_deserializer=google_dot_devtools_dot_cloudbuild__v1_dot_proto_dot_cloudbuild__pb2.ListBuildsResponse.FromString,
)
self.CancelBuild = channel.unary_unary(
"/google.devtools.cloudbuild.v1.CloudBuild/CancelBuild",
request_serializer=google_dot_devtools_dot_cloudbuild__v1_dot_proto_dot_cloudbuild__pb2.CancelBuildRequest.SerializeToString,
response_deserializer=google_dot_devtools_dot_cloudbuild__v1_dot_proto_dot_cloudbuild__pb2.Build.FromString,
)
self.RetryBuild = channel.unary_unary(
"/google.devtools.cloudbuild.v1.CloudBuild/RetryBuild",
request_serializer=google_dot_devtools_dot_cloudbuild__v1_dot_proto_dot_cloudbuild__pb2.RetryBuildRequest.SerializeToString,
response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString,
)
self.CreateBuildTrigger = channel.unary_unary(
"/google.devtools.cloudbuild.v1.CloudBuild/CreateBuildTrigger",
request_serializer=google_dot_devtools_dot_cloudbuild__v1_dot_proto_dot_cloudbuild__pb2.CreateBuildTriggerRequest.SerializeToString,
response_deserializer=google_dot_devtools_dot_cloudbuild__v1_dot_proto_dot_cloudbuild__pb2.BuildTrigger.FromString,
)
self.GetBuildTrigger = channel.unary_unary(
"/google.devtools.cloudbuild.v1.CloudBuild/GetBuildTrigger",
request_serializer=google_dot_devtools_dot_cloudbuild__v1_dot_proto_dot_cloudbuild__pb2.GetBuildTriggerRequest.SerializeToString,
response_deserializer=google_dot_devtools_dot_cloudbuild__v1_dot_proto_dot_cloudbuild__pb2.BuildTrigger.FromString,
)
self.ListBuildTriggers = channel.unary_unary(
"/google.devtools.cloudbuild.v1.CloudBuild/ListBuildTriggers",
request_serializer=google_dot_devtools_dot_cloudbuild__v1_dot_proto_dot_cloudbuild__pb2.ListBuildTriggersRequest.SerializeToString,
response_deserializer=google_dot_devtools_dot_cloudbuild__v1_dot_proto_dot_cloudbuild__pb2.ListBuildTriggersResponse.FromString,
)
self.DeleteBuildTrigger = channel.unary_unary(
"/google.devtools.cloudbuild.v1.CloudBuild/DeleteBuildTrigger",
request_serializer=google_dot_devtools_dot_cloudbuild__v1_dot_proto_dot_cloudbuild__pb2.DeleteBuildTriggerRequest.SerializeToString,
response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
)
self.UpdateBuildTrigger = channel.unary_unary(
"/google.devtools.cloudbuild.v1.CloudBuild/UpdateBuildTrigger",
request_serializer=google_dot_devtools_dot_cloudbuild__v1_dot_proto_dot_cloudbuild__pb2.UpdateBuildTriggerRequest.SerializeToString,
response_deserializer=google_dot_devtools_dot_cloudbuild__v1_dot_proto_dot_cloudbuild__pb2.BuildTrigger.FromString,
)
self.RunBuildTrigger = channel.unary_unary(
"/google.devtools.cloudbuild.v1.CloudBuild/RunBuildTrigger",
request_serializer=google_dot_devtools_dot_cloudbuild__v1_dot_proto_dot_cloudbuild__pb2.RunBuildTriggerRequest.SerializeToString,
response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString,
)
self.CreateWorkerPool = channel.unary_unary(
"/google.devtools.cloudbuild.v1.CloudBuild/CreateWorkerPool",
request_serializer=google_dot_devtools_dot_cloudbuild__v1_dot_proto_dot_cloudbuild__pb2.CreateWorkerPoolRequest.SerializeToString,
response_deserializer=google_dot_devtools_dot_cloudbuild__v1_dot_proto_dot_cloudbuild__pb2.WorkerPool.FromString,
)
self.GetWorkerPool = channel.unary_unary(
"/google.devtools.cloudbuild.v1.CloudBuild/GetWorkerPool",
request_serializer=google_dot_devtools_dot_cloudbuild__v1_dot_proto_dot_cloudbuild__pb2.GetWorkerPoolRequest.SerializeToString,
response_deserializer=google_dot_devtools_dot_cloudbuild__v1_dot_proto_dot_cloudbuild__pb2.WorkerPool.FromString,
)
self.DeleteWorkerPool = channel.unary_unary(
"/google.devtools.cloudbuild.v1.CloudBuild/DeleteWorkerPool",
request_serializer=google_dot_devtools_dot_cloudbuild__v1_dot_proto_dot_cloudbuild__pb2.DeleteWorkerPoolRequest.SerializeToString,
response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
)
self.UpdateWorkerPool = channel.unary_unary(
"/google.devtools.cloudbuild.v1.CloudBuild/UpdateWorkerPool",
request_serializer=google_dot_devtools_dot_cloudbuild__v1_dot_proto_dot_cloudbuild__pb2.UpdateWorkerPoolRequest.SerializeToString,
response_deserializer=google_dot_devtools_dot_cloudbuild__v1_dot_proto_dot_cloudbuild__pb2.WorkerPool.FromString,
)
self.ListWorkerPools = channel.unary_unary(
"/google.devtools.cloudbuild.v1.CloudBuild/ListWorkerPools",
request_serializer=google_dot_devtools_dot_cloudbuild__v1_dot_proto_dot_cloudbuild__pb2.ListWorkerPoolsRequest.SerializeToString,
response_deserializer=google_dot_devtools_dot_cloudbuild__v1_dot_proto_dot_cloudbuild__pb2.ListWorkerPoolsResponse.FromString,
)
class CloudBuildServicer(object):
"""Creates and manages builds on Google Cloud Platform.
The main concept used by this API is a `Build`, which describes the location
of the source to build, how to build the source, and where to store the
built artifacts, if any.
A user can list previously-requested builds or get builds by their ID to
determine the status of the build.
"""
def CreateBuild(self, request, context):
"""Starts a build with the specified configuration.
This method returns a long-running `Operation`, which includes the build
ID. Pass the build ID to `GetBuild` to determine the build status (such as
`SUCCESS` or `FAILURE`).
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def GetBuild(self, request, context):
"""Returns information about a previously requested build.
The `Build` that is returned includes its status (such as `SUCCESS`,
`FAILURE`, or `WORKING`), and timing information.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def ListBuilds(self, request, context):
"""Lists previously requested builds.
Previously requested builds may still be in-progress, or may have finished
successfully or unsuccessfully.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def CancelBuild(self, request, context):
"""Cancels a build in progress.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def RetryBuild(self, request, context):
"""Creates a new build based on the specified build.
This method creates a new build using the original build request, which may
or may not result in an identical build.
For triggered builds:
* Triggered builds resolve to a precise revision; therefore a retry of a
triggered build will result in a build that uses the same revision.
For non-triggered builds that specify `RepoSource`:
* If the original build built from the tip of a branch, the retried build
will build from the tip of that branch, which may not be the same revision
as the original build.
* If the original build specified a commit sha or revision ID, the retried
build will use the identical source.
For builds that specify `StorageSource`:
* If the original build pulled source from Google Cloud Storage without
specifying the generation of the object, the new build will use the current
object, which may be different from the original build source.
* If the original build pulled source from Cloud Storage and specified the
generation of the object, the new build will attempt to use the same
object, which may or may not be available depending on the bucket's
lifecycle management settings.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def CreateBuildTrigger(self, request, context):
"""Creates a new `BuildTrigger`.
This API is experimental.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def GetBuildTrigger(self, request, context):
"""Returns information about a `BuildTrigger`.
This API is experimental.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def ListBuildTriggers(self, request, context):
"""Lists existing `BuildTrigger`s.
This API is experimental.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def DeleteBuildTrigger(self, request, context):
"""Deletes a `BuildTrigger` by its project ID and trigger ID.
This API is experimental.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def UpdateBuildTrigger(self, request, context):
"""Updates a `BuildTrigger` by its project ID and trigger ID.
This API is experimental.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def RunBuildTrigger(self, request, context):
"""Runs a `BuildTrigger` at a particular source revision.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def CreateWorkerPool(self, request, context):
"""Creates a `WorkerPool` to run the builds, and returns the new worker pool.
This API is experimental.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def GetWorkerPool(self, request, context):
"""Returns information about a `WorkerPool`.
This API is experimental.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def DeleteWorkerPool(self, request, context):
"""Deletes a `WorkerPool` by its project ID and WorkerPool name.
This API is experimental.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def UpdateWorkerPool(self, request, context):
"""Update a `WorkerPool`.
This API is experimental.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def ListWorkerPools(self, request, context):
"""List project's `WorkerPool`s.
This API is experimental.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def add_CloudBuildServicer_to_server(servicer, server):
rpc_method_handlers = {
"CreateBuild": grpc.unary_unary_rpc_method_handler(
servicer.CreateBuild,
request_deserializer=google_dot_devtools_dot_cloudbuild__v1_dot_proto_dot_cloudbuild__pb2.CreateBuildRequest.FromString,
response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString,
),
"GetBuild": grpc.unary_unary_rpc_method_handler(
servicer.GetBuild,
request_deserializer=google_dot_devtools_dot_cloudbuild__v1_dot_proto_dot_cloudbuild__pb2.GetBuildRequest.FromString,
response_serializer=google_dot_devtools_dot_cloudbuild__v1_dot_proto_dot_cloudbuild__pb2.Build.SerializeToString,
),
"ListBuilds": grpc.unary_unary_rpc_method_handler(
servicer.ListBuilds,
request_deserializer=google_dot_devtools_dot_cloudbuild__v1_dot_proto_dot_cloudbuild__pb2.ListBuildsRequest.FromString,
response_serializer=google_dot_devtools_dot_cloudbuild__v1_dot_proto_dot_cloudbuild__pb2.ListBuildsResponse.SerializeToString,
),
"CancelBuild": grpc.unary_unary_rpc_method_handler(
servicer.CancelBuild,
request_deserializer=google_dot_devtools_dot_cloudbuild__v1_dot_proto_dot_cloudbuild__pb2.CancelBuildRequest.FromString,
response_serializer=google_dot_devtools_dot_cloudbuild__v1_dot_proto_dot_cloudbuild__pb2.Build.SerializeToString,
),
"RetryBuild": grpc.unary_unary_rpc_method_handler(
servicer.RetryBuild,
request_deserializer=google_dot_devtools_dot_cloudbuild__v1_dot_proto_dot_cloudbuild__pb2.RetryBuildRequest.FromString,
response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString,
),
"CreateBuildTrigger": grpc.unary_unary_rpc_method_handler(
servicer.CreateBuildTrigger,
request_deserializer=google_dot_devtools_dot_cloudbuild__v1_dot_proto_dot_cloudbuild__pb2.CreateBuildTriggerRequest.FromString,
response_serializer=google_dot_devtools_dot_cloudbuild__v1_dot_proto_dot_cloudbuild__pb2.BuildTrigger.SerializeToString,
),
"GetBuildTrigger": grpc.unary_unary_rpc_method_handler(
servicer.GetBuildTrigger,
request_deserializer=google_dot_devtools_dot_cloudbuild__v1_dot_proto_dot_cloudbuild__pb2.GetBuildTriggerRequest.FromString,
response_serializer=google_dot_devtools_dot_cloudbuild__v1_dot_proto_dot_cloudbuild__pb2.BuildTrigger.SerializeToString,
),
"ListBuildTriggers": grpc.unary_unary_rpc_method_handler(
servicer.ListBuildTriggers,
request_deserializer=google_dot_devtools_dot_cloudbuild__v1_dot_proto_dot_cloudbuild__pb2.ListBuildTriggersRequest.FromString,
response_serializer=google_dot_devtools_dot_cloudbuild__v1_dot_proto_dot_cloudbuild__pb2.ListBuildTriggersResponse.SerializeToString,
),
"DeleteBuildTrigger": grpc.unary_unary_rpc_method_handler(
servicer.DeleteBuildTrigger,
request_deserializer=google_dot_devtools_dot_cloudbuild__v1_dot_proto_dot_cloudbuild__pb2.DeleteBuildTriggerRequest.FromString,
response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
),
"UpdateBuildTrigger": grpc.unary_unary_rpc_method_handler(
servicer.UpdateBuildTrigger,
request_deserializer=google_dot_devtools_dot_cloudbuild__v1_dot_proto_dot_cloudbuild__pb2.UpdateBuildTriggerRequest.FromString,
response_serializer=google_dot_devtools_dot_cloudbuild__v1_dot_proto_dot_cloudbuild__pb2.BuildTrigger.SerializeToString,
),
"RunBuildTrigger": grpc.unary_unary_rpc_method_handler(
servicer.RunBuildTrigger,
request_deserializer=google_dot_devtools_dot_cloudbuild__v1_dot_proto_dot_cloudbuild__pb2.RunBuildTriggerRequest.FromString,
response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString,
),
"CreateWorkerPool": grpc.unary_unary_rpc_method_handler(
servicer.CreateWorkerPool,
request_deserializer=google_dot_devtools_dot_cloudbuild__v1_dot_proto_dot_cloudbuild__pb2.CreateWorkerPoolRequest.FromString,
response_serializer=google_dot_devtools_dot_cloudbuild__v1_dot_proto_dot_cloudbuild__pb2.WorkerPool.SerializeToString,
),
"GetWorkerPool": grpc.unary_unary_rpc_method_handler(
servicer.GetWorkerPool,
request_deserializer=google_dot_devtools_dot_cloudbuild__v1_dot_proto_dot_cloudbuild__pb2.GetWorkerPoolRequest.FromString,
response_serializer=google_dot_devtools_dot_cloudbuild__v1_dot_proto_dot_cloudbuild__pb2.WorkerPool.SerializeToString,
),
"DeleteWorkerPool": grpc.unary_unary_rpc_method_handler(
servicer.DeleteWorkerPool,
request_deserializer=google_dot_devtools_dot_cloudbuild__v1_dot_proto_dot_cloudbuild__pb2.DeleteWorkerPoolRequest.FromString,
response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
),
"UpdateWorkerPool": grpc.unary_unary_rpc_method_handler(
servicer.UpdateWorkerPool,
request_deserializer=google_dot_devtools_dot_cloudbuild__v1_dot_proto_dot_cloudbuild__pb2.UpdateWorkerPoolRequest.FromString,
response_serializer=google_dot_devtools_dot_cloudbuild__v1_dot_proto_dot_cloudbuild__pb2.WorkerPool.SerializeToString,
),
"ListWorkerPools": grpc.unary_unary_rpc_method_handler(
servicer.ListWorkerPools,
request_deserializer=google_dot_devtools_dot_cloudbuild__v1_dot_proto_dot_cloudbuild__pb2.ListWorkerPoolsRequest.FromString,
response_serializer=google_dot_devtools_dot_cloudbuild__v1_dot_proto_dot_cloudbuild__pb2.ListWorkerPoolsResponse.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
"google.devtools.cloudbuild.v1.CloudBuild", rpc_method_handlers
)
server.add_generic_rpc_handlers((generic_handler,)) | cloudbuild/google/cloud/devtools/cloudbuild_v1/proto/cloudbuild_pb2_grpc.py | import grpc
from google.cloud.devtools.cloudbuild_v1.proto import (
cloudbuild_pb2 as google_dot_devtools_dot_cloudbuild__v1_dot_proto_dot_cloudbuild__pb2,
)
from google.longrunning import (
operations_pb2 as google_dot_longrunning_dot_operations__pb2,
)
from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2
# NOTE: protoc-generated gRPC client stub -- one unary-unary callable per
# CloudBuild RPC.  Manual edits are normally overwritten on regeneration.
class CloudBuildStub(object):
    """Creates and manages builds on Google Cloud Platform.
    The main concept used by this API is a `Build`, which describes the location
    of the source to build, how to build the source, and where to store the
    built artifacts, if any.
    A user can list previously-requested builds or get builds by their ID to
    determine the status of the build.
    """

    def __init__(self, channel):
        """Constructor.
        Args:
            channel: A grpc.Channel.
        """
        # Build RPCs
        self.CreateBuild = channel.unary_unary(
            "/google.devtools.cloudbuild.v1.CloudBuild/CreateBuild",
            request_serializer=google_dot_devtools_dot_cloudbuild__v1_dot_proto_dot_cloudbuild__pb2.CreateBuildRequest.SerializeToString,
            response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString,
        )
        self.GetBuild = channel.unary_unary(
            "/google.devtools.cloudbuild.v1.CloudBuild/GetBuild",
            request_serializer=google_dot_devtools_dot_cloudbuild__v1_dot_proto_dot_cloudbuild__pb2.GetBuildRequest.SerializeToString,
            response_deserializer=google_dot_devtools_dot_cloudbuild__v1_dot_proto_dot_cloudbuild__pb2.Build.FromString,
        )
        self.ListBuilds = channel.unary_unary(
            "/google.devtools.cloudbuild.v1.CloudBuild/ListBuilds",
            request_serializer=google_dot_devtools_dot_cloudbuild__v1_dot_proto_dot_cloudbuild__pb2.ListBuildsRequest.SerializeToString,
            response_deserializer=google_dot_devtools_dot_cloudbuild__v1_dot_proto_dot_cloudbuild__pb2.ListBuildsResponse.FromString,
        )
        self.CancelBuild = channel.unary_unary(
            "/google.devtools.cloudbuild.v1.CloudBuild/CancelBuild",
            request_serializer=google_dot_devtools_dot_cloudbuild__v1_dot_proto_dot_cloudbuild__pb2.CancelBuildRequest.SerializeToString,
            response_deserializer=google_dot_devtools_dot_cloudbuild__v1_dot_proto_dot_cloudbuild__pb2.Build.FromString,
        )
        self.RetryBuild = channel.unary_unary(
            "/google.devtools.cloudbuild.v1.CloudBuild/RetryBuild",
            request_serializer=google_dot_devtools_dot_cloudbuild__v1_dot_proto_dot_cloudbuild__pb2.RetryBuildRequest.SerializeToString,
            response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString,
        )
        # BuildTrigger RPCs
        self.CreateBuildTrigger = channel.unary_unary(
            "/google.devtools.cloudbuild.v1.CloudBuild/CreateBuildTrigger",
            request_serializer=google_dot_devtools_dot_cloudbuild__v1_dot_proto_dot_cloudbuild__pb2.CreateBuildTriggerRequest.SerializeToString,
            response_deserializer=google_dot_devtools_dot_cloudbuild__v1_dot_proto_dot_cloudbuild__pb2.BuildTrigger.FromString,
        )
        self.GetBuildTrigger = channel.unary_unary(
            "/google.devtools.cloudbuild.v1.CloudBuild/GetBuildTrigger",
            request_serializer=google_dot_devtools_dot_cloudbuild__v1_dot_proto_dot_cloudbuild__pb2.GetBuildTriggerRequest.SerializeToString,
            response_deserializer=google_dot_devtools_dot_cloudbuild__v1_dot_proto_dot_cloudbuild__pb2.BuildTrigger.FromString,
        )
        self.ListBuildTriggers = channel.unary_unary(
            "/google.devtools.cloudbuild.v1.CloudBuild/ListBuildTriggers",
            request_serializer=google_dot_devtools_dot_cloudbuild__v1_dot_proto_dot_cloudbuild__pb2.ListBuildTriggersRequest.SerializeToString,
            response_deserializer=google_dot_devtools_dot_cloudbuild__v1_dot_proto_dot_cloudbuild__pb2.ListBuildTriggersResponse.FromString,
        )
        self.DeleteBuildTrigger = channel.unary_unary(
            "/google.devtools.cloudbuild.v1.CloudBuild/DeleteBuildTrigger",
            request_serializer=google_dot_devtools_dot_cloudbuild__v1_dot_proto_dot_cloudbuild__pb2.DeleteBuildTriggerRequest.SerializeToString,
            response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
        )
        self.UpdateBuildTrigger = channel.unary_unary(
            "/google.devtools.cloudbuild.v1.CloudBuild/UpdateBuildTrigger",
            request_serializer=google_dot_devtools_dot_cloudbuild__v1_dot_proto_dot_cloudbuild__pb2.UpdateBuildTriggerRequest.SerializeToString,
            response_deserializer=google_dot_devtools_dot_cloudbuild__v1_dot_proto_dot_cloudbuild__pb2.BuildTrigger.FromString,
        )
        self.RunBuildTrigger = channel.unary_unary(
            "/google.devtools.cloudbuild.v1.CloudBuild/RunBuildTrigger",
            request_serializer=google_dot_devtools_dot_cloudbuild__v1_dot_proto_dot_cloudbuild__pb2.RunBuildTriggerRequest.SerializeToString,
            response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString,
        )
        # WorkerPool RPCs
        self.CreateWorkerPool = channel.unary_unary(
            "/google.devtools.cloudbuild.v1.CloudBuild/CreateWorkerPool",
            request_serializer=google_dot_devtools_dot_cloudbuild__v1_dot_proto_dot_cloudbuild__pb2.CreateWorkerPoolRequest.SerializeToString,
            response_deserializer=google_dot_devtools_dot_cloudbuild__v1_dot_proto_dot_cloudbuild__pb2.WorkerPool.FromString,
        )
        self.GetWorkerPool = channel.unary_unary(
            "/google.devtools.cloudbuild.v1.CloudBuild/GetWorkerPool",
            request_serializer=google_dot_devtools_dot_cloudbuild__v1_dot_proto_dot_cloudbuild__pb2.GetWorkerPoolRequest.SerializeToString,
            response_deserializer=google_dot_devtools_dot_cloudbuild__v1_dot_proto_dot_cloudbuild__pb2.WorkerPool.FromString,
        )
        self.DeleteWorkerPool = channel.unary_unary(
            "/google.devtools.cloudbuild.v1.CloudBuild/DeleteWorkerPool",
            request_serializer=google_dot_devtools_dot_cloudbuild__v1_dot_proto_dot_cloudbuild__pb2.DeleteWorkerPoolRequest.SerializeToString,
            response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
        )
        self.UpdateWorkerPool = channel.unary_unary(
            "/google.devtools.cloudbuild.v1.CloudBuild/UpdateWorkerPool",
            request_serializer=google_dot_devtools_dot_cloudbuild__v1_dot_proto_dot_cloudbuild__pb2.UpdateWorkerPoolRequest.SerializeToString,
            response_deserializer=google_dot_devtools_dot_cloudbuild__v1_dot_proto_dot_cloudbuild__pb2.WorkerPool.FromString,
        )
        self.ListWorkerPools = channel.unary_unary(
            "/google.devtools.cloudbuild.v1.CloudBuild/ListWorkerPools",
            request_serializer=google_dot_devtools_dot_cloudbuild__v1_dot_proto_dot_cloudbuild__pb2.ListWorkerPoolsRequest.SerializeToString,
            response_deserializer=google_dot_devtools_dot_cloudbuild__v1_dot_proto_dot_cloudbuild__pb2.ListWorkerPoolsResponse.FromString,
        )
# NOTE: protoc-generated service skeleton -- subclass and override the methods
# below; every default implementation answers UNIMPLEMENTED.
class CloudBuildServicer(object):
    """Creates and manages builds on Google Cloud Platform.
    The main concept used by this API is a `Build`, which describes the location
    of the source to build, how to build the source, and where to store the
    built artifacts, if any.
    A user can list previously-requested builds or get builds by their ID to
    determine the status of the build.
    """

    def CreateBuild(self, request, context):
        """Starts a build with the specified configuration.
        This method returns a long-running `Operation`, which includes the build
        ID. Pass the build ID to `GetBuild` to determine the build status (such as
        `SUCCESS` or `FAILURE`).
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")

    def GetBuild(self, request, context):
        """Returns information about a previously requested build.
        The `Build` that is returned includes its status (such as `SUCCESS`,
        `FAILURE`, or `WORKING`), and timing information.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")

    def ListBuilds(self, request, context):
        """Lists previously requested builds.
        Previously requested builds may still be in-progress, or may have finished
        successfully or unsuccessfully.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")

    def CancelBuild(self, request, context):
        """Cancels a build in progress.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")

    def RetryBuild(self, request, context):
        """Creates a new build based on the specified build.
        This method creates a new build using the original build request, which may
        or may not result in an identical build.
        For triggered builds:
        * Triggered builds resolve to a precise revision; therefore a retry of a
        triggered build will result in a build that uses the same revision.
        For non-triggered builds that specify `RepoSource`:
        * If the original build built from the tip of a branch, the retried build
        will build from the tip of that branch, which may not be the same revision
        as the original build.
        * If the original build specified a commit sha or revision ID, the retried
        build will use the identical source.
        For builds that specify `StorageSource`:
        * If the original build pulled source from Google Cloud Storage without
        specifying the generation of the object, the new build will use the current
        object, which may be different from the original build source.
        * If the original build pulled source from Cloud Storage and specified the
        generation of the object, the new build will attempt to use the same
        object, which may or may not be available depending on the bucket's
        lifecycle management settings.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")

    def CreateBuildTrigger(self, request, context):
        """Creates a new `BuildTrigger`.
        This API is experimental.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")

    def GetBuildTrigger(self, request, context):
        """Returns information about a `BuildTrigger`.
        This API is experimental.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")

    def ListBuildTriggers(self, request, context):
        """Lists existing `BuildTrigger`s.
        This API is experimental.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")

    def DeleteBuildTrigger(self, request, context):
        """Deletes a `BuildTrigger` by its project ID and trigger ID.
        This API is experimental.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")

    def UpdateBuildTrigger(self, request, context):
        """Updates a `BuildTrigger` by its project ID and trigger ID.
        This API is experimental.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")

    def RunBuildTrigger(self, request, context):
        """Runs a `BuildTrigger` at a particular source revision.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")

    def CreateWorkerPool(self, request, context):
        """Creates a `WorkerPool` to run the builds, and returns the new worker pool.
        This API is experimental.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")

    def GetWorkerPool(self, request, context):
        """Returns information about a `WorkerPool`.
        This API is experimental.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")

    def DeleteWorkerPool(self, request, context):
        """Deletes a `WorkerPool` by its project ID and WorkerPool name.
        This API is experimental.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")

    def UpdateWorkerPool(self, request, context):
        """Update a `WorkerPool`.
        This API is experimental.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")

    def ListWorkerPools(self, request, context):
        """List project's `WorkerPool`s.
        This API is experimental.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")
def add_CloudBuildServicer_to_server(servicer, server):
    """Register a CloudBuildServicer's RPC handlers with a grpc.Server."""
    # Map every CloudBuild RPC name to its servicer method together with the
    # request deserializer / response serializer generated from the proto.
    rpc_method_handlers = {
        "CreateBuild": grpc.unary_unary_rpc_method_handler(
            servicer.CreateBuild,
            request_deserializer=google_dot_devtools_dot_cloudbuild__v1_dot_proto_dot_cloudbuild__pb2.CreateBuildRequest.FromString,
            response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString,
        ),
        "GetBuild": grpc.unary_unary_rpc_method_handler(
            servicer.GetBuild,
            request_deserializer=google_dot_devtools_dot_cloudbuild__v1_dot_proto_dot_cloudbuild__pb2.GetBuildRequest.FromString,
            response_serializer=google_dot_devtools_dot_cloudbuild__v1_dot_proto_dot_cloudbuild__pb2.Build.SerializeToString,
        ),
        "ListBuilds": grpc.unary_unary_rpc_method_handler(
            servicer.ListBuilds,
            request_deserializer=google_dot_devtools_dot_cloudbuild__v1_dot_proto_dot_cloudbuild__pb2.ListBuildsRequest.FromString,
            response_serializer=google_dot_devtools_dot_cloudbuild__v1_dot_proto_dot_cloudbuild__pb2.ListBuildsResponse.SerializeToString,
        ),
        "CancelBuild": grpc.unary_unary_rpc_method_handler(
            servicer.CancelBuild,
            request_deserializer=google_dot_devtools_dot_cloudbuild__v1_dot_proto_dot_cloudbuild__pb2.CancelBuildRequest.FromString,
            response_serializer=google_dot_devtools_dot_cloudbuild__v1_dot_proto_dot_cloudbuild__pb2.Build.SerializeToString,
        ),
        "RetryBuild": grpc.unary_unary_rpc_method_handler(
            servicer.RetryBuild,
            request_deserializer=google_dot_devtools_dot_cloudbuild__v1_dot_proto_dot_cloudbuild__pb2.RetryBuildRequest.FromString,
            response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString,
        ),
        "CreateBuildTrigger": grpc.unary_unary_rpc_method_handler(
            servicer.CreateBuildTrigger,
            request_deserializer=google_dot_devtools_dot_cloudbuild__v1_dot_proto_dot_cloudbuild__pb2.CreateBuildTriggerRequest.FromString,
            response_serializer=google_dot_devtools_dot_cloudbuild__v1_dot_proto_dot_cloudbuild__pb2.BuildTrigger.SerializeToString,
        ),
        "GetBuildTrigger": grpc.unary_unary_rpc_method_handler(
            servicer.GetBuildTrigger,
            request_deserializer=google_dot_devtools_dot_cloudbuild__v1_dot_proto_dot_cloudbuild__pb2.GetBuildTriggerRequest.FromString,
            response_serializer=google_dot_devtools_dot_cloudbuild__v1_dot_proto_dot_cloudbuild__pb2.BuildTrigger.SerializeToString,
        ),
        "ListBuildTriggers": grpc.unary_unary_rpc_method_handler(
            servicer.ListBuildTriggers,
            request_deserializer=google_dot_devtools_dot_cloudbuild__v1_dot_proto_dot_cloudbuild__pb2.ListBuildTriggersRequest.FromString,
            response_serializer=google_dot_devtools_dot_cloudbuild__v1_dot_proto_dot_cloudbuild__pb2.ListBuildTriggersResponse.SerializeToString,
        ),
        "DeleteBuildTrigger": grpc.unary_unary_rpc_method_handler(
            servicer.DeleteBuildTrigger,
            request_deserializer=google_dot_devtools_dot_cloudbuild__v1_dot_proto_dot_cloudbuild__pb2.DeleteBuildTriggerRequest.FromString,
            response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
        ),
        "UpdateBuildTrigger": grpc.unary_unary_rpc_method_handler(
            servicer.UpdateBuildTrigger,
            request_deserializer=google_dot_devtools_dot_cloudbuild__v1_dot_proto_dot_cloudbuild__pb2.UpdateBuildTriggerRequest.FromString,
            response_serializer=google_dot_devtools_dot_cloudbuild__v1_dot_proto_dot_cloudbuild__pb2.BuildTrigger.SerializeToString,
        ),
        "RunBuildTrigger": grpc.unary_unary_rpc_method_handler(
            servicer.RunBuildTrigger,
            request_deserializer=google_dot_devtools_dot_cloudbuild__v1_dot_proto_dot_cloudbuild__pb2.RunBuildTriggerRequest.FromString,
            response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString,
        ),
        "CreateWorkerPool": grpc.unary_unary_rpc_method_handler(
            servicer.CreateWorkerPool,
            request_deserializer=google_dot_devtools_dot_cloudbuild__v1_dot_proto_dot_cloudbuild__pb2.CreateWorkerPoolRequest.FromString,
            response_serializer=google_dot_devtools_dot_cloudbuild__v1_dot_proto_dot_cloudbuild__pb2.WorkerPool.SerializeToString,
        ),
        "GetWorkerPool": grpc.unary_unary_rpc_method_handler(
            servicer.GetWorkerPool,
            request_deserializer=google_dot_devtools_dot_cloudbuild__v1_dot_proto_dot_cloudbuild__pb2.GetWorkerPoolRequest.FromString,
            response_serializer=google_dot_devtools_dot_cloudbuild__v1_dot_proto_dot_cloudbuild__pb2.WorkerPool.SerializeToString,
        ),
        "DeleteWorkerPool": grpc.unary_unary_rpc_method_handler(
            servicer.DeleteWorkerPool,
            request_deserializer=google_dot_devtools_dot_cloudbuild__v1_dot_proto_dot_cloudbuild__pb2.DeleteWorkerPoolRequest.FromString,
            response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
        ),
        "UpdateWorkerPool": grpc.unary_unary_rpc_method_handler(
            servicer.UpdateWorkerPool,
            request_deserializer=google_dot_devtools_dot_cloudbuild__v1_dot_proto_dot_cloudbuild__pb2.UpdateWorkerPoolRequest.FromString,
            response_serializer=google_dot_devtools_dot_cloudbuild__v1_dot_proto_dot_cloudbuild__pb2.WorkerPool.SerializeToString,
        ),
        "ListWorkerPools": grpc.unary_unary_rpc_method_handler(
            servicer.ListWorkerPools,
            request_deserializer=google_dot_devtools_dot_cloudbuild__v1_dot_proto_dot_cloudbuild__pb2.ListWorkerPoolsRequest.FromString,
            response_serializer=google_dot_devtools_dot_cloudbuild__v1_dot_proto_dot_cloudbuild__pb2.ListWorkerPoolsResponse.SerializeToString,
        ),
    }
    # Wrap all handlers under the fully-qualified service name.
    generic_handler = grpc.method_handlers_generic_handler(
        "google.devtools.cloudbuild.v1.CloudBuild", rpc_method_handlers
    )
server.add_generic_rpc_handlers((generic_handler,)) | 0.612889 | 0.078184 |
import sys
import pathlib
from PySide6 import QtCore, QtWidgets, QtGui
from classes import DowTagEditor
from classes import DowConfig
from classes import DowDatabase
from classes import DowMimeType
from classes import DowGlImage
class MainWidget(QtWidgets.QWidget):
    """Main window of the tag editor.

    The left side shows the current image/video; the right side hosts the tag
    search box, tag lists, file list and navigation buttons.  All editing
    logic lives in DowTagEditor; this widget only builds and wires up the UI.
    """

    def __init__(self, app):
        """Build the UI and hook it up to the editor logic.

        Args:
            app: The running QApplication (kept so menu actions can quit it).
        """
        super().__init__()
        self.__app = app
        self.__config = DowConfig(pathlib.Path(".").joinpath("config.json"))

        # Controls
        ## Menu Bar
        self.__file_menu = QtWidgets.QMenu("File")
        self.__file_menu.addAction("Open Files", self.__show_open_files_dialog)
        self.__file_menu.addAction("Open Directory", self.__show_open_dir_dialog)
        self.__file_menu.addAction("Clear Files", self.__clear_files)
        self.__file_menu.addAction("Close", self.__close)
        self.__menu_bar = QtWidgets.QMenuBar()
        self.__menu_bar.addMenu(self.__file_menu)

        ## Left side: the media viewer
        self.__image = DowGlImage(self)  # QtWidgets.QLabel()
        # self.__image.setAlignment(QtCore.Qt.AlignVCenter)
        self.__controls_left_layout = QtWidgets.QVBoxLayout()
        self.__left_box = QtWidgets.QWidget()
        self.__left_box_layout = QtWidgets.QVBoxLayout(self.__left_box)
        self.__left_box_layout.setContentsMargins(QtCore.QMargins(0, 0, 0, 0))
        self.__left_box_layout.addWidget(self.__image)
        self.__controls_left_layout.addWidget(self.__left_box)

        ## Right side: search box, tag lists, file list and buttons
        self.__search_box = QtWidgets.QLineEdit()
        self.__all_tags = QtWidgets.QListView()
        self.__all_tags.setMaximumHeight(150)
        self.__selected_tags = QtWidgets.QListView()
        self.__files = QtWidgets.QListView()
        self.__files.setMinimumHeight(200)
        self.__files.setMaximumHeight(200)
        self.__back_button = QtWidgets.QPushButton("Back")
        self.__next_button = QtWidgets.QPushButton("Next")
        self.__save_button = QtWidgets.QPushButton("Save")
        self.__buttons_right_layout = QtWidgets.QHBoxLayout()
        self.__buttons_right_layout.addWidget(self.__back_button)
        self.__buttons_right_layout.addWidget(self.__next_button)
        self.__buttons_right_layout.addWidget(self.__save_button)
        self.__right_box = QtWidgets.QWidget()
        self.__right_box.setMinimumWidth(200)
        self.__right_box.setMaximumWidth(300)
        self.__controls_right_layout = QtWidgets.QVBoxLayout(self.__right_box)
        self.__controls_right_layout.setContentsMargins(QtCore.QMargins(0, 0, 0, 0))
        self.__controls_right_layout.addWidget(self.__search_box)
        self.__controls_right_layout.addWidget(self.__all_tags)
        self.__controls_right_layout.addWidget(self.__selected_tags)
        self.__controls_right_layout.addWidget(self.__files)
        self.__controls_right_layout.addLayout(self.__buttons_right_layout)

        # Layouts
        self.__main_layout = QtWidgets.QHBoxLayout(self)
        self.__main_layout.addLayout(self.__controls_left_layout)
        self.__main_layout.addWidget(self.__right_box)
        self.__main_layout.setMenuBar(self.__menu_bar)

        # Logic: hand every interactive control over to the editor
        self.__logic = DowTagEditor(self.__all_tags,
                                    self.__selected_tags,
                                    self.__files,
                                    self.__next_button,
                                    self.__back_button,
                                    self.__save_button,
                                    self.__search_box)
        self.__logic.SetConfig(self.__config)
        # Only attach a database when one already exists under the root dir.
        if pathlib.Path(self.__config.ROOT_DIR).joinpath(self.__config.DB_NAME).exists():
            self.__db = DowDatabase(self.__config.ROOT_DIR, self.__config.DB_NAME)
            self.__logic.SetDatabase(self.__db)
        self.__logic.ImageChangeEvent(self.__load_image)

    @QtCore.Slot()
    def __show_open_files_dialog(self):
        """Let the user pick individual media files and add them to the editor."""
        files = QtWidgets.QFileDialog.getOpenFileNames(self,
                                                       "Open Files",
                                                       self.__config.ROOT_DIR,
                                                       "Images (*.png *.jpeg *.jpg *.bmp *.tiff *.gif *.mp4 *.webm)"
                                                       )
        # getOpenFileNames returns (paths, selected_filter); only paths matter.
        self.__logic.AddFiles(files[0])

    @QtCore.Slot()
    def __show_open_dir_dialog(self):
        """Let the user pick a directory and add every file below it."""
        dirs = QtWidgets.QFileDialog.getExistingDirectory(self,
                                                          "Open Directory",
                                                          self.__config.ROOT_DIR,
                                                          QtWidgets.QFileDialog.ShowDirsOnly |
                                                          QtWidgets.QFileDialog.DontResolveSymlinks)
        files = list(pathlib.Path(dirs).glob("**/*.*"))
        self.__logic.AddFiles(files)

    @QtCore.Slot()
    def __clear_files(self):
        """Remove all files from the editor's working set."""
        self.__logic.ClearFiles()

    @QtCore.Slot()
    def __close(self):
        """Quit the application.

        BUGFIX: was ``exit(0)``, which raises SystemExit from inside a Qt
        slot instead of shutting the event loop down cleanly (and relies on
        the ``site``-provided ``exit`` helper).  Asking the stored
        QApplication to quit lets Qt tear everything down properly.
        """
        self.__app.quit()

    @QtCore.Slot()
    def __load_image(self):
        """Show the editor's current file in the viewer (image or video)."""
        file_path = pathlib.Path(self.__logic.current_file)
        if file_path.name != "" and file_path.exists():
            if file_path.suffix in DowMimeType("").image_formats_suffix_list:
                self.__image.SetImage(file_path)
            else:
                self.__image.SetVideo(file_path)
        else:
            self.__image.Clear()

    @QtCore.Slot()
    def resizeEvent(self, event: QtGui.QResizeEvent):
        # Re-render the current file so it is rescaled to the new widget size.
        self.__load_image()

    def keyPressEvent(self, event: QtGui.QKeyEvent):
        # Forward key presses to the editor logic (navigation shortcuts etc.).
        super(MainWidget, self).keyPressEvent(event)
        self.__logic.keyPressEvent(event)
# Script entry point: create the Qt application, build and show the main window.
if __name__ == "__main__":
    app = QtWidgets.QApplication(sys.argv)
    widget = MainWidget(app)
    widget.resize(800, 600)
    widget.show()
sys.exit(app.exec()) | dow/tag_editor.py | import sys
import pathlib
from PySide6 import QtCore, QtWidgets, QtGui
from classes import DowTagEditor
from classes import DowConfig
from classes import DowDatabase
from classes import DowMimeType
from classes import DowGlImage
class MainWidget(QtWidgets.QWidget):
    """Main window of the tag editor.

    The left side shows the current image/video; the right side hosts the tag
    search box, tag lists, file list and navigation buttons.  Editing logic
    lives in DowTagEditor; this widget only builds and wires up the UI.
    """

    def __init__(self, app):
        # app: the running QApplication; stored but currently unused.
        super().__init__()
        self.__app = app
        self.__config = DowConfig(pathlib.Path(".").joinpath("config.json"))
        #Controls
        ## Menu Bar
        self.__file_menu = QtWidgets.QMenu("File")
        self.__file_menu.addAction("Open Files", self.__show_open_files_dialog)
        self.__file_menu.addAction("Open Directory", self.__show_open_dir_dialog)
        self.__file_menu.addAction("Clear Files", self.__clear_files)
        self.__file_menu.addAction("Close", self.__close)
        self.__menu_bar = QtWidgets.QMenuBar()
        self.__menu_bar.addMenu(self.__file_menu)
        ## Left side: the media viewer
        self.__image = DowGlImage(self) #QtWidgets.QLabel()
        # self.__image.setAlignment(QtCore.Qt.AlignVCenter)
        self.__controls_left_layout = QtWidgets.QVBoxLayout()
        self.__left_box = QtWidgets.QWidget()
        self.__left_box_layout = QtWidgets.QVBoxLayout(self.__left_box)
        self.__left_box_layout.setContentsMargins(QtCore.QMargins(0,0,0,0))
        self.__left_box_layout.addWidget(self.__image)
        self.__controls_left_layout.addWidget(self.__left_box)
        ## Right side: search box, tag lists, file list and buttons
        self.__search_box = QtWidgets.QLineEdit()
        self.__all_tags = QtWidgets.QListView()
        self.__all_tags.setMaximumHeight(150)
        self.__selected_tags = QtWidgets.QListView()
        self.__files = QtWidgets.QListView()
        self.__files.setMinimumHeight(200)
        self.__files.setMaximumHeight(200)
        self.__back_button = QtWidgets.QPushButton("Back")
        self.__next_button = QtWidgets.QPushButton("Next")
        self.__save_button = QtWidgets.QPushButton("Save")
        self.__buttons_right_layout = QtWidgets.QHBoxLayout()
        self.__buttons_right_layout.addWidget(self.__back_button)
        self.__buttons_right_layout.addWidget(self.__next_button)
        self.__buttons_right_layout.addWidget(self.__save_button)
        self.__right_box = QtWidgets.QWidget()
        self.__right_box.setMinimumWidth(200)
        self.__right_box.setMaximumWidth(300)
        self.__controls_right_layout = QtWidgets.QVBoxLayout(self.__right_box)
        self.__controls_right_layout.setContentsMargins(QtCore.QMargins(0,0,0,0))
        self.__controls_right_layout.addWidget(self.__search_box)
        self.__controls_right_layout.addWidget(self.__all_tags)
        self.__controls_right_layout.addWidget(self.__selected_tags)
        self.__controls_right_layout.addWidget(self.__files)
        self.__controls_right_layout.addLayout(self.__buttons_right_layout)
        #Layouts
        self.__main_layout = QtWidgets.QHBoxLayout(self)
        self.__main_layout.addLayout(self.__controls_left_layout)
        self.__main_layout.addWidget(self.__right_box)
        self.__main_layout.setMenuBar(self.__menu_bar)
        #Logic: hand every interactive control over to the editor
        self.__logic = DowTagEditor(self.__all_tags,
                                    self.__selected_tags,
                                    self.__files,
                                    self.__next_button,
                                    self.__back_button,
                                    self.__save_button,
                                    self.__search_box)
        self.__logic.SetConfig(self.__config)
        # Only attach a database when one already exists under the root dir.
        if pathlib.Path(self.__config.ROOT_DIR).joinpath(self.__config.DB_NAME).exists():
            self.__db = DowDatabase(self.__config.ROOT_DIR, self.__config.DB_NAME)
            self.__logic.SetDatabase(self.__db)
        self.__logic.ImageChangeEvent(self.__load_image)

    @QtCore.Slot()
    def __show_open_files_dialog(self):
        """Let the user pick individual media files and add them to the editor."""
        files = QtWidgets.QFileDialog.getOpenFileNames(self,
                                                       "Open Files",
                                                       self.__config.ROOT_DIR,
                                                       "Images (*.png *.jpeg *.jpg *.bmp *.tiff *.gif *.mp4 *.webm)"
                                                       )
        # getOpenFileNames returns (paths, selected_filter); only paths matter.
        self.__logic.AddFiles(files[0])

    @QtCore.Slot()
    def __show_open_dir_dialog(self):
        """Let the user pick a directory and add every file below it."""
        dirs = QtWidgets.QFileDialog.getExistingDirectory(self,
                                                          "Open Directory",
                                                          self.__config.ROOT_DIR,
                                                          QtWidgets.QFileDialog.ShowDirsOnly |
                                                          QtWidgets.QFileDialog.DontResolveSymlinks)
        files = list(pathlib.Path(dirs).glob("**/*.*"))
        self.__logic.AddFiles(files)

    @QtCore.Slot()
    def __clear_files(self):
        """Remove all files from the editor's working set."""
        self.__logic.ClearFiles()

    @QtCore.Slot()
    def __close(self):
        # NOTE(review): exit(0) raises SystemExit inside a Qt slot; consider
        # self.__app.quit() for a clean event-loop shutdown -- TODO confirm.
        exit(0)

    @QtCore.Slot()
    def __load_image(self):
        """Show the editor's current file in the viewer (image or video)."""
        file_path = pathlib.Path(self.__logic.current_file)
        if file_path.name != "" and file_path.exists():
            if file_path.suffix in DowMimeType("").image_formats_suffix_list:
                self.__image.SetImage(file_path)
            else:
                self.__image.SetVideo(file_path)
        else:
            self.__image.Clear()

    @QtCore.Slot()
    def resizeEvent(self, event: QtGui.QResizeEvent):
        # Re-render the current file so it is rescaled to the new widget size.
        self.__load_image()

    def keyPressEvent(self, event : QtGui.QKeyEvent):
        # Forward key presses to the editor logic (navigation shortcuts etc.).
        super(MainWidget, self).keyPressEvent(event)
        self.__logic.keyPressEvent(event)
# Script entry point: create the Qt application, build and show the main window.
if __name__ == "__main__":
    app = QtWidgets.QApplication(sys.argv)
    widget = MainWidget(app)
    widget.resize(800, 600)
    widget.show()
sys.exit(app.exec()) | 0.307982 | 0.059866 |
ANSIBLE_METADATA = {
'metadata_version': '0.2',
'status': ['preview'],
'supported_by': 'godspeed-you'
}
DOCUMENTATION = """
---
module: pvesh
short_description: Managing Proxmox Nodes and Cluster through the command line tool pvesh
description: With the C(pvesh) module it is possible to use the Proxmox API directly on a Proxmox node instead of going through a HTTPS connection providing user and password in an Ansible role.
author: "<NAME> <EMAIL>)"
options:
command:
description: The command to be used. The usable commands for a specific task can be seen in the official documentation of the Proxmox API: https://pve.proxmox.com/pve-docs/api-viewer/
required: true
choices:
- ls
- get
- create
- set
- delete
path:
description: The path in the API to work on. Also explained in the official documentation
required: true
options:
description: All other values, that can be specified or are needed for an API call. These values are provided as a dictionary.
required: false
"""
EXAMPLES = """
---
- name: Add a user to proxmox cluster
pvesh:
command: create
path: access/users
options:
userid: myUser@pam
email: <EMAIL>
- name: Get all nodes of proxmox cluster
pvesh:
command: get
path: /nodes
- name: Renew acme certificate
pvesh:
command: set
path: 'nodes/{{ ansible_hostname }}/certificates/acme/certificate'
options:
node: '{{ ansible_fqdn }}'
"""
RETURN = """
---
status:
description: The status code as returned from the API or defined in the module. HTTP status codes are used.
type: int
result:
description: The return value provided by pvesh.
type: dict
command:
description: The exact command created and used by the module.
type: str
"""
import subprocess
import json
from ansible.module_utils.basic import AnsibleModule
def execute_pvesh(handler, api_path, **params):
    """Build and run a ``pvesh`` command and classify its outcome.

    Args:
        handler: pvesh sub-command (``get``, ``set``, ``create``, ``delete``, ``ls``).
        api_path: Proxmox API path to operate on.
        **params: Extra options, passed through as ``-key value`` pairs.

    Returns:
        dict with ``status`` (HTTP-like code), ``stderr_message``, ``result``
        (parsed JSON when possible) and the ``command`` list that was run.
    """
    command = [
        "/usr/bin/pvesh",
        handler.lower(),
        api_path,
        "--output=json"]
    for parameter, value in params.items():
        command += ["-%s" % parameter, "%s" % value]
    # BUGFIX: universal_newlines=True makes communicate() return str.  Without
    # it, stdout/stderr are bytes on Python 3, so the substring checks below
    # ("already exists" in stderr, ...) raise TypeError and startswith() with
    # a str argument can never match.
    pipe = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                            universal_newlines=True)
    (result, stderr) = pipe.communicate()
    if len(stderr) >= 1:
        try:  # Sometimes pvesh is very kind and provides already a status code
            return dict(status=int(stderr[:4]),
                        # BUGFIX: was stderr[:4], which truncated the message
                        # to just the status digits; report the full text.
                        stderr_message=stderr,
                        result=result,
                        command=command)
        except ValueError:
            # No leading status code: infer one from well-known error texts.
            status = 512
            if stderr.startswith("No '%s' handler defined for '%s'" % (handler, api_path)):
                status = 405
            elif "already exists" in stderr:
                status = 304
            elif "does not exist" in stderr or \
                 "no such" in stderr or \
                 "not found" in stderr:
                status = 404
            return dict(status=status,
                        stderr_message=stderr,
                        result=result,
                        command=command)
    # Clean run: derive a status code from the handler and the payload.
    if handler in ['set', 'create', 'delete']:
        if not result:
            status = 204
        else:
            status = 201
    else:
        status = 200
    try:
        result = json.loads(result)
    except ValueError:
        pass  # not JSON (e.g. plain-text output); return it verbatim
    return dict(status=status,
                stderr_message='',
                result=result,
                command=command)
def map_status(status, command):
    """Translate an HTTP-like status code into an Ansible result state.

    Args:
        status: Numeric status code produced by execute_pvesh().
        command: The pvesh handler that produced the status.

    Returns:
        One of ``'ok'``, ``'changed'`` or ``'failed'``.
    """
    status_map = {'get': {200: 'ok'},
                  # BUGFIX: 'ls' is an accepted handler choice in main() but
                  # had no entry here, so a successful `pvesh ls` raised
                  # KeyError.  It is read-only, just like 'get'.
                  'ls': {200: 'ok'},
                  'set': {201: 'changed', 204: 'changed'},
                  'create': {201: 'changed', 204: 'changed', 304: 'ok'},
                  'delete': {201: 'changed', 204: 'changed', 404: 'ok'}}
    return status_map[command].get(status, 'failed')
def main():
    """Entry point: expose pvesh as an Ansible module and report the result."""
    argument_spec = dict(
        handler=dict(type='str',
                     choices=['create', 'delete', 'get', 'ls', 'set', ],
                     required=True,
                     aliases=['command']),
        path=dict(type='str',
                  required=True),
        options=dict(type='dict',
                     default={},
                     required=False),
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True)

    handler = module.params['handler']
    # Run pvesh with the requested handler, path and pass-through options.
    outcome = execute_pvesh(handler,
                            module.params['path'],
                            **module.params['options'])
    state = map_status(outcome['status'], handler)

    if state == 'failed':
        # Surface pvesh's stderr plus the exact command for debugging.
        module.fail_json(msg=outcome.get('stderr_message'),
                         status=outcome['status'],
                         result=outcome['result'],
                         command=' '.join(outcome['command']))
    module.exit_json(status=outcome['status'],
                     changed=(state == 'changed'),
                     result=outcome['result'],
                     command=' '.join(outcome['command']))
if __name__ == '__main__':
main() | pvesh.py |
ANSIBLE_METADATA = {
'metadata_version': '0.2',
'status': ['preview'],
'supported_by': 'godspeed-you'
}
DOCUMENTATION = """
---
module: pvesh
short_description: Managing Proxmox Nodes and Cluster through the command line tool pvesh
description: With the C(pvesh) module it is possible to use the Proxmox API directly on a Proxmox node instead of going through a HTTPS connection providing user and password in an Ansible role.
author: "<NAME> <EMAIL>)"
options:
command:
description: The command to be used. The usable commands for a specific task can be seen in the official documentation of the Proxmox API: https://pve.proxmox.com/pve-docs/api-viewer/
required: true
choices:
- ls
- get
- create
- set
- delete
path:
description: The path in the API to work on. Also explained in the official documentation
required: true
options:
description: All other values, that can be specified or are needed for an API call. These values are provided as a dictionary.
required: false
"""
EXAMPLES = """
---
- name: Add a user to proxmox cluster
pvesh:
command: create
path: access/users
options:
userid: myUser@pam
email: <EMAIL>
- name: Get all nodes of proxmox cluster
pvesh:
command: get
path: /nodes
- name: Renew acme certificate
pvesh:
command: set
path: 'nodes/{{ ansible_hostname }}/certificates/acme/certificate'
options:
node: '{{ ansible_fqdn }}'
"""
RETURN = """
---
status:
description: The status code as returned from the API or defined in the module. HTTP status codes are used.
type: int
result:
description: The return value provided by pvesh.
type: dict
command:
description: The exact command created and used by the module.
type: str
"""
import subprocess
import json
from ansible.module_utils.basic import AnsibleModule
def execute_pvesh(handler, api_path, **params):
    """Build a pvesh command line, execute it and classify the outcome.

    Args:
        handler: pvesh sub-command ('ls', 'get', 'create', 'set', 'delete').
        api_path: Proxmox API path to operate on (e.g. 'access/users').
        **params: additional API options, passed to pvesh as ``-name value`` pairs.

    Returns:
        dict with keys 'status' (HTTP-like int), 'stderr_message' (str),
        'result' (parsed JSON when possible, otherwise raw output) and
        'command' (the argv list that was executed).
    """
    command = [
        "/usr/bin/pvesh",
        handler.lower(),
        api_path,
        "--output=json"]
    for parameter, value in params.items():
        command += ["-%s" % parameter, "%s" % value]
    pipe = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    (result, stderr) = pipe.communicate()
    # Bug fix: Popen without text mode yields bytes on Python 3; decode once so
    # the substring checks below compare str with str instead of raising TypeError.
    if isinstance(result, bytes):
        result = result.decode('utf-8', errors='replace')
    if isinstance(stderr, bytes):
        stderr = stderr.decode('utf-8', errors='replace')
    if len(stderr) >= 1:
        try:  # Sometimes pvesh is very kind and provides already a status code
            return dict(status=int(stderr[:4]),
                        # Bug fix: previously only the first four characters of
                        # stderr were reported, truncating the error message.
                        stderr_message=stderr,
                        result=result,
                        command=command)
        except ValueError:
            # No numeric prefix: classify by well-known error texts.
            status = 512
            if stderr.startswith("No '%s' handler defined for '%s'" % (handler, api_path)):
                status = 405
            elif "already exists" in stderr:
                status = 304
            elif "does not exist" in stderr or \
                    "no such" in stderr or \
                    "not found" in stderr:
                status = 404
            return dict(status=status,
                        stderr_message=stderr,
                        result=result,
                        command=command)
    # No stderr: mutating handlers report 204 (no body) or 201; reads report 200.
    if handler in ['set', 'create', 'delete']:
        if not result:
            status = 204
        else:
            status = 201
    else:
        status = 200
    try:
        result = json.loads(result)
    except ValueError:
        pass
    return dict(status=status,
                stderr_message='',
                result=result,
                command=command)
def map_status(status, command):
    """Translate an HTTP-like status code into an Ansible outcome.

    Args:
        status: HTTP-like status code produced by execute_pvesh.
        command: the pvesh handler that was run ('get', 'set', 'create', 'delete').

    Returns:
        One of 'ok', 'changed' or 'failed'.
    """
    outcomes_by_handler = {
        'get': {200: 'ok'},
        'set': {201: 'changed', 204: 'changed'},
        'create': {201: 'changed', 204: 'changed', 304: 'ok'},
        'delete': {201: 'changed', 204: 'changed', 404: 'ok'},
    }
    handler_outcomes = outcomes_by_handler[command]
    return handler_outcomes.get(status, 'failed')
def main():
    """ Main function to provide pvesh functionality as an Ansible module."""
    # Argument spec: 'handler' is the pvesh sub-command (aliased as 'command'),
    # 'path' the API endpoint, 'options' a free-form dict of API parameters.
    args = dict(
        handler=dict(type='str',
                     choices=['create', 'delete', 'get', 'ls', 'set', ],
                     required=True,
                     aliases=['command']),
        path=dict(type='str',
                  required=True),
        options=dict(type='dict',
                     default={},
                     required=False),
    )
    # NOTE(review): supports_check_mode=True is declared, but the pvesh call
    # below runs unconditionally -- check mode is not actually honoured here.
    ansible = AnsibleModule(
        argument_spec=args,
        supports_check_mode=True)
    handler = ansible.params['handler']
    path = ansible.params['path']
    options = ansible.params['options']
    result = execute_pvesh(handler, path, **options)
    status = result['status']
    command = result['command']
    result_final = result['result']
    # Map the HTTP-like status onto ok/changed/failed for Ansible reporting.
    check_status = map_status(status, handler)
    if check_status == 'ok':
        changed = False
    elif check_status == 'changed':
        changed = True
    elif check_status == 'failed':
        # fail_json raises SystemExit, so execution never reaches the
        # exit_json call below ('changed' stays unbound only on this path).
        ansible.fail_json(msg=result.get('stderr_message'),
                          status=status,
                          result=result_final,
                          command=' '.join(command))
    ansible_result = dict(
        status=status,
        changed=changed,
        result=result_final,
        command=' '.join(command))
    ansible.exit_json(**ansible_result)
# Script entry point (stray dataset residue "| 0.473901 | 0.353205 |" removed).
if __name__ == '__main__':
    main()
from onagame2015.validations import coord_in_arena, direction_is_valid
from onagame2015.lib import (
GameBaseObject,
Coordinate,
UNIT_TYPE_ATTACK,
UNIT_TYPE_BLOCKED,
UNIT_TYPE_HQ,
)
class BaseUnit(GameBaseObject):
    """Common state shared by every piece on the board.

    Attributes:
        id: unique identifier (the object's own CPython identity).
        coordinate: current position on the arena grid.
        arena: the arena this unit lives in.
        player_id: owner of the unit (None for neutral obstacles).
        type: unit-type constant, assigned by subclasses.
    """

    def __init__(self, coordinate, player_id, arena):
        # Object identity doubles as a cheap unique id.
        self.id = id(self)
        self.player_id = player_id
        self.arena = arena
        self.coordinate = coordinate
        self.type = None
class HeadQuarter(BaseUnit):
    """A player's base: holds reserve units and garrisons them on its tile."""

    def __init__(self, coordinate, player_id, initial_units, arena):
        super(HeadQuarter, self).__init__(coordinate, player_id, arena)
        self.type = UNIT_TYPE_HQ
        # Reserve of units still available to deploy.
        self.units = initial_units

    def __repr__(self):
        return 'HQ:{}Id:{}'.format(self.player_id, self.id)

    def garrison_unit(self, unit):
        """Place ``unit`` on the HQ's own tile."""
        self.arena.set_content_on_tile(self.coordinate, unit)
class BlockedPosition(BaseUnit):
    """An impassable tile marker; owned by no player."""

    def __init__(self, coordinate, arena, rep):
        # Blocked tiles are neutral, hence player_id=None.
        super(BlockedPosition, self).__init__(coordinate, None, arena)
        self.type = UNIT_TYPE_BLOCKED
        # Character used to render this obstacle.
        self.rep = rep

    def __repr__(self):
        return '%s' % self.rep
class AttackUnit(BaseUnit):
    """A mobile combat unit that can move one tile at a time."""

    def __init__(self, coordinate, player_id, arena):
        super(AttackUnit, self).__init__(coordinate, player_id, arena)
        self.type = UNIT_TYPE_ATTACK

    def __repr__(self):
        return 'U:{}Id:{}'.format(self.player_id, self.id)

    def __json__(self):
        return {'key': 'AttackUnit'}

    def move(self, direction):
        """Move attacker into new valid position:
        # Direction must be one of ((0, 1), (0, -1), (1, 0), (-1, 0))
        # New position must be part of the arena grid
        # New position must be occupied by other attack unit of same player, or
        empty
        @return: :dict: indicating the destination and end
        {
          'from': <coord>,
          'to': <coord>,
        }
        """
        if not direction_is_valid(direction):
            return {
                'from': self.coordinate,
                'to': self.coordinate,
                'error': 'Direction {} is invalid'.format(direction),
            }
        delta_x, delta_y = direction
        latitude = self.coordinate.latitude + delta_x
        longitude = self.coordinate.longitude + delta_y
        desired_coordinate = Coordinate(latitude, longitude)
        if not coord_in_arena(desired_coordinate, self.arena):
            return {
                'from': self.coordinate,
                'to': desired_coordinate,
                'error': 'Invalid position ({}, {})'.format(latitude, longitude),
            }
        destination_tile = self.arena[desired_coordinate]
        if not destination_tile.reachable:
            return {
                'from': self.coordinate,
                'to': desired_coordinate,
                'error': 'Blocked position ({}, {})'.format(latitude, longitude),
            }
        if destination_tile.hq_for(self.player_id) and not destination_tile.empty:
            return {
                'from': self.coordinate,
                'to': desired_coordinate,
                'error': 'You can place only one unit on your base',
            }
        if not self.can_invade(destination_tile):
            return {
                'from': self.coordinate,
                'to': desired_coordinate,
                'error': 'All occupiers must be of the same team',
            }
        # Move from current position to next one
        self.arena.move(self, self.coordinate, desired_coordinate)
        origin = self.coordinate
        self.coordinate = desired_coordinate
        return {
            'from': origin,
            'to': self.coordinate,
            'error': '',
        }

    def can_invade(self, tile):
        """It is possible to invade a tile if
        * it is empty
        * all units in it are from the same team
        * it is the enemy's headquarter and it is empty
        """
        return self._all_units_are_mine(tile) or self._enemy_headquarter_alone(tile)

    def _all_units_are_mine(self, tile):
        """@return :bool: indicating if all the units in <tile> are from
        <self>."""
        return all(unit.player_id == self.player_id for unit in tile.items)

    def _enemy_headquarter_alone(self, tile):
        """@return :bool: indicating if the <tile> is the enemy HeadQuarter,
        and is alone."""
        enemy_units = [u for u in tile.items if u.player_id != self.player_id]
        return len(enemy_units) == 1 and enemy_units[0].type == UNIT_TYPE_HQ
# De-fused from dataset residue: the import below belongs on its own line.
from onagame2015.validations import coord_in_arena, direction_is_valid
from onagame2015.lib import (
GameBaseObject,
Coordinate,
UNIT_TYPE_ATTACK,
UNIT_TYPE_BLOCKED,
UNIT_TYPE_HQ,
)
class BaseUnit(GameBaseObject):
    """Common state shared by every piece on the board.

    Attributes:
        id: unique identifier (the object's own CPython identity).
        coordinate: current position on the arena grid.
        arena: the arena this unit lives in.
        player_id: owner of the unit (None for neutral obstacles).
        type: unit-type constant, assigned by subclasses.
    """

    def __init__(self, coordinate, player_id, arena):
        # Object identity doubles as a cheap unique id.
        self.id = id(self)
        self.player_id = player_id
        self.arena = arena
        self.coordinate = coordinate
        self.type = None
class HeadQuarter(BaseUnit):
    """A player's base: holds reserve units and garrisons them on its tile."""

    def __init__(self, coordinate, player_id, initial_units, arena):
        super(HeadQuarter, self).__init__(coordinate, player_id, arena)
        self.type = UNIT_TYPE_HQ
        # Reserve of units still available to deploy.
        self.units = initial_units

    def __repr__(self):
        return 'HQ:{}Id:{}'.format(self.player_id, self.id)

    def garrison_unit(self, unit):
        """Place ``unit`` on the HQ's own tile."""
        self.arena.set_content_on_tile(self.coordinate, unit)
class BlockedPosition(BaseUnit):
    """An impassable tile marker; owned by no player."""

    def __init__(self, coordinate, arena, rep):
        # Blocked tiles are neutral, hence player_id=None.
        super(BlockedPosition, self).__init__(coordinate, None, arena)
        self.type = UNIT_TYPE_BLOCKED
        # Character used to render this obstacle.
        self.rep = rep

    def __repr__(self):
        return '%s' % self.rep
class AttackUnit(BaseUnit):
    """A mobile combat unit that can move one tile at a time."""

    def __init__(self, coordinate, player_id, arena):
        super(AttackUnit, self).__init__(coordinate, player_id, arena)
        self.type = UNIT_TYPE_ATTACK

    def __repr__(self):
        return 'U:{}Id:{}'.format(self.player_id, self.id)

    def __json__(self):
        return {'key': 'AttackUnit'}

    def move(self, direction):
        """Move attacker into new valid position:
        # Direction must be one of ((0, 1), (0, -1), (1, 0), (-1, 0))
        # New position must be part of the arena grid
        # New position must be occupied by other attack unit of same player, or
        empty
        @return: :dict: indicating the destination and end
        {
          'from': <coord>,
          'to': <coord>,
        }
        """
        if not direction_is_valid(direction):
            return {
                'from': self.coordinate,
                'to': self.coordinate,
                'error': 'Direction {} is invalid'.format(direction),
            }
        delta_x, delta_y = direction
        latitude = self.coordinate.latitude + delta_x
        longitude = self.coordinate.longitude + delta_y
        desired_coordinate = Coordinate(latitude, longitude)
        if not coord_in_arena(desired_coordinate, self.arena):
            return {
                'from': self.coordinate,
                'to': desired_coordinate,
                'error': 'Invalid position ({}, {})'.format(latitude, longitude),
            }
        destination_tile = self.arena[desired_coordinate]
        if not destination_tile.reachable:
            return {
                'from': self.coordinate,
                'to': desired_coordinate,
                'error': 'Blocked position ({}, {})'.format(latitude, longitude),
            }
        if destination_tile.hq_for(self.player_id) and not destination_tile.empty:
            return {
                'from': self.coordinate,
                'to': desired_coordinate,
                'error': 'You can place only one unit on your base',
            }
        if not self.can_invade(destination_tile):
            return {
                'from': self.coordinate,
                'to': desired_coordinate,
                'error': 'All occupiers must be of the same team',
            }
        # Move from current position to next one
        self.arena.move(self, self.coordinate, desired_coordinate)
        origin = self.coordinate
        self.coordinate = desired_coordinate
        return {
            'from': origin,
            'to': self.coordinate,
            'error': '',
        }

    def can_invade(self, tile):
        """It is possible to invade a tile if
        * it is empty
        * all units in it are from the same team
        * it is the enemy's headquarter and it is empty
        """
        return self._all_units_are_mine(tile) or self._enemy_headquarter_alone(tile)

    def _all_units_are_mine(self, tile):
        """@return :bool: indicating if all the units in <tile> are from
        <self>."""
        return all(unit.player_id == self.player_id for unit in tile.items)

    def _enemy_headquarter_alone(self, tile):
        """@return :bool: indicating if the <tile> is the enemy HeadQuarter,
        and is alone."""
        # Dataset residue ("| 0.708515 | 0.36139 |") removed from the final line.
        enemy_units = [u for u in tile.items if u.player_id != self.player_id]
        return len(enemy_units) == 1 and enemy_units[0].type == UNIT_TYPE_HQ
import json
import logging
import time
from urllib.request import urlopen, Request
from logging import Formatter, LogRecord
from newrelic.api.time_trace import get_linking_metadata
from newrelic.common.object_names import parse_exc_info
from newrelic.core.config import is_expected_error
def format_exc_info(exc_info):
    """Build New Relic error attributes for an ``exc_info`` triple.

    Returns a dict with 'error.class' and 'error.message', plus
    'error.expected' when the expected-error check yields a verdict.
    """
    _, _, fullnames, message = parse_exc_info(exc_info)
    attributes = {
        "error.class": fullnames[0],
        "error.message": message,
    }
    expected = is_expected_error(exc_info)
    if expected is None:
        return attributes
    attributes["error.expected"] = expected
    return attributes
class NewRelicContextFormatter(Formatter):
    """Formatter that emits one compact JSON object per record, enriched
    with New Relic linking metadata and any ``extra=`` attributes."""

    # Attributes present on every freshly-created LogRecord; anything beyond
    # these was supplied via ``extra=`` and is forwarded as "extra.<key>".
    DEFAULT_LOG_RECORD_KEYS = frozenset(vars(LogRecord("", 0, "", 0, "", (), None)))

    def __init__(self, *args, **kwargs):
        # Extra arguments are accepted for drop-in compatibility with logging
        # configuration, but the base Formatter defaults are always used.
        super(NewRelicContextFormatter, self).__init__()

    @classmethod
    def log_record_to_dict(cls, record):
        """Flatten ``record`` into the dict shape expected by the Log API."""
        output = {
            "timestamp": int(record.created * 1000),
            "message": record.getMessage(),
            "log.level": record.levelname,
            "logger.name": record.name,
            "thread.id": record.thread,
            "thread.name": record.threadName,
            "process.id": record.process,
            "process.name": record.processName,
            "file.name": record.pathname,
            "line.number": record.lineno,
        }
        output.update(get_linking_metadata())
        DEFAULT_LOG_RECORD_KEYS = cls.DEFAULT_LOG_RECORD_KEYS
        if len(record.__dict__) > len(DEFAULT_LOG_RECORD_KEYS):
            for key in record.__dict__:
                if key not in DEFAULT_LOG_RECORD_KEYS:
                    output["extra." + key] = getattr(record, key)
        if record.exc_info:
            output.update(format_exc_info(record.exc_info))
        return output

    def format(self, record):
        def safe_str(object, *args, **kwargs):
            """Convert object to str, catching any errors raised."""
            # Bug fix: the bare ``except:`` also swallowed SystemExit and
            # KeyboardInterrupt; only genuine conversion errors belong here.
            try:
                return str(object, *args, **kwargs)
            except Exception:
                return "<unprintable %s object>" % type(object).__name__
        return json.dumps(self.log_record_to_dict(record), default=safe_str, separators=(",", ":"))
class NewRelicLogHandler(logging.Handler):
    """
    Implementation was derived from: https://pypi.org/project/new-relic-logger-for-python/0.2.0/
    file: newrelic_logger.handlers.py
    A class which sends records to a New Relic via its API.
    """

    def __init__(self, level=logging.INFO, app_id=0, app_name=None, license_key=None, region="US", ):
        """
        Initialize the instance with the region and license_key
        """
        super(NewRelicLogHandler, self).__init__(level=level)
        self.app_id = app_id
        self.app_name = app_name
        self.host_us = "log-api.newrelic.com"
        self.host_eu = "log-api.eu.newrelic.com"
        self.url = "/log/v1"
        self.region = region.upper()
        self.license_key = license_key
        self.setFormatter(NewRelicContextFormatter())

    def prepare(self, record):
        """Normalise the record for transport: freeze the rendered message and
        stash the linking metadata in ``record.args``."""
        self.format(record)
        record.msg = record.message
        record.args = get_linking_metadata()
        # exc_info is already folded into the formatted payload by the formatter.
        record.exc_info = None
        return record

    def emit(self, record):
        """
        Emit a record.
        Send the record to the New Relic API
        """
        try:
            # Bug fix: removed a leftover debug print() that echoed every log
            # message to stdout on each emit.
            record = self.prepare(record)
            data_formatted_dict = json.loads(self.format(record))
            data = {
                **data_formatted_dict,
                "appId": self.app_id,
                "labels": {"app": self.app_name},
                **record.args,
            }
            self.send_log(data=data)
        except Exception:
            self.handleError(record)

    def send_log(self, data: dict):
        """POST ``data`` to the region-appropriate New Relic Log API host,
        retrying once per 429 response.

        (Annotation fix: the parameter was annotated ``data: {}`` -- a dict
        literal, not a type.)
        """
        host = self.host_us if self.region == "US" else self.host_eu
        req = Request(
            url="https://" + host + self.url,
            data=json.dumps(data).encode(),
            headers={
                'X-License-Key': self.license_key,
                'Content-Type': "application/json",
            },
            method="POST"
        )
        # this line helps to forward logs to newrelic logs api. I made sure to use a python standard lib
        # see https://docs.newrelic.com/docs/logs/log-api/introduction-log-api
        resp = urlopen(req)  # nosec
        if resp.status // 100 != 2:
            if resp.status == 429:
                print("New Relic API Response: Retry-After")
                time.sleep(1)
                self.send_log(data=data)
                return
            print("Error sending log to new relic")
            print("Status Code: {}".format(resp.status))
            print("Reason: {}".format(resp.reason))
            print("url: {}".format(resp.url))
            print(resp.read().decode())
            print("data: {}".format(data))
import json
import logging
import time
from urllib.request import urlopen, Request
from logging import Formatter, LogRecord
from newrelic.api.time_trace import get_linking_metadata
from newrelic.common.object_names import parse_exc_info
from newrelic.core.config import is_expected_error
def format_exc_info(exc_info):
    """Build New Relic error attributes for an ``exc_info`` triple.

    Returns a dict with 'error.class' and 'error.message', plus
    'error.expected' when the expected-error check yields a verdict.
    """
    _, _, fullnames, message = parse_exc_info(exc_info)
    attributes = {
        "error.class": fullnames[0],
        "error.message": message,
    }
    expected = is_expected_error(exc_info)
    if expected is None:
        return attributes
    attributes["error.expected"] = expected
    return attributes
class NewRelicContextFormatter(Formatter):
    """Formatter that emits one compact JSON object per record, enriched
    with New Relic linking metadata and any ``extra=`` attributes."""

    # Attributes present on every freshly-created LogRecord; anything beyond
    # these was supplied via ``extra=`` and is forwarded as "extra.<key>".
    DEFAULT_LOG_RECORD_KEYS = frozenset(vars(LogRecord("", 0, "", 0, "", (), None)))

    def __init__(self, *args, **kwargs):
        # Extra arguments are accepted for drop-in compatibility with logging
        # configuration, but the base Formatter defaults are always used.
        super(NewRelicContextFormatter, self).__init__()

    @classmethod
    def log_record_to_dict(cls, record):
        """Flatten ``record`` into the dict shape expected by the Log API."""
        output = {
            "timestamp": int(record.created * 1000),
            "message": record.getMessage(),
            "log.level": record.levelname,
            "logger.name": record.name,
            "thread.id": record.thread,
            "thread.name": record.threadName,
            "process.id": record.process,
            "process.name": record.processName,
            "file.name": record.pathname,
            "line.number": record.lineno,
        }
        output.update(get_linking_metadata())
        DEFAULT_LOG_RECORD_KEYS = cls.DEFAULT_LOG_RECORD_KEYS
        if len(record.__dict__) > len(DEFAULT_LOG_RECORD_KEYS):
            for key in record.__dict__:
                if key not in DEFAULT_LOG_RECORD_KEYS:
                    output["extra." + key] = getattr(record, key)
        if record.exc_info:
            output.update(format_exc_info(record.exc_info))
        return output

    def format(self, record):
        def safe_str(object, *args, **kwargs):
            """Convert object to str, catching any errors raised."""
            # Bug fix: the bare ``except:`` also swallowed SystemExit and
            # KeyboardInterrupt; only genuine conversion errors belong here.
            try:
                return str(object, *args, **kwargs)
            except Exception:
                return "<unprintable %s object>" % type(object).__name__
        return json.dumps(self.log_record_to_dict(record), default=safe_str, separators=(",", ":"))
class NewRelicLogHandler(logging.Handler):
    """
    Implementation was derived from: https://pypi.org/project/new-relic-logger-for-python/0.2.0/
    file: newrelic_logger.handlers.py
    A class which sends records to a New Relic via its API.
    """

    def __init__(self, level=logging.INFO, app_id=0, app_name=None, license_key=None, region="US", ):
        """
        Initialize the instance with the region and license_key
        """
        super(NewRelicLogHandler, self).__init__(level=level)
        self.app_id = app_id
        self.app_name = app_name
        self.host_us = "log-api.newrelic.com"
        self.host_eu = "log-api.eu.newrelic.com"
        self.url = "/log/v1"
        self.region = region.upper()
        self.license_key = license_key
        self.setFormatter(NewRelicContextFormatter())

    def prepare(self, record):
        """Normalise the record for transport: freeze the rendered message and
        stash the linking metadata in ``record.args``."""
        self.format(record)
        record.msg = record.message
        record.args = get_linking_metadata()
        # exc_info is already folded into the formatted payload by the formatter.
        record.exc_info = None
        return record

    def emit(self, record):
        """
        Emit a record.
        Send the record to the New Relic API
        """
        try:
            # Bug fix: removed a leftover debug print() that echoed every log
            # message to stdout on each emit. (Trailing dataset residue
            # "| 0.554953 | 0.144028 |" also removed from the final line.)
            record = self.prepare(record)
            data_formatted_dict = json.loads(self.format(record))
            data = {
                **data_formatted_dict,
                "appId": self.app_id,
                "labels": {"app": self.app_name},
                **record.args,
            }
            self.send_log(data=data)
        except Exception:
            self.handleError(record)

    def send_log(self, data: dict):
        """POST ``data`` to the region-appropriate New Relic Log API host,
        retrying once per 429 response.

        (Annotation fix: the parameter was annotated ``data: {}`` -- a dict
        literal, not a type.)
        """
        host = self.host_us if self.region == "US" else self.host_eu
        req = Request(
            url="https://" + host + self.url,
            data=json.dumps(data).encode(),
            headers={
                'X-License-Key': self.license_key,
                'Content-Type': "application/json",
            },
            method="POST"
        )
        # this line helps to forward logs to newrelic logs api. I made sure to use a python standard lib
        # see https://docs.newrelic.com/docs/logs/log-api/introduction-log-api
        resp = urlopen(req)  # nosec
        if resp.status // 100 != 2:
            if resp.status == 429:
                print("New Relic API Response: Retry-After")
                time.sleep(1)
                self.send_log(data=data)
                return
            print("Error sending log to new relic")
            print("Status Code: {}".format(resp.status))
            print("Reason: {}".format(resp.reason))
            print("url: {}".format(resp.url))
            print(resp.read().decode())
            print("data: {}".format(data))
import json
import boto3
from boto3.dynamodb.conditions import Key
import datetime
from datetime import datetime, timedelta, date
# Module-level DynamoDB handle: created once at import time so warm Lambda
# invocations reuse the same connection.
dynamodb = boto3.resource('dynamodb', region_name='eu-west-2')
def lambda_handler(event, context):
    """Book a meeting room if it exists, has capacity and is free.

    Expects roomName, capacity, date, time, duration, meetingName and
    meetingHost in the request's query string parameters.
    """
    room_to_be_booked = query_room(event)
    # Bug fix: the lookup must be validated BEFORE indexing into it; previously
    # an unknown room raised IndexError on room_to_be_booked[0] instead of
    # returning the friendly message below.
    if not room_to_be_booked:
        return {
            'body': {'Message': "Sorry! The room you entered doesn't exist, check your spelling and try again."}
        }
    room_capacity = int(room_to_be_booked[0]['roomCapacity'])
    room_is_booked = query_booking(event)
    requested_booking_capacity = int(event['queryStringParameters']['capacity'])
    if room_is_booked:
        return {
            'body': {'Message': "Sorry! This room already has a meeting booked at this time."}
        }
    if room_capacity >= requested_booking_capacity:
        table = dynamodb.Table('Room-Bookings')
        table.put_item(
            Item={
                'roomName': event['queryStringParameters']['roomName'],
                'capacity': event['queryStringParameters']['capacity'],
                'date': event['queryStringParameters']['date'],
                'time': event['queryStringParameters']['time'],
                'duration': event['queryStringParameters']['duration'],
                'meetingName': event['queryStringParameters']['meetingName'],
                'meetingHost': event['queryStringParameters']['meetingHost']
            }
        )
        return {
            'statusCode': 200,
            'body': {'Message:': "Successfully added booking"},
        }
    else:
        message = 'Sorry! This room only has a capacity of ' + room_to_be_booked[0]['roomCapacity'] + '.'
        return {
            'body': {'Message': message}
        }
def query_room(event):
    """Look up the requested room in the Meeting-Rooms table.

    Returns the (possibly empty) list of matching items.
    """
    room_name = event['queryStringParameters']['roomName']
    rooms_table = dynamodb.Table('Meeting-Rooms')
    response = rooms_table.query(
        KeyConditionExpression=Key('roomName').eq(room_name)
    )
    return response['Items']
def query_booking(event):
    """Return True when the requested room/date/time collides with an
    existing booking, False otherwise."""
    table = dynamodb.Table('Room-Bookings')
    response = table.query(
        KeyConditionExpression=Key('roomName').eq(event['queryStringParameters']['roomName'])
    )
    requested_date = event['queryStringParameters']['date']
    # Only bookings on the same day can possibly overlap.
    # Bug fix: removed a leftover debug print() that leaked booking data to the logs.
    bookings = [booking for booking in response['Items']
                if booking['date'] == requested_date]
    if not bookings:
        return False
    return does_datetime_overlap(event, bookings)
def does_datetime_overlap(event, bookings):
    """Check the requested slot against every same-day booking.

    Returns True when at least one existing booking overlaps the slot
    defined by the request's start time and duration.
    """
    requested_duration = int(event['queryStringParameters']['duration'])
    requested_start_time = datetime.strptime(event['queryStringParameters']['time'], '%H:%M').time()
    requested_end_time = calculate_meeting_end_time(requested_start_time, requested_duration)
    # Evaluate every booking (no short-circuit) to match the original flow.
    overlap_flags = []
    for booking in bookings:
        booked_start_time = datetime.strptime(booking['time'], '%H:%M').time()
        booked_end_time = calculate_meeting_end_time(booked_start_time, int(booking['duration']))
        overlap_flags.append(
            is_overlap(requested_end_time, booked_end_time, requested_start_time, booked_start_time))
    return any(overlap_flags)
def calculate_meeting_end_time(initialTime, duration):
    """Return the wall-clock time ``duration`` minutes after ``initialTime``.

    Only the time component is returned, so the result wraps around midnight.
    """
    anchored = datetime.combine(date.min, initialTime) + timedelta(minutes=duration)
    return anchored.time()
def is_overlap(requested_end_time, booked_end_time, requested_start_time, booked_start_time):
    """Return True when the requested slot truly intersects the booked slot.

    All arguments are ``datetime.time`` values; they are anchored on the same
    dummy date so the intersection length can be computed as a timedelta.
    """
    requested_end_time = datetime.combine(date.min, requested_end_time)
    booked_end_time = datetime.combine(date.min, booked_end_time)
    requested_start_time = datetime.combine(date.min, requested_start_time)
    booked_start_time = datetime.combine(date.min, booked_start_time)
    overlap = min(booked_end_time, requested_end_time) - max(booked_start_time, requested_start_time)
    # Bug fix: a zero-length intersection (one meeting ending exactly when the
    # next starts) is not a conflict; only a strictly positive overlap is.
    # Also removed a leftover debug print of the delta.
    return overlap.total_seconds() > 0
# De-fused from dataset residue: the import below belongs on its own line.
import json
import boto3
from boto3.dynamodb.conditions import Key
import datetime
from datetime import datetime, timedelta, date
# Module-level DynamoDB handle: created once at import time so warm Lambda
# invocations reuse the same connection.
dynamodb = boto3.resource('dynamodb', region_name='eu-west-2')
def lambda_handler(event, context):
    """Book a meeting room if it exists, has capacity and is free.

    Expects roomName, capacity, date, time, duration, meetingName and
    meetingHost in the request's query string parameters.
    """
    room_to_be_booked = query_room(event)
    # Bug fix: the lookup must be validated BEFORE indexing into it; previously
    # an unknown room raised IndexError on room_to_be_booked[0] instead of
    # returning the friendly message below.
    if not room_to_be_booked:
        return {
            'body': {'Message': "Sorry! The room you entered doesn't exist, check your spelling and try again."}
        }
    room_capacity = int(room_to_be_booked[0]['roomCapacity'])
    room_is_booked = query_booking(event)
    requested_booking_capacity = int(event['queryStringParameters']['capacity'])
    if room_is_booked:
        return {
            'body': {'Message': "Sorry! This room already has a meeting booked at this time."}
        }
    if room_capacity >= requested_booking_capacity:
        table = dynamodb.Table('Room-Bookings')
        table.put_item(
            Item={
                'roomName': event['queryStringParameters']['roomName'],
                'capacity': event['queryStringParameters']['capacity'],
                'date': event['queryStringParameters']['date'],
                'time': event['queryStringParameters']['time'],
                'duration': event['queryStringParameters']['duration'],
                'meetingName': event['queryStringParameters']['meetingName'],
                'meetingHost': event['queryStringParameters']['meetingHost']
            }
        )
        return {
            'statusCode': 200,
            'body': {'Message:': "Successfully added booking"},
        }
    else:
        message = 'Sorry! This room only has a capacity of ' + room_to_be_booked[0]['roomCapacity'] + '.'
        return {
            'body': {'Message': message}
        }
def query_room(event):
    """Look up the requested room in the Meeting-Rooms table.

    Returns the (possibly empty) list of matching items.
    """
    room_name = event['queryStringParameters']['roomName']
    rooms_table = dynamodb.Table('Meeting-Rooms')
    response = rooms_table.query(
        KeyConditionExpression=Key('roomName').eq(room_name)
    )
    return response['Items']
def query_booking(event):
    """Return True when the requested room/date/time collides with an
    existing booking, False otherwise."""
    table = dynamodb.Table('Room-Bookings')
    response = table.query(
        KeyConditionExpression=Key('roomName').eq(event['queryStringParameters']['roomName'])
    )
    requested_date = event['queryStringParameters']['date']
    # Only bookings on the same day can possibly overlap.
    # Bug fix: removed a leftover debug print() that leaked booking data to the logs.
    bookings = [booking for booking in response['Items']
                if booking['date'] == requested_date]
    if not bookings:
        return False
    return does_datetime_overlap(event, bookings)
def does_datetime_overlap(event, bookings):
    """Check the requested slot against every same-day booking.

    Returns True when at least one existing booking overlaps the slot
    defined by the request's start time and duration.
    """
    requested_duration = int(event['queryStringParameters']['duration'])
    requested_start_time = datetime.strptime(event['queryStringParameters']['time'], '%H:%M').time()
    requested_end_time = calculate_meeting_end_time(requested_start_time, requested_duration)
    # Evaluate every booking (no short-circuit) to match the original flow.
    overlap_flags = []
    for booking in bookings:
        booked_start_time = datetime.strptime(booking['time'], '%H:%M').time()
        booked_end_time = calculate_meeting_end_time(booked_start_time, int(booking['duration']))
        overlap_flags.append(
            is_overlap(requested_end_time, booked_end_time, requested_start_time, booked_start_time))
    return any(overlap_flags)
def calculate_meeting_end_time(initialTime, duration):
    """Return the wall-clock time ``duration`` minutes after ``initialTime``.

    Only the time component is returned, so the result wraps around midnight.
    """
    anchored = datetime.combine(date.min, initialTime) + timedelta(minutes=duration)
    return anchored.time()
def is_overlap(requested_end_time, booked_end_time, requested_start_time, booked_start_time):
    """Return True when the requested slot truly intersects the booked slot.

    All arguments are ``datetime.time`` values; they are anchored on the same
    dummy date so the intersection length can be computed as a timedelta.
    """
    requested_end_time = datetime.combine(date.min, requested_end_time)
    booked_end_time = datetime.combine(date.min, booked_end_time)
    requested_start_time = datetime.combine(date.min, requested_start_time)
    booked_start_time = datetime.combine(date.min, booked_start_time)
    overlap = min(booked_end_time, requested_end_time) - max(booked_start_time, requested_start_time)
    # Bug fix: a zero-length intersection (one meeting ending exactly when the
    # next starts) is not a conflict; only a strictly positive overlap is.
    # Also removed a leftover debug print and trailing dataset residue.
    return overlap.total_seconds() > 0
import argparse
import textwrap
from starthinker.util.configuration import Configuration
from starthinker.task.dataset.run import dataset
from starthinker.task.google_api.run import google_api
from starthinker.task.bigquery.run import bigquery
def recipe_barnacle_dv360(config, auth_read, auth_write, partner, recipe_slug):
  """Gives DV clients ability to see which users have access to which parts of an
  account. Loads DV user profile mappings using the API into BigQuery and
  connects to a DataStudio dashboard.

     Args:
       auth_read (authentication) - Credentials used for reading data.
       auth_write (authentication) - Credentials used for writing data.
       partner (integer) - Partner ID to run user audit on.
       recipe_slug (string) - Name of Google BigQuery dataset to create.
  """

  # Step 1: ensure the destination BigQuery dataset exists.
  dataset(config, {
    'auth':auth_write,
    'dataset':recipe_slug
  })

  # Step 2: snapshot all DBM/DV report definitions into DV_Reports.
  google_api(config, {
    'auth':auth_read,
    'api':'doubleclickbidmanager',
    'version':'v1.1',
    'function':'queries.listqueries',
    'alias':'list',
    'results':{
      'bigquery':{
        'auth':auth_write,
        'dataset':recipe_slug,
        'table':'DV_Reports'
      }
    }
  })

  # Step 3: load all visible partners into DV_Partners.
  google_api(config, {
    'auth':auth_read,
    'api':'displayvideo',
    'version':'v1',
    'function':'partners.list',
    'kwargs':{
      'fields':'partners.displayName,partners.partnerId,nextPageToken'
    },
    'results':{
      'bigquery':{
        'auth':auth_write,
        'dataset':recipe_slug,
        'table':'DV_Partners'
      }
    }
  })

  # Step 4: load the audited partner's advertisers into DV_Advertisers.
  google_api(config, {
    'auth':auth_read,
    'api':'displayvideo',
    'version':'v1',
    'function':'advertisers.list',
    'kwargs':{
      'partnerId':partner,
      'fields':'advertisers.displayName,advertisers.advertiserId,nextPageToken'
    },
    'results':{
      'bigquery':{
        'auth':auth_write,
        'dataset':recipe_slug,
        'table':'DV_Advertisers'
      }
    }
  })

  # Step 5: load users into DV_Users. The users endpoint only accepts
  # SERVICE credentials, hence the hard-coded 'service' auth here.
  google_api(config, {
    'auth':'service',
    'api':'displayvideo',
    'version':'v1',
    'function':'users.list',
    'kwargs':{
    },
    'results':{
      'bigquery':{
        'auth':auth_write,
        'dataset':recipe_slug,
        'table':'DV_Users'
      }
    }
  })

  # Step 6: join users to partner/advertiser names in the
  # Barnacle_User_Roles view (one row per assigned user role).
  bigquery(config, {
    'auth':auth_write,
    'from':{
      'query':'''SELECT
         U.userId,
         U.name,
         U.email,
         U.displayName,
         REGEXP_EXTRACT(U.email, r'@(.+)') AS Domain,
         IF (ENDS_WITH(U.email, '.gserviceaccount.com'), 'Service', 'User') AS Authentication,
         IF((Select COUNT(advertiserId) from UNNEST(U.assignedUserRoles)) = 0, 'Partner', 'Advertiser') AS Scope,
         STRUCT(
           AUR.partnerId,
           P.displayName AS partnerName,
           AUR.userRole,
           AUR.advertiserId,
           A.displayName AS advertiserName,
           AUR.assignedUserRoleId
         ) AS assignedUserRoles,
         FROM `{dataset}.DV_Users` AS U,
         UNNEST(assignedUserRoles) AS AUR
         LEFT JOIN `{dataset}.DV_Partners` AS P
         ON AUR.partnerId=P.partnerId
         LEFT JOIN `{dataset}.DV_Advertisers` AS A
         ON AUR.advertiserId=A.advertiserId ''',
      'parameters':{
        'dataset':recipe_slug
      },
      'legacy':False
    },
    'to':{
      'dataset':recipe_slug,
      'view':'Barnacle_User_Roles'
    }
  })

  # Step 7: annotate report definitions with partner/advertiser names in
  # the Barnacle_Reports view.
  bigquery(config, {
    'auth':auth_write,
    'from':{
      'query':'''SELECT
         R.*,
         P.displayName AS partnerName,
         A.displayName AS advertiserName,
         FROM (
         SELECT
           queryId,
           (SELECT CAST(value AS INT64) FROM UNNEST(R.params.filters) WHERE type = 'FILTER_PARTNER' LIMIT 1) AS partnerId,
           (SELECT CAST(value AS INT64) FROM UNNEST(R.params.filters) WHERE type = 'FILTER_ADVERTISER' LIMIT 1) AS advertiserId,
           R.schedule.frequency,
           R.params.metrics,
           R.params.type,
           R.metadata.dataRange,
           R.metadata.sendNotification,
           DATE(TIMESTAMP_MILLIS(R.metadata.latestReportRunTimeMS)) AS latestReportRunTime,
         FROM `{dataset}.DV_Reports` AS R) AS R
         LEFT JOIN `{dataset}.DV_Partners` AS P
         ON R.partnerId=P.partnerId
         LEFT JOIN `{dataset}.DV_Advertisers` AS A
         ON R.advertiserId=A.advertiserId ''',
      'parameters':{
        'dataset':recipe_slug
      },
      'legacy':False
    },
    'to':{
      'dataset':recipe_slug,
      'view':'Barnacle_Reports'
    }
  })
if __name__ == "__main__":
  # Command-line entry point: collect credentials and recipe parameters,
  # then run the Barnacle DV360 audit recipe.
  # (Fixes: stray dataset residue removed from the final line; "intructions"
  # typo corrected; -auth_read help text said "writing" instead of "reading".)
  parser = argparse.ArgumentParser(
    formatter_class=argparse.RawDescriptionHelpFormatter,
    description=textwrap.dedent("""
      Gives DV clients ability to see which users have access to which parts of an account. Loads DV user profile mappings using the API into BigQuery and connects to a DataStudio dashboard.

      1. DV360 only permits SERVICE accounts to access the user list API endpoint, be sure to provide and permission one.
      2. Wait for <b>BigQuery->->->DV_*</b> to be created.
      3. Wait for <b>BigQuery->->->Barnacle_*</b> to be created, then copy and connect the following data sources.
      4. Join the <a href='https://groups.google.com/d/forum/starthinker-assets' target='_blank'>StarThinker Assets Group</a> to access the following assets
      5. Copy <a href='https://datastudio.google.com/c/u/0/reporting/9f6b9e62-43ec-4027-849a-287e9c1911bd' target='_blank'>Barnacle DV Report</a>.
      6. Click Edit->Resource->Manage added data sources, then edit each connection to connect to your new tables above.
      7. Or give these instructions to the client.
  """))

  parser.add_argument("-project", help="Cloud ID of Google Cloud Project.", default=None)
  parser.add_argument("-key", help="API Key of Google Cloud Project.", default=None)
  parser.add_argument("-client", help="Path to CLIENT credentials json file.", default=None)
  parser.add_argument("-user", help="Path to USER credentials json file.", default=None)
  parser.add_argument("-service", help="Path to SERVICE credentials json file.", default=None)
  parser.add_argument("-verbose", help="Print all the steps as they happen.", action="store_true")

  parser.add_argument("-auth_read", help="Credentials used for reading data.", default='user')
  parser.add_argument("-auth_write", help="Credentials used for writing data.", default='service')
  parser.add_argument("-partner", help="Partner ID to run user audit on.", default='')
  parser.add_argument("-recipe_slug", help="Name of Google BigQuery dataset to create.", default='')

  args = parser.parse_args()

  config = Configuration(
    project=args.project,
    user=args.user,
    service=args.service,
    client=args.client,
    key=args.key,
    verbose=args.verbose
  )

  recipe_barnacle_dv360(config, args.auth_read, args.auth_write, args.partner, args.recipe_slug)
import argparse
import textwrap
from starthinker.util.configuration import Configuration
from starthinker.task.dataset.run import dataset
from starthinker.task.google_api.run import google_api
from starthinker.task.bigquery.run import bigquery
def recipe_barnacle_dv360(config, auth_read, auth_write, partner, recipe_slug):
    """Gives DV clients ability to see which users have access to which parts of an
    account. Loads DV user profile mappings using the API into BigQuery and
    connects to a DataStudio dashboard.

    Args:
        config (Configuration) - StarThinker project and credential settings.
        auth_read (authentication) - Credentials used for reading data.
        auth_write (authentication) - Credentials used for writing data.
        partner (integer) - Partner ID to run user audit on.
        recipe_slug (string) - Name of Google BigQuery dataset to create.
    """
    # Ensure the destination BigQuery dataset exists.
    dataset(config, {
        'auth':auth_write,
        'dataset':recipe_slug
    })
    # Pull the list of DBM/DV360 report definitions into DV_Reports.
    google_api(config, {
        'auth':auth_read,
        'api':'doubleclickbidmanager',
        'version':'v1.1',
        'function':'queries.listqueries',
        'alias':'list',
        'results':{
            'bigquery':{
                'auth':auth_write,
                'dataset':recipe_slug,
                'table':'DV_Reports'
            }
        }
    })
    # Pull partner names/ids into DV_Partners.
    google_api(config, {
        'auth':auth_read,
        'api':'displayvideo',
        'version':'v1',
        'function':'partners.list',
        'kwargs':{
            'fields':'partners.displayName,partners.partnerId,nextPageToken'
        },
        'results':{
            'bigquery':{
                'auth':auth_write,
                'dataset':recipe_slug,
                'table':'DV_Partners'
            }
        }
    })
    # Pull advertiser names/ids for the given partner into DV_Advertisers.
    google_api(config, {
        'auth':auth_read,
        'api':'displayvideo',
        'version':'v1',
        'function':'advertisers.list',
        'kwargs':{
            'partnerId':partner,
            'fields':'advertisers.displayName,advertisers.advertiserId,nextPageToken'
        },
        'results':{
            'bigquery':{
                'auth':auth_write,
                'dataset':recipe_slug,
                'table':'DV_Advertisers'
            }
        }
    })
    # Pull the user list into DV_Users. DV360 only allows SERVICE accounts to
    # call users.list, hence the hard-coded 'service' auth here.
    google_api(config, {
        'auth':'service',
        'api':'displayvideo',
        'version':'v1',
        'function':'users.list',
        'kwargs':{
        },
        'results':{
            'bigquery':{
                'auth':auth_write,
                'dataset':recipe_slug,
                'table':'DV_Users'
            }
        }
    })
    # Build the Barnacle_User_Roles view: one row per user role assignment,
    # joined to partner and advertiser display names.
    bigquery(config, {
        'auth':auth_write,
        'from':{
            'query':'''SELECT
U.userId,
U.name,
U.email,
U.displayName,
REGEXP_EXTRACT(U.email, r'@(.+)') AS Domain,
IF (ENDS_WITH(U.email, '.gserviceaccount.com'), 'Service', 'User') AS Authentication,
IF((Select COUNT(advertiserId) from UNNEST(U.assignedUserRoles)) = 0, 'Partner', 'Advertiser') AS Scope,
STRUCT(
AUR.partnerId,
P.displayName AS partnerName,
AUR.userRole,
AUR.advertiserId,
A.displayName AS advertiserName,
AUR.assignedUserRoleId
) AS assignedUserRoles,
FROM `{dataset}.DV_Users` AS U,
UNNEST(assignedUserRoles) AS AUR
LEFT JOIN `{dataset}.DV_Partners` AS P
ON AUR.partnerId=P.partnerId
LEFT JOIN `{dataset}.DV_Advertisers` AS A
ON AUR.advertiserId=A.advertiserId ''',
            'parameters':{
                'dataset':recipe_slug
            },
            'legacy':False
        },
        'to':{
            'dataset':recipe_slug,
            'view':'Barnacle_User_Roles'
        }
    })
    # Build the Barnacle_Reports view: one row per report, with its partner /
    # advertiser filters extracted and joined to display names.
    bigquery(config, {
        'auth':auth_write,
        'from':{
            'query':'''SELECT
R.*,
P.displayName AS partnerName,
A.displayName AS advertiserName,
FROM (
SELECT
queryId,
(SELECT CAST(value AS INT64) FROM UNNEST(R.params.filters) WHERE type = 'FILTER_PARTNER' LIMIT 1) AS partnerId,
(SELECT CAST(value AS INT64) FROM UNNEST(R.params.filters) WHERE type = 'FILTER_ADVERTISER' LIMIT 1) AS advertiserId,
R.schedule.frequency,
R.params.metrics,
R.params.type,
R.metadata.dataRange,
R.metadata.sendNotification,
DATE(TIMESTAMP_MILLIS(R.metadata.latestReportRunTimeMS)) AS latestReportRunTime,
FROM `{dataset}.DV_Reports` AS R) AS R
LEFT JOIN `{dataset}.DV_Partners` AS P
ON R.partnerId=P.partnerId
LEFT JOIN `{dataset}.DV_Advertisers` AS A
ON R.advertiserId=A.advertiserId ''',
            'parameters':{
                'dataset':recipe_slug
            },
            'legacy':False
        },
        'to':{
            'dataset':recipe_slug,
            'view':'Barnacle_Reports'
        }
    })
if __name__ == "__main__":
    # Command-line entry point: build a Configuration from the standard
    # StarThinker credential flags and run the recipe once.
    parser = argparse.ArgumentParser(
        formatter_class=argparse.RawDescriptionHelpFormatter,
        description=textwrap.dedent("""
Gives DV clients ability to see which users have access to which parts of an account. Loads DV user profile mappings using the API into BigQuery and connects to a DataStudio dashboard.
1. DV360 only permits SERVICE accounts to access the user list API endpoint, be sure to provide and permission one.
2. Wait for <b>BigQuery->->->DV_*</b> to be created.
3. Wait for <b>BigQuery->->->Barnacle_*</b> to be created, then copy and connect the following data sources.
4. Join the <a href='https://groups.google.com/d/forum/starthinker-assets' target='_blank'>StarThinker Assets Group</a> to access the following assets
5. Copy <a href='https://datastudio.google.com/c/u/0/reporting/9f6b9e62-43ec-4027-849a-287e9c1911bd' target='_blank'>Barnacle DV Report</a>.
6. Click Edit->Resource->Manage added data sources, then edit each connection to connect to your new tables above.
7. Or give these instructions to the client.
"""))
    parser.add_argument("-project", help="Cloud ID of Google Cloud Project.", default=None)
    parser.add_argument("-key", help="API Key of Google Cloud Project.", default=None)
    parser.add_argument("-client", help="Path to CLIENT credentials json file.", default=None)
    parser.add_argument("-user", help="Path to USER credentials json file.", default=None)
    parser.add_argument("-service", help="Path to SERVICE credentials json file.", default=None)
    parser.add_argument("-verbose", help="Print all the steps as they happen.", action="store_true")
    # BUGFIX: help text said "writing data" for the read credential.
    parser.add_argument("-auth_read", help="Credentials used for reading data.", default='user')
    parser.add_argument("-auth_write", help="Credentials used for writing data.", default='service')
    parser.add_argument("-partner", help="Partner ID to run user audit on.", default='')
    parser.add_argument("-recipe_slug", help="Name of Google BigQuery dataset to create.", default='')
    args = parser.parse_args()
    config = Configuration(
        project=args.project,
        user=args.user,
        service=args.service,
        client=args.client,
        key=args.key,
        verbose=args.verbose
    )
    recipe_barnacle_dv360(config, args.auth_read, args.auth_write, args.partner, args.recipe_slug)
from __future__ import absolute_import, division, print_function
from ansible.module_utils.six import iteritems
try:
    # The openshift client is an optional dependency; record whether it is
    # importable so setup() can raise a helpful error instead of failing here.
    from openshift.helper.kubernetes import KubernetesObjectHelper
    from openshift.helper.openshift import OpenShiftObjectHelper
    from openshift.helper.exceptions import KubernetesException
    HAS_K8S_MODULE_HELPER = True
except ImportError as exc:
    HAS_K8S_MODULE_HELPER = False
class K8sInventoryException(Exception):
    """Raised for any error while building inventory from a cluster."""
    pass
class K8sInventoryHelper(object):
    """Populate an Ansible dynamic inventory from Kubernetes pods and services.

    Intended to be mixed into an Ansible inventory plugin: the consuming class
    must provide ``self.inventory`` (the inventory being built) and
    ``self._cache`` (the plugin cache mapping).
    """
    helper = None
    # Connection plugin assigned to container hosts via ansible_connection.
    transport = 'kubectl'

    def setup(self, config_data, cache, cache_key):
        """Validate requirements, consult the cache, then fetch cluster objects.

        :param config_data: parsed inventory-source configuration
        :param cache: True when reading from cache was requested
        :param cache_key: key under which this source's data may be cached
        :raises K8sInventoryException: if the openshift client is missing
        """
        connections = config_data.get('connections')
        if not HAS_K8S_MODULE_HELPER:
            raise K8sInventoryException(
                "This module requires the OpenShift Python client. Try `pip install openshift`"
            )
        source_data = None
        if cache and cache_key in self._cache:
            try:
                source_data = self._cache[cache_key]
            except KeyError:
                pass
        if not source_data:
            self.fetch_objects(connections)

    def fetch_objects(self, connections):
        """Fetch pods and services for each configured connection.

        :param connections: optional list of connection dicts (host, auth,
            optional ``namespaces`` restriction); when falsy, the default
            client configuration and all visible namespaces are used.
        """
        self.helper = self.get_helper('v1', 'namespace_list')
        if connections:
            if not isinstance(connections, list):
                raise K8sInventoryException("Expecting connections to be a list.")
            for connection in connections:
                if not isinstance(connection, dict):
                    raise K8sInventoryException("Expecting connection to be a dictionary.")
                self.authenticate(connection)
                name = connection.get('name', self.get_default_host_name(self.helper.api_client.host))
                if connection.get('namespaces'):
                    # BUGFIX: read the namespaces of THIS connection; the code
                    # previously indexed the connections list itself.
                    namespaces = connection['namespaces']
                else:
                    namespaces = self.get_available_namespaces()
                for namespace in namespaces:
                    self.get_pods_for_namespace(name, namespace)
                    self.get_services_for_namespace(name, namespace)
        else:
            name = self.get_default_host_name(self.helper.api_client.host)
            namespaces = self.get_available_namespaces()
            for namespace in namespaces:
                self.get_pods_for_namespace(name, namespace)
                self.get_services_for_namespace(name, namespace)

    def authenticate(self, connection=None):
        """Configure the API client from a connection dict's auth options."""
        auth_options = {}
        if connection:
            # Only forward recognized, non-None authentication settings.
            auth_args = ('host', 'api_key', 'kubeconfig', 'context', 'username', 'password',
                         'cert_file', 'key_file', 'ssl_ca_cert', 'verify_ssl')
            for key, value in iteritems(connection):
                if key in auth_args and value is not None:
                    auth_options[key] = value
        try:
            self.helper.set_client_config(**auth_options)
        except KubernetesException as exc:
            raise K8sInventoryException('Error connecting to the API: {0}'.format(exc.message))

    @staticmethod
    def get_default_host_name(host):
        """Turn an API host URL into a safe inventory group name.

        e.g. ``https://master.example.com:8443`` -> ``master-example-com_8443``.
        """
        return host.replace('https://', '').replace('http://', '').replace('.', '-').replace(':', '_')

    def get_helper(self, api_version, kind):
        """Return a KubernetesObjectHelper bound to *api_version*/*kind*."""
        try:
            helper = KubernetesObjectHelper(api_version=api_version, kind=kind, debug=False)
            helper.get_model(api_version, kind)
            return helper
        except KubernetesException as exc:
            raise K8sInventoryException('Error initializing object helper: {0}'.format(exc.message))

    def get_available_namespaces(self):
        """Return the names of all namespaces visible to the client."""
        try:
            obj = self.helper.get_object()
        # BUGFIX: previously caught KubernetesObjectHelper (a helper class,
        # not an exception), so API errors escaped unwrapped.
        except KubernetesException as exc:
            raise K8sInventoryException('Error fetching Namespace list: {0}'.format(exc.message))
        return [namespace.metadata.name for namespace in obj.items]

    def get_pods_for_namespace(self, name, namespace):
        """Add one inventory host per container in *namespace*'s pods.

        Creates the groups <name>, <namespace>, <namespace>_pods and one
        group per pod label as <key>_<value>.
        """
        self.helper.set_model('v1', 'pod_list')
        try:
            obj = self.helper.get_object(namespace=namespace)
        except KubernetesException as exc:
            raise K8sInventoryException('Error fetching Pod list: {0}'.format(exc.message))
        namespace_pod_group = '{0}_pods'.format(namespace)
        self.inventory.add_group(name)
        self.inventory.add_group(namespace)
        self.inventory.add_child(name, namespace)
        self.inventory.add_group(namespace_pod_group)
        self.inventory.add_child(namespace, namespace_pod_group)
        for pod in obj.items:
            pod_name = pod.metadata.name
            pod_groups = []
            pod_labels = {} if not pod.metadata.labels else pod.metadata.labels
            pod_annotations = {} if not pod.metadata.annotations else pod.metadata.annotations
            if pod.metadata.labels:
                # create a group for each label_value
                for key, value in iteritems(pod.metadata.labels):
                    group_name = '{0}_{1}'.format(key, value)
                    if group_name not in pod_groups:
                        pod_groups.append(group_name)
                        self.inventory.add_group(group_name)
            for container in pod.status.container_statuses:
                # add each pod_container to the namespace group, and to each label_value group
                container_name = '{0}_{1}'.format(pod.metadata.name, container.name)
                self.inventory.add_host(container_name)
                self.inventory.add_child(namespace_pod_group, container_name)
                if pod_groups:
                    for group in pod_groups:
                        self.inventory.add_child(group, container_name)
                # Add hostvars
                self.inventory.set_variable(container_name, 'object_type', 'pod')
                self.inventory.set_variable(container_name, 'labels', pod_labels)
                self.inventory.set_variable(container_name, 'annotations', pod_annotations)
                self.inventory.set_variable(container_name, 'cluster_name', pod.metadata.cluster_name)
                self.inventory.set_variable(container_name, 'pod_node_name', pod.spec.node_name)
                # BUGFIX: pod_name previously duplicated pod.spec.node_name.
                self.inventory.set_variable(container_name, 'pod_name', pod.metadata.name)
                self.inventory.set_variable(container_name, 'pod_host_ip', pod.status.host_ip)
                self.inventory.set_variable(container_name, 'pod_phase', pod.status.phase)
                self.inventory.set_variable(container_name, 'pod_ip', pod.status.pod_ip)
                self.inventory.set_variable(container_name, 'pod_self_link', pod.metadata.self_link)
                self.inventory.set_variable(container_name, 'pod_resource_version', pod.metadata.resource_version)
                self.inventory.set_variable(container_name, 'pod_uid', pod.metadata.uid)
                # BUGFIX: container_name previously stored the image, not the name.
                self.inventory.set_variable(container_name, 'container_name', container.name)
                self.inventory.set_variable(container_name, 'container_image', container.image)
                if container.state.running:
                    self.inventory.set_variable(container_name, 'container_state', 'Running')
                if container.state.terminated:
                    self.inventory.set_variable(container_name, 'container_state', 'Terminated')
                if container.state.waiting:
                    self.inventory.set_variable(container_name, 'container_state', 'Waiting')
                self.inventory.set_variable(container_name, 'container_ready', container.ready)
                self.inventory.set_variable(container_name, 'ansible_connection', self.transport)
                self.inventory.set_variable(container_name, 'ansible_{0}_pod'.format(self.transport),
                                            pod_name)
                self.inventory.set_variable(container_name, 'ansible_{0}_container'.format(self.transport),
                                            container.name)

    def get_services_for_namespace(self, name, namespace):
        """Add one inventory host per service in *namespace*.

        Creates the groups <name>, <namespace>, <namespace>_services and one
        group per service label as <key>_<value>.
        """
        self.helper.set_model('v1', 'service_list')
        try:
            obj = self.helper.get_object(namespace=namespace)
        except KubernetesException as exc:
            raise K8sInventoryException('Error fetching Service list: {0}'.format(exc.message))
        namespace_service_group = '{0}_services'.format(namespace)
        self.inventory.add_group(name)
        self.inventory.add_group(namespace)
        self.inventory.add_child(name, namespace)
        self.inventory.add_group(namespace_service_group)
        self.inventory.add_child(namespace, namespace_service_group)
        for service in obj.items:
            service_name = service.metadata.name
            service_labels = {} if not service.metadata.labels else service.metadata.labels
            service_annotations = {} if not service.metadata.annotations else service.metadata.annotations
            self.inventory.add_host(service_name)
            if service.metadata.labels:
                # create a group for each label_value
                for key, value in iteritems(service.metadata.labels):
                    group_name = '{0}_{1}'.format(key, value)
                    self.inventory.add_group(group_name)
                    self.inventory.add_child(group_name, service_name)
            self.inventory.add_child(namespace_service_group, service_name)
            ports = [{'name': port.name,
                      'port': port.port,
                      'protocol': port.protocol,
                      'targetPort': port.target_port,
                      'nodePort': port.node_port} for port in service.spec.ports]
            # add hostvars
            self.inventory.set_variable(service_name, 'object_type', 'service')
            self.inventory.set_variable(service_name, 'labels', service_labels)
            self.inventory.set_variable(service_name, 'annotations', service_annotations)
            self.inventory.set_variable(service_name, 'cluster_name', service.metadata.cluster_name)
            self.inventory.set_variable(service_name, 'ports', ports)
            self.inventory.set_variable(service_name, 'type', service.spec.type)
            self.inventory.set_variable(service_name, 'self_link', service.metadata.self_link)
            self.inventory.set_variable(service_name, 'resource_version', service.metadata.resource_version)
            self.inventory.set_variable(service_name, 'uid', service.metadata.uid)
            # Optional spec fields: only publish hostvars that are set.
            if service.spec.external_traffic_policy:
                self.inventory.set_variable(service_name, 'external_traffic_policy',
                                            service.spec.external_traffic_policy)
            if hasattr(service.spec, 'external_ips') and service.spec.external_ips:
                self.inventory.set_variable(service_name, 'external_ips', service.spec.external_ips)
            if service.spec.external_name:
                self.inventory.set_variable(service_name, 'external_name', service.spec.external_name)
            if service.spec.health_check_node_port:
                self.inventory.set_variable(service_name, 'health_check_node_port',
                                            service.spec.health_check_node_port)
            if service.spec.load_balancer_ip:
                self.inventory.set_variable(service_name, 'load_balancer_ip',
                                            service.spec.load_balancer_ip)
            if service.spec.selector:
                self.inventory.set_variable(service_name, 'selector', service.spec.selector)
            if hasattr(service.status.load_balancer, 'ingress') and service.status.load_balancer.ingress:
                load_balancer = [{'hostname': ingress.hostname,
                                  'ip': ingress.ip} for ingress in service.status.load_balancer.ingress]
                self.inventory.set_variable(service_name, 'load_balancer', load_balancer)
class OpenShiftInventoryHelper(K8sInventoryHelper):
    """Extends the Kubernetes inventory helper with OpenShift routes.

    Pods and services come from the base class; this subclass additionally
    adds one inventory host per route in each namespace.
    """
    helper = None
    # Containers are reached through the `oc` connection plugin.
    transport = 'oc'

    def fetch_objects(self, connections):
        """Fetch pods/services via the base class, then routes per namespace."""
        super(OpenShiftInventoryHelper, self).fetch_objects(connections)
        # Re-bind the helper: the base class may have left it on another model.
        self.helper = self.get_helper('v1', 'namespace_list')
        if connections:
            for connection in connections:
                self.authenticate(connection)
                name = connection.get('name', self.get_default_host_name(self.helper.api_client.host))
                if connection.get('namespaces'):
                    namespaces = connection['namespaces']
                else:
                    namespaces = self.get_available_namespaces()
                for namespace in namespaces:
                    self.get_routes_for_namespace(name, namespace)
        else:
            name = self.get_default_host_name(self.helper.api_client.host)
            namespaces = self.get_available_namespaces()
            for namespace in namespaces:
                self.get_routes_for_namespace(name, namespace)

    def get_helper(self, api_version, kind):
        """Return an OpenShiftObjectHelper (adds OpenShift resource models)."""
        try:
            helper = OpenShiftObjectHelper(api_version=api_version, kind=kind, debug=False)
            helper.get_model(api_version, kind)
            return helper
        except KubernetesException as exc:
            raise K8sInventoryException('Error initializing object helper: {0}'.format(exc.message))

    def get_routes_for_namespace(self, name, namespace):
        """Add one inventory host per route in *namespace*.

        Creates the groups <name>, <namespace>, <namespace>_routes and one
        group per route label as <key>_<value>.
        """
        self.helper.set_model('v1', 'route_list')
        try:
            obj = self.helper.get_object(namespace=namespace)
        except KubernetesException as exc:
            raise K8sInventoryException('Error fetching Routes list: {0}'.format(exc.message))
        namespace_routes_group = '{0}_routes'.format(namespace)
        self.inventory.add_group(name)
        self.inventory.add_group(namespace)
        self.inventory.add_child(name, namespace)
        self.inventory.add_group(namespace_routes_group)
        self.inventory.add_child(namespace, namespace_routes_group)
        for route in obj.items:
            route_name = route.metadata.name
            route_labels = {} if not route.metadata.labels else route.metadata.labels
            route_annotations = {} if not route.metadata.annotations else route.metadata.annotations
            self.inventory.add_host(route_name)
            if route.metadata.labels:
                # create a group for each label_value
                for key, value in iteritems(route.metadata.labels):
                    group_name = '{0}_{1}'.format(key, value)
                    self.inventory.add_group(group_name)
                    self.inventory.add_child(group_name, route_name)
            self.inventory.add_child(namespace_routes_group, route_name)
            # add hostvars
            self.inventory.set_variable(route_name, 'labels', route_labels)
            self.inventory.set_variable(route_name, 'annotations', route_annotations)
            self.inventory.set_variable(route_name, 'cluster_name', route.metadata.cluster_name)
            self.inventory.set_variable(route_name, 'object_type', 'route')
            self.inventory.set_variable(route_name, 'self_link', route.metadata.self_link)
            self.inventory.set_variable(route_name, 'resource_version', route.metadata.resource_version)
            self.inventory.set_variable(route_name, 'uid', route.metadata.uid)
            if route.spec.host:
                self.inventory.set_variable(route_name, 'host', route.spec.host)
            if route.spec.path:
                self.inventory.set_variable(route_name, 'path', route.spec.path)
            if hasattr(route.spec.port, 'target_port') and route.spec.port.target_port:
                # NOTE(review): the guard checks target_port but the whole port
                # object is stored — confirm whether target_port was intended.
                self.inventory.set_variable(route_name, 'port', route.spec.port)
from __future__ import absolute_import, division, print_function
from ansible.module_utils.six import iteritems
try:
    # The openshift client is an optional dependency; record whether it is
    # importable so setup() can raise a helpful error instead of failing here.
    from openshift.helper.kubernetes import KubernetesObjectHelper
    from openshift.helper.openshift import OpenShiftObjectHelper
    from openshift.helper.exceptions import KubernetesException
    HAS_K8S_MODULE_HELPER = True
except ImportError as exc:
    HAS_K8S_MODULE_HELPER = False
class K8sInventoryException(Exception):
    """Raised for any error while building inventory from a cluster."""
    pass
class K8sInventoryHelper(object):
    """Populate an Ansible dynamic inventory from Kubernetes pods and services.

    Intended to be mixed into an Ansible inventory plugin: the consuming class
    must provide ``self.inventory`` (the inventory being built) and
    ``self._cache`` (the plugin cache mapping).
    """
    helper = None
    # Connection plugin assigned to container hosts via ansible_connection.
    transport = 'kubectl'

    def setup(self, config_data, cache, cache_key):
        """Validate requirements, consult the cache, then fetch cluster objects.

        :param config_data: parsed inventory-source configuration
        :param cache: True when reading from cache was requested
        :param cache_key: key under which this source's data may be cached
        :raises K8sInventoryException: if the openshift client is missing
        """
        connections = config_data.get('connections')
        if not HAS_K8S_MODULE_HELPER:
            raise K8sInventoryException(
                "This module requires the OpenShift Python client. Try `pip install openshift`"
            )
        source_data = None
        if cache and cache_key in self._cache:
            try:
                source_data = self._cache[cache_key]
            except KeyError:
                pass
        if not source_data:
            self.fetch_objects(connections)

    def fetch_objects(self, connections):
        """Fetch pods and services for each configured connection.

        :param connections: optional list of connection dicts (host, auth,
            optional ``namespaces`` restriction); when falsy, the default
            client configuration and all visible namespaces are used.
        """
        self.helper = self.get_helper('v1', 'namespace_list')
        if connections:
            if not isinstance(connections, list):
                raise K8sInventoryException("Expecting connections to be a list.")
            for connection in connections:
                if not isinstance(connection, dict):
                    raise K8sInventoryException("Expecting connection to be a dictionary.")
                self.authenticate(connection)
                name = connection.get('name', self.get_default_host_name(self.helper.api_client.host))
                if connection.get('namespaces'):
                    # BUGFIX: read the namespaces of THIS connection; the code
                    # previously indexed the connections list itself.
                    namespaces = connection['namespaces']
                else:
                    namespaces = self.get_available_namespaces()
                for namespace in namespaces:
                    self.get_pods_for_namespace(name, namespace)
                    self.get_services_for_namespace(name, namespace)
        else:
            name = self.get_default_host_name(self.helper.api_client.host)
            namespaces = self.get_available_namespaces()
            for namespace in namespaces:
                self.get_pods_for_namespace(name, namespace)
                self.get_services_for_namespace(name, namespace)

    def authenticate(self, connection=None):
        """Configure the API client from a connection dict's auth options."""
        auth_options = {}
        if connection:
            # Only forward recognized, non-None authentication settings.
            auth_args = ('host', 'api_key', 'kubeconfig', 'context', 'username', 'password',
                         'cert_file', 'key_file', 'ssl_ca_cert', 'verify_ssl')
            for key, value in iteritems(connection):
                if key in auth_args and value is not None:
                    auth_options[key] = value
        try:
            self.helper.set_client_config(**auth_options)
        except KubernetesException as exc:
            raise K8sInventoryException('Error connecting to the API: {0}'.format(exc.message))

    @staticmethod
    def get_default_host_name(host):
        """Turn an API host URL into a safe inventory group name.

        e.g. ``https://master.example.com:8443`` -> ``master-example-com_8443``.
        """
        return host.replace('https://', '').replace('http://', '').replace('.', '-').replace(':', '_')

    def get_helper(self, api_version, kind):
        """Return a KubernetesObjectHelper bound to *api_version*/*kind*."""
        try:
            helper = KubernetesObjectHelper(api_version=api_version, kind=kind, debug=False)
            helper.get_model(api_version, kind)
            return helper
        except KubernetesException as exc:
            raise K8sInventoryException('Error initializing object helper: {0}'.format(exc.message))

    def get_available_namespaces(self):
        """Return the names of all namespaces visible to the client."""
        try:
            obj = self.helper.get_object()
        # BUGFIX: previously caught KubernetesObjectHelper (a helper class,
        # not an exception), so API errors escaped unwrapped.
        except KubernetesException as exc:
            raise K8sInventoryException('Error fetching Namespace list: {0}'.format(exc.message))
        return [namespace.metadata.name for namespace in obj.items]

    def get_pods_for_namespace(self, name, namespace):
        """Add one inventory host per container in *namespace*'s pods.

        Creates the groups <name>, <namespace>, <namespace>_pods and one
        group per pod label as <key>_<value>.
        """
        self.helper.set_model('v1', 'pod_list')
        try:
            obj = self.helper.get_object(namespace=namespace)
        except KubernetesException as exc:
            raise K8sInventoryException('Error fetching Pod list: {0}'.format(exc.message))
        namespace_pod_group = '{0}_pods'.format(namespace)
        self.inventory.add_group(name)
        self.inventory.add_group(namespace)
        self.inventory.add_child(name, namespace)
        self.inventory.add_group(namespace_pod_group)
        self.inventory.add_child(namespace, namespace_pod_group)
        for pod in obj.items:
            pod_name = pod.metadata.name
            pod_groups = []
            pod_labels = {} if not pod.metadata.labels else pod.metadata.labels
            pod_annotations = {} if not pod.metadata.annotations else pod.metadata.annotations
            if pod.metadata.labels:
                # create a group for each label_value
                for key, value in iteritems(pod.metadata.labels):
                    group_name = '{0}_{1}'.format(key, value)
                    if group_name not in pod_groups:
                        pod_groups.append(group_name)
                        self.inventory.add_group(group_name)
            for container in pod.status.container_statuses:
                # add each pod_container to the namespace group, and to each label_value group
                container_name = '{0}_{1}'.format(pod.metadata.name, container.name)
                self.inventory.add_host(container_name)
                self.inventory.add_child(namespace_pod_group, container_name)
                if pod_groups:
                    for group in pod_groups:
                        self.inventory.add_child(group, container_name)
                # Add hostvars
                self.inventory.set_variable(container_name, 'object_type', 'pod')
                self.inventory.set_variable(container_name, 'labels', pod_labels)
                self.inventory.set_variable(container_name, 'annotations', pod_annotations)
                self.inventory.set_variable(container_name, 'cluster_name', pod.metadata.cluster_name)
                self.inventory.set_variable(container_name, 'pod_node_name', pod.spec.node_name)
                # BUGFIX: pod_name previously duplicated pod.spec.node_name.
                self.inventory.set_variable(container_name, 'pod_name', pod.metadata.name)
                self.inventory.set_variable(container_name, 'pod_host_ip', pod.status.host_ip)
                self.inventory.set_variable(container_name, 'pod_phase', pod.status.phase)
                self.inventory.set_variable(container_name, 'pod_ip', pod.status.pod_ip)
                self.inventory.set_variable(container_name, 'pod_self_link', pod.metadata.self_link)
                self.inventory.set_variable(container_name, 'pod_resource_version', pod.metadata.resource_version)
                self.inventory.set_variable(container_name, 'pod_uid', pod.metadata.uid)
                # BUGFIX: container_name previously stored the image, not the name.
                self.inventory.set_variable(container_name, 'container_name', container.name)
                self.inventory.set_variable(container_name, 'container_image', container.image)
                if container.state.running:
                    self.inventory.set_variable(container_name, 'container_state', 'Running')
                if container.state.terminated:
                    self.inventory.set_variable(container_name, 'container_state', 'Terminated')
                if container.state.waiting:
                    self.inventory.set_variable(container_name, 'container_state', 'Waiting')
                self.inventory.set_variable(container_name, 'container_ready', container.ready)
                self.inventory.set_variable(container_name, 'ansible_connection', self.transport)
                self.inventory.set_variable(container_name, 'ansible_{0}_pod'.format(self.transport),
                                            pod_name)
                self.inventory.set_variable(container_name, 'ansible_{0}_container'.format(self.transport),
                                            container.name)

    def get_services_for_namespace(self, name, namespace):
        """Add one inventory host per service in *namespace*.

        Creates the groups <name>, <namespace>, <namespace>_services and one
        group per service label as <key>_<value>.
        """
        self.helper.set_model('v1', 'service_list')
        try:
            obj = self.helper.get_object(namespace=namespace)
        except KubernetesException as exc:
            raise K8sInventoryException('Error fetching Service list: {0}'.format(exc.message))
        namespace_service_group = '{0}_services'.format(namespace)
        self.inventory.add_group(name)
        self.inventory.add_group(namespace)
        self.inventory.add_child(name, namespace)
        self.inventory.add_group(namespace_service_group)
        self.inventory.add_child(namespace, namespace_service_group)
        for service in obj.items:
            service_name = service.metadata.name
            service_labels = {} if not service.metadata.labels else service.metadata.labels
            service_annotations = {} if not service.metadata.annotations else service.metadata.annotations
            self.inventory.add_host(service_name)
            if service.metadata.labels:
                # create a group for each label_value
                for key, value in iteritems(service.metadata.labels):
                    group_name = '{0}_{1}'.format(key, value)
                    self.inventory.add_group(group_name)
                    self.inventory.add_child(group_name, service_name)
            self.inventory.add_child(namespace_service_group, service_name)
            ports = [{'name': port.name,
                      'port': port.port,
                      'protocol': port.protocol,
                      'targetPort': port.target_port,
                      'nodePort': port.node_port} for port in service.spec.ports]
            # add hostvars
            self.inventory.set_variable(service_name, 'object_type', 'service')
            self.inventory.set_variable(service_name, 'labels', service_labels)
            self.inventory.set_variable(service_name, 'annotations', service_annotations)
            self.inventory.set_variable(service_name, 'cluster_name', service.metadata.cluster_name)
            self.inventory.set_variable(service_name, 'ports', ports)
            self.inventory.set_variable(service_name, 'type', service.spec.type)
            self.inventory.set_variable(service_name, 'self_link', service.metadata.self_link)
            self.inventory.set_variable(service_name, 'resource_version', service.metadata.resource_version)
            self.inventory.set_variable(service_name, 'uid', service.metadata.uid)
            # Optional spec fields: only publish hostvars that are set.
            if service.spec.external_traffic_policy:
                self.inventory.set_variable(service_name, 'external_traffic_policy',
                                            service.spec.external_traffic_policy)
            if hasattr(service.spec, 'external_ips') and service.spec.external_ips:
                self.inventory.set_variable(service_name, 'external_ips', service.spec.external_ips)
            if service.spec.external_name:
                self.inventory.set_variable(service_name, 'external_name', service.spec.external_name)
            if service.spec.health_check_node_port:
                self.inventory.set_variable(service_name, 'health_check_node_port',
                                            service.spec.health_check_node_port)
            if service.spec.load_balancer_ip:
                self.inventory.set_variable(service_name, 'load_balancer_ip',
                                            service.spec.load_balancer_ip)
            if service.spec.selector:
                self.inventory.set_variable(service_name, 'selector', service.spec.selector)
            if hasattr(service.status.load_balancer, 'ingress') and service.status.load_balancer.ingress:
                load_balancer = [{'hostname': ingress.hostname,
                                  'ip': ingress.ip} for ingress in service.status.load_balancer.ingress]
                self.inventory.set_variable(service_name, 'load_balancer', load_balancer)
class OpenShiftInventoryHelper(K8sInventoryHelper):
    """Extends the Kubernetes inventory helper with OpenShift routes.

    Pods and services come from the base class; this subclass additionally
    adds one inventory host per route in each namespace.
    """
    helper = None
    # Containers are reached through the `oc` connection plugin.
    transport = 'oc'

    def fetch_objects(self, connections):
        """Fetch pods/services via the base class, then routes per namespace."""
        super(OpenShiftInventoryHelper, self).fetch_objects(connections)
        # Re-bind the helper: the base class may have left it on another model.
        self.helper = self.get_helper('v1', 'namespace_list')
        if connections:
            for connection in connections:
                self.authenticate(connection)
                name = connection.get('name', self.get_default_host_name(self.helper.api_client.host))
                if connection.get('namespaces'):
                    namespaces = connection['namespaces']
                else:
                    namespaces = self.get_available_namespaces()
                for namespace in namespaces:
                    self.get_routes_for_namespace(name, namespace)
        else:
            name = self.get_default_host_name(self.helper.api_client.host)
            namespaces = self.get_available_namespaces()
            for namespace in namespaces:
                self.get_routes_for_namespace(name, namespace)

    def get_helper(self, api_version, kind):
        """Return an OpenShiftObjectHelper (adds OpenShift resource models)."""
        try:
            helper = OpenShiftObjectHelper(api_version=api_version, kind=kind, debug=False)
            helper.get_model(api_version, kind)
            return helper
        except KubernetesException as exc:
            raise K8sInventoryException('Error initializing object helper: {0}'.format(exc.message))

    def get_routes_for_namespace(self, name, namespace):
        """Add one inventory host per route in *namespace*.

        Creates the groups <name>, <namespace>, <namespace>_routes and one
        group per route label as <key>_<value>.
        """
        self.helper.set_model('v1', 'route_list')
        try:
            obj = self.helper.get_object(namespace=namespace)
        except KubernetesException as exc:
            raise K8sInventoryException('Error fetching Routes list: {0}'.format(exc.message))
        namespace_routes_group = '{0}_routes'.format(namespace)
        self.inventory.add_group(name)
        self.inventory.add_group(namespace)
        self.inventory.add_child(name, namespace)
        self.inventory.add_group(namespace_routes_group)
        self.inventory.add_child(namespace, namespace_routes_group)
        for route in obj.items:
            route_name = route.metadata.name
            route_labels = {} if not route.metadata.labels else route.metadata.labels
            route_annotations = {} if not route.metadata.annotations else route.metadata.annotations
            self.inventory.add_host(route_name)
            if route.metadata.labels:
                # create a group for each label_value
                for key, value in iteritems(route.metadata.labels):
                    group_name = '{0}_{1}'.format(key, value)
                    self.inventory.add_group(group_name)
                    self.inventory.add_child(group_name, route_name)
            self.inventory.add_child(namespace_routes_group, route_name)
            # add hostvars
            self.inventory.set_variable(route_name, 'labels', route_labels)
            self.inventory.set_variable(route_name, 'annotations', route_annotations)
            self.inventory.set_variable(route_name, 'cluster_name', route.metadata.cluster_name)
            self.inventory.set_variable(route_name, 'object_type', 'route')
            self.inventory.set_variable(route_name, 'self_link', route.metadata.self_link)
            self.inventory.set_variable(route_name, 'resource_version', route.metadata.resource_version)
            self.inventory.set_variable(route_name, 'uid', route.metadata.uid)
            if route.spec.host:
                self.inventory.set_variable(route_name, 'host', route.spec.host)
            if route.spec.path:
                self.inventory.set_variable(route_name, 'path', route.spec.path)
            if hasattr(route.spec.port, 'target_port') and route.spec.port.target_port:
                # NOTE(review): the guard checks target_port but the whole port
                # object is stored — confirm whether target_port was intended.
                self.inventory.set_variable(route_name, 'port', route.spec.port)
from musx import Score, Note, Seq, MidiFile, keynum
def sierpinski(score, tone, shape, trans, levels, dur, amp):
    """Generate a self-similar melody: each tone of *shape* also sprouts a
    transposed, time-compressed copy of the melody on the next level down.

    Parameters
    ----------
    score : Score
        The musical score to add events to.
    tone : keynum
        The melodic tone on which to base the melody for the current level.
    shape : list
        A list of intervals defining the melodic shape.
    trans : int
        The transposition applied to each sprouted lower level.
    levels : int
        The number of levels the melody should be reproduced on.
    dur : int | float
        The duration of the process.
    amp : float
        The amplitude of the process.
    """
    child_dur = dur / len(shape)
    for interval in shape:
        key = tone + interval
        # Sound the current tone, clamping to the MIDI key number range.
        score.add(Note(time=score.now, duration=dur, pitch=min(key, 127),
                       amplitude=amp, instrument=0))
        if levels > 1:
            # Sprout the melody on this tone at the next level, shortened so
            # the whole copy fits within the current tone's duration.
            score.compose(sierpinski(score, key + trans, shape, trans,
                                     levels - 1, child_dur, amp))
        yield dur
if __name__ == "__main__":
    # It's good practice to add any metadata such as tempo, midi instrument
    # assignments, micro tuning, etc. to track 0 in your midi file.
    track0 = MidiFile.metatrack()
    # Track 1 will hold the composition.
    track1 = Seq()
    # Create a scheduler and give it track1 as its output object.
    score = Score(out=track1)
    # Create the composition. Specify levels and melody length with care!
    # The number of events that are generated is exponentially related to
    # the length of the melody and the number of levels. For example the
    # first compose() generates 120 events, the second 726, and the third 2728!
    score.compose(sierpinski(score, keynum('a0'), [0, 7, 5], 12, 4, 3, .5))
    #score.compose(sierpinski(score, keynum('a0'), [0, 7, 5], 8, 5, 7, .5))
    #score.compose(sierpinski(score, keynum('a0'), [0, -1, 2, 13], 12, 5, 24, .5))
    # Write the tracks to a midi file in the current directory.
    file = MidiFile("sierpinski.mid", [track0, track1]).write()
    print(f"Wrote '{file.pathname}'.")
    # To automatically play demos use setmidiplayer() and playfile().
    # Example:
    #   setmidiplayer("fluidsynth -iq -g1 /usr/local/sf/MuseScore_General.sf2")
    #   playfile(file.pathname)
def sierpinski(score, tone, shape, trans, levels, dur, amp):
"""
Generates a melodic shape based on successive transpositions (levels) of
itself.
Parameters
----------
score : Score
The musical score to add events to.
tone : keynum
The melodic tone on which to base the melody for the current level.
shape : list
A list of intervals defining the melodic shape.
levels : int
The number of levels the melody should be reproduced on.
dur : int | float
The duration of the process.
amp : float
The amplitude of the process.
"""
num = len(shape)
for i in shape:
k = tone + i
# play current tone in melody
n = Note(time=score.now, duration=dur, pitch=min(k,127), amplitude=amp, instrument=0)
score.add(n)
if (levels > 1):
# sprout melody on tone at next level
score.compose(sierpinski(score, (k + trans), shape,
trans, levels - 1, dur / num, amp))
yield dur
if __name__ == "__main__":
# It's good practice to add any metadata such as tempo, midi instrument
# assignments, micro tuning, etc. to track 0 in your midi file.
track0 = MidiFile.metatrack()
# Track 1 will hold the composition.
track1 = Seq()
# Create a scheduler and give it t1 as its output object.
score = Score(out=track1)
# Create the composition. Specify levels and melody length with care!
# The number of events that are generateed is exponentially related to
# the length of the melody and the number of levels. For example the
# first compose() generates 120 events, the second 726, and the third 2728!
score.compose(sierpinski(score, keynum('a0'), [0, 7, 5], 12, 4, 3, .5))
#score.compose(sierpinski(score, keynum('a0'), [0, 7, 5], 8, 5, 7, .5))
#score.compose(sierpinski(score, keynum('a0'), [0, -1, 2, 13], 12, 5, 24, .5))
# Write the tracks to a midi file in the current directory.
file = MidiFile("sierpinski.mid", [track0, track1]).write()
print(f"Wrote '{file.pathname}'.")
# To automatially play demos use setmidiplayer() and playfile().
# Example:
# setmidiplayer("fluidsynth -iq -g1 /usr/local/sf/MuseScore_General.sf2")
# playfile(file.pathname) | 0.621885 | 0.494385 |
from datetime import date, timedelta
from django.test import TestCase
from django.urls import reverse
from bookclubs.models import User, Book, Club, Meeting, MeetingAttendance, Role
from bookclubs.tests.helpers import reverse_with_next
class MeetingListViewTestCase(TestCase):
"""Tests for showing list of club meetings"""
VIEW = 'meeting_list'
fixtures = [
'bookclubs/tests/fixtures/default_user.json',
'bookclubs/tests/fixtures/other_users.json',
'bookclubs/tests/fixtures/default_clubs.json',
'bookclubs/tests/fixtures/default_book.json',
'bookclubs/tests/fixtures/other_books.json',
]
    def setUp(self):
        """Create a club with two members, two upcoming meetings, and a host for each."""
        # Fixture users: the host runs self.meeting, the plain user hosts the other.
        self.host = User.objects.get(username='@johndoe')
        self.user = User.objects.get(username='@janedoe')
        self.book = Book.objects.get(ISBN='0195153448')
        self.another_book = Book.objects.get(ISBN='0002005018')
        self.club = Club.objects.get(club_name='private_online')
        # Both users must be club members for the meeting list view to be reachable.
        Role.objects.create(
            user=self.host,
            club=self.club,
            club_role='MEM'
        )
        self.user_role = Role.objects.create(
            user=self.user,
            club=self.club,
            club_role='MEM'
        )
        # Two future meetings (4 and 5 days out) so both appear as upcoming.
        self.meeting = Meeting.objects.create(
            club=self.club,
            book=self.book,
            topic='alpha bravo charlie',
            description='delta foxtrot golf hotel india',
            meeting_status='OFF',
            location='Bush House',
            date=date.today() + timedelta(days=4),
            time_start='10:00',
            duration=60
        )
        self.another_meeting = Meeting.objects.create(
            club=self.club,
            book=self.another_book,
            topic='alpha bravo charlie',
            description='delta foxtrot golf hotel india',
            meeting_status='OFF',
            location='Bush House',
            date=date.today() + timedelta(days=5),
            time_start='10:00',
            duration=60
        )
        # Each meeting gets a host ('H') attendance record.
        MeetingAttendance.objects.create(
            user=self.host,
            meeting=self.meeting,
            meeting_role='H'
        )
        MeetingAttendance.objects.create(
            user=self.user,
            meeting=self.another_meeting,
            meeting_role='H'
        )
        self.url = reverse(self.VIEW, kwargs={'club_name': self.club.club_name})
    def log_in(self, user):
        """Authenticate *user* with the shared fixture password (anonymised placeholder)."""
        self.client.login(username=user.username, password="<PASSWORD>")
    def test_meeting_list_url(self):
        """The named route resolves to /club/<club_name>/meeting_list/."""
        self.assertEqual(self.url, f'/club/{self.club.club_name}/meeting_list/')
    def test_meeting_list_with_invalid_club(self):
        """An unknown club name redirects the member to the feed."""
        self.log_in(self.user)
        url = reverse(self.VIEW, kwargs={'club_name': 'invalid'})
        redirect_url = reverse('feed')
        response = self.client.get(url)
        self.assertRedirects(response, redirect_url, status_code=302, target_status_code=200)
    def test_meeting_list_redirects_when_not_logged_in(self):
        """An anonymous request is redirected to log-in with a next= parameter."""
        redirect_url = reverse_with_next('log_in', self.url)
        response = self.client.get(self.url)
        self.assertRedirects(response, redirect_url, status_code=302, target_status_code=200)
    def test_meeting_list_redirects_when_not_a_member(self):
        """A logged-in user whose membership was removed is redirected to the feed."""
        self.log_in(self.user)
        self.user_role.delete()
        redirect_url = reverse('feed')
        response = self.client.get(self.url)
        self.assertRedirects(response, redirect_url, status_code=302, target_status_code=200)
    def test_meeting_list_successful_when_not_attendee_or_host(self):
        """A plain member sees both upcoming meetings' book titles."""
        self.log_in(self.user)
        response = self.client.get(self.url)
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'meeting_list.html')
        self.assertContains(response, f'{self.meeting.book.title}')
        self.assertContains(response, f'{self.another_meeting.book.title}')
    def test_meeting_list_successful_when_attendee(self):
        """A member who is also an attendee ('A') still sees both meetings."""
        self.log_in(self.user)
        MeetingAttendance.objects.create(user=self.user, meeting=self.meeting, meeting_role='A')
        response = self.client.get(self.url)
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'meeting_list.html')
        self.assertContains(response, f'{self.meeting.book.title}')
        self.assertContains(response, f'{self.another_meeting.book.title}')
    def test_meeting_list_successful_when_host(self):
        """A meeting host sees both meetings in the list."""
        self.log_in(self.host)
        response = self.client.get(self.url)
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'meeting_list.html')
        self.assertContains(response, f'{self.meeting.book.title}')
        self.assertContains(response, f'{self.another_meeting.book.title}')
def test_past_meeting_not_in_current_meetings(self):
self.log_in(self.host)
self.meeting.delete()
self.another_meeting.delete()
past_meeting = Meeting.objects.create(
club=self.club,
book=self.another_book,
topic='alpha bravo charlie',
description='delta foxtrot golf hotel india',
meeting_status='OFF',
location='Bush House',
date=date.today() - timedelta(days=5),
time_start='10:00',
duration=60
)
MeetingAttendance.objects.create(
user=self.host,
meeting=past_meeting,
meeting_role='H'
)
response = self.client.get(self.url)
self.assertTemplateUsed(response, 'meeting_list.html')
self.assertNotContains(response, f'{past_meeting.book.title}') | bookclubs/tests/views/meeting_views/test_meeting_list_view.py | from datetime import date, timedelta
from django.test import TestCase
from django.urls import reverse
from bookclubs.models import User, Book, Club, Meeting, MeetingAttendance, Role
from bookclubs.tests.helpers import reverse_with_next
class MeetingListViewTestCase(TestCase):
"""Tests for showing list of club meetings"""
VIEW = 'meeting_list'
fixtures = [
'bookclubs/tests/fixtures/default_user.json',
'bookclubs/tests/fixtures/other_users.json',
'bookclubs/tests/fixtures/default_clubs.json',
'bookclubs/tests/fixtures/default_book.json',
'bookclubs/tests/fixtures/other_books.json',
]
def setUp(self):
self.host = User.objects.get(username='@johndoe')
self.user = User.objects.get(username='@janedoe')
self.book = Book.objects.get(ISBN='0195153448')
self.another_book = Book.objects.get(ISBN='0002005018')
self.club = Club.objects.get(club_name='private_online')
Role.objects.create(
user=self.host,
club=self.club,
club_role='MEM'
)
self.user_role = Role.objects.create(
user=self.user,
club=self.club,
club_role='MEM'
)
self.meeting = Meeting.objects.create(
club=self.club,
book=self.book,
topic='alpha bravo charlie',
description='delta foxtrot golf hotel india',
meeting_status='OFF',
location='Bush House',
date=date.today() + timedelta(days=4),
time_start='10:00',
duration=60
)
self.another_meeting = Meeting.objects.create(
club=self.club,
book=self.another_book,
topic='alpha bravo charlie',
description='delta foxtrot golf hotel india',
meeting_status='OFF',
location='Bush House',
date=date.today() + timedelta(days=5),
time_start='10:00',
duration=60
)
MeetingAttendance.objects.create(
user=self.host,
meeting=self.meeting,
meeting_role='H'
)
MeetingAttendance.objects.create(
user=self.user,
meeting=self.another_meeting,
meeting_role='H'
)
self.url = reverse(self.VIEW, kwargs={'club_name': self.club.club_name})
def log_in(self, user):
self.client.login(username=user.username, password="<PASSWORD>")
def test_meeting_list_url(self):
self.assertEqual(self.url, f'/club/{self.club.club_name}/meeting_list/')
def test_meeting_list_with_invalid_club(self):
self.log_in(self.user)
url = reverse(self.VIEW, kwargs={'club_name': 'invalid'})
redirect_url = reverse('feed')
response = self.client.get(url)
self.assertRedirects(response, redirect_url, status_code=302, target_status_code=200)
def test_meeting_list_redirects_when_not_logged_in(self):
redirect_url = reverse_with_next('log_in', self.url)
response = self.client.get(self.url)
self.assertRedirects(response, redirect_url, status_code=302, target_status_code=200)
def test_meeting_list_redirects_when_not_a_member(self):
self.log_in(self.user)
self.user_role.delete()
redirect_url = reverse('feed')
response = self.client.get(self.url)
self.assertRedirects(response, redirect_url, status_code=302, target_status_code=200)
def test_meeting_list_successful_when_not_attendee_or_host(self):
self.log_in(self.user)
response = self.client.get(self.url)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'meeting_list.html')
self.assertContains(response, f'{self.meeting.book.title}')
self.assertContains(response, f'{self.another_meeting.book.title}')
def test_meeting_list_successful_when_attendee(self):
self.log_in(self.user)
MeetingAttendance.objects.create(user=self.user, meeting=self.meeting, meeting_role='A')
response = self.client.get(self.url)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'meeting_list.html')
self.assertContains(response, f'{self.meeting.book.title}')
self.assertContains(response, f'{self.another_meeting.book.title}')
def test_meeting_list_successful_when_host(self):
self.log_in(self.host)
response = self.client.get(self.url)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'meeting_list.html')
self.assertContains(response, f'{self.meeting.book.title}')
self.assertContains(response, f'{self.another_meeting.book.title}')
def test_past_meeting_not_in_current_meetings(self):
self.log_in(self.host)
self.meeting.delete()
self.another_meeting.delete()
past_meeting = Meeting.objects.create(
club=self.club,
book=self.another_book,
topic='alpha bravo charlie',
description='delta foxtrot golf hotel india',
meeting_status='OFF',
location='Bush House',
date=date.today() - timedelta(days=5),
time_start='10:00',
duration=60
)
MeetingAttendance.objects.create(
user=self.host,
meeting=past_meeting,
meeting_role='H'
)
response = self.client.get(self.url)
self.assertTemplateUsed(response, 'meeting_list.html')
self.assertNotContains(response, f'{past_meeting.book.title}') | 0.585101 | 0.169767 |
import argparse
import multiprocessing
import sys
import orcha.properties
from ..plugins import ListPlugin, query_plugins
from ..utils.logging_utils import get_logger
from ..utils.packages import version
# Application-wide logger shared by this command-line entry point.
log = get_logger()
def main():
    """Main application entry point. Multiple arguments are defined which allow
    further customization of the server/client process:

    --listen-address ADDRESS defines the IP address used when serving/connecting the
        application. By default, it is ``127.0.0.1``.

    --port N indicates which port is used during communication.
        By default, it is **50000**.

    --key KEY defines the authentication key used during communication.
        This field is not mandatory but **it is recommended to define it**
        as it will be necessary for other processes to communicate with the
        service itself.

    The application automatically detects the plugins that are installed in the
    system. It is important that the installed plugins follow the naming
    convention in order to be correctly identified; otherwise the subcommands
    won't appear here. For more details, see :class:`BasePlugin`.

    Returns:
        int: execution return code. Multiple return codes are possible:

            + ``0`` means that the execution was successful.
            + ``1`` refers to a standard error happened during execution.
            + ``127`` indicates that no plugins were found or no plugins
              can handle the parsed command line options.

    .. versionchanged:: 0.1.11
        + ``key`` parameter is now required, the internally generated one won't be used anymore.
        + Orcha clients in Python <= 3.7 now have their internal digest fixed, not throwing an
          exception anymore.

    .. versionchanged:: 0.1.12
        + ``key`` parameter is not mandatory (again) - some plugins may not require it for
          their basic functionality.
    """
    parser = argparse.ArgumentParser(
        description="Orcha command line utility for handling services",
        prog="orcha",
    )
    parser.add_argument(
        "--listen-address",
        metavar="ADDRESS",
        type=str,
        default="127.0.0.1",
        help="Listen address of the service",
    )
    parser.add_argument(
        "--port",
        metavar="N",
        type=int,
        default=50000,
        help="Listen port of the service",
    )
    parser.add_argument(
        "--key",
        metavar="KEY",
        type=str,
        default=None,
        help="Authentication key used for verifying clients",
    )
    parser.add_argument("--version", action="version", version=f"orcha - {version('orcha')}")
    # Each discovered plugin registers its own subcommand parser here.
    subparsers = parser.add_subparsers(
        title="available commands", required=True, metavar="command"
    )
    discovered_plugins = query_plugins()
    plugins = [plugin(subparsers) for plugin in discovered_plugins]
    # add our embedded ListPlugin to the list of available plugins
    plugins.append(ListPlugin(subparsers))
    args: argparse.Namespace = parser.parse_args()
    # Publish connection settings globally so plugins/services can read them.
    orcha.properties.listen_address = args.listen_address
    orcha.properties.port = args.port
    if args.key is not None:
        orcha.properties.authkey = args.key.encode()
        # Also fix the multiprocessing digest key so remote clients can
        # authenticate (see the 0.1.11 note in the docstring).
        log.debug("fixing internal digest key")
        multiprocessing.current_process().authkey = args.key.encode()
    # Expose every parsed CLI option to plugins via the shared extras mapping.
    for arg, value in vars(args).items():
        orcha.properties.extras[arg] = value
    # NOTE(review): assumes each plugin's subparser sets an ``owner`` attribute
    # (e.g. via set_defaults) identifying the handling plugin — confirm against
    # BasePlugin.
    for plugin in plugins:
        if plugin.can_handle(args.owner):
            return plugin.handle(args)
    return 127
if __name__ == "__main__":
sys.exit(main()) | orcha/bin/main.py | import argparse
import multiprocessing
import sys
import orcha.properties
from ..plugins import ListPlugin, query_plugins
from ..utils.logging_utils import get_logger
from ..utils.packages import version
# application universal logger
log = get_logger()
def main():
"""Main application entry point. Multiple arguments are defined which allows
further customization of the server/client process:
--listen-address ADDRESS defines the IP address used when serving/connecting the
application. By default, it is ``127.0.0.1``.
--port N indicates which port is used during communication.
By default, it is **50000**.
--key KEY defines the authentication key used during communication.
This field is not mandatory but **it is recommended to define it**
as it will be necessary for other processes to communicate with the
service itself.
The application automatically detects the plugins that are installed in the system. It
is important that the installed plugins follows the name convention in order to be
correctly identified. In other case, the subcommands won't appear here. For more
details, see :class:`BasePlugin`.
Returns:
int: execution return code. Multiple return codes are possible:
+ ``0`` means that the execution was successful.
+ ``1`` refers to a standard error happened during execution.
+ ``127`` indicates that no plugins were found or no plugins
can handle the parsed command line options.
.. versionchanged:: 0.1.11
+ ``key`` parameter is now required, the internally generated one won't be used anymore.
+ Orcha clients in Python <= 3.7 now have their internal digest fixed, not throwing an
exception anymore.
.. versionchanged:: 0.1.12
+ ``key`` parameter is not mandatory (again) - some plugins may not require it for
their basic functionality.
"""
parser = argparse.ArgumentParser(
description="Orcha command line utility for handling services",
prog="orcha",
)
parser.add_argument(
"--listen-address",
metavar="ADDRESS",
type=str,
default="127.0.0.1",
help="Listen address of the service",
)
parser.add_argument(
"--port",
metavar="N",
type=int,
default=50000,
help="Listen port of the service",
)
parser.add_argument(
"--key",
metavar="KEY",
type=str,
default=None,
help="Authentication key used for verifying clients",
)
parser.add_argument("--version", action="version", version=f"orcha - {version('orcha')}")
subparsers = parser.add_subparsers(
title="available commands", required=True, metavar="command"
)
discovered_plugins = query_plugins()
plugins = [plugin(subparsers) for plugin in discovered_plugins]
# add our embedded ListPlugin to the list of available plugins
plugins.append(ListPlugin(subparsers))
args: argparse.Namespace = parser.parse_args()
orcha.properties.listen_address = args.listen_address
orcha.properties.port = args.port
if args.key is not None:
orcha.properties.authkey = args.key.encode()
log.debug("fixing internal digest key")
multiprocessing.current_process().authkey = args.key.encode()
for arg, value in vars(args).items():
orcha.properties.extras[arg] = value
for plugin in plugins:
if plugin.can_handle(args.owner):
return plugin.handle(args)
return 127
if __name__ == "__main__":
sys.exit(main()) | 0.498291 | 0.202798 |
import hashlib
import struct
from collections import OrderedDict
from typing import IO, Any, Optional, Iterable, Mapping, Dict, \
NamedTuple, ClassVar, TypeVar, Type
from pymap.mailbox import MailboxSnapshot
from .io import FileWriteable
__all__ = ['Record', 'UidList']
_UDT = TypeVar('_UDT', bound='UidList')
class Record(NamedTuple):
    """One entry parsed from the UID list file.

    Args:
        uid: The message UID of the record.
        fields: The metadata fields of the record.
        filename: The filename of the record.
    """

    uid: int
    fields: Mapping[str, Any]
    filename: str

    @property
    def key(self) -> str:
        """The :class:`~mailbox.Maildir` key value."""
        # Everything before the first ':' (the maildir info suffix separator).
        return self.filename.partition(':')[0]
class UidList(FileWriteable):
    """Maintains the file with UID mapping to maildir files.

    Args:
        base_dir: The directory of the file.
        uid_validity: The UID validity value.
        next_uid: The next assignable message UID value.
        global_uid: The 128-bit global mailbox UID.
    """

    #: The UID list file name, stored in the mailbox directory.
    FILE_NAME: ClassVar[str] = 'dovecot-uidlist'

    #: The UID list lock file, stored adjacent to the UID list file.
    LOCK_FILE: ClassVar[str] = 'dovecot-uidlist.lock'

    def __init__(self, base_dir: str, uid_validity: int,
                 next_uid: int, global_uid: Optional[bytes] = None) -> None:
        super().__init__()
        self._base_dir = base_dir
        self.uid_validity = uid_validity
        self.next_uid = next_uid
        # Derive a deterministic GUID from the mailbox path when none is given.
        self.global_uid = global_uid or self._create_guid(base_dir)
        # Ordered so records are written back in the order they were added.
        self._records: Dict[int, Record] = OrderedDict()

    @property
    def records(self) -> Iterable[Record]:
        """The records contained in the UID list file."""
        return self._records.values()

    def get(self, uid: int) -> Record:
        """Get a single record by its UID.

        Args:
            uid: The message UID.

        Raises:
            KeyError: The UID does not exist.
        """
        return self._records[uid]

    def get_all(self, uids: Iterable[int]) -> Mapping[int, Record]:
        """Get records by a set of UIDs, silently skipping unknown UIDs.

        Args:
            uids: The message UIDs.
        """
        return {uid: self._records[uid] for uid in uids
                if uid in self._records}

    def set(self, rec: Record) -> None:
        """Add or update the record in the UID list file."""
        self._records[rec.uid] = rec

    def remove(self, uid: int) -> None:
        """Remove the record from the UID list file.

        Raises:
            KeyError: The UID does not exist.
        """
        del self._records[uid]

    @classmethod
    def _build_line(cls, rec: Record) -> str:
        # Serialize one record as "<uid> <k><val>... :<filename>\r\n" with the
        # metadata fields sorted for a stable on-disk representation.
        parts = ['%d' % rec.uid]
        for key, val in sorted(rec.fields.items()):
            parts.append(' ')
            parts.append(key[0:1])
            parts.append(str(val))
        parts.append(' :')
        parts.append(rec.filename)
        parts.append('\r\n')
        return ''.join(parts)

    @classmethod
    def _read_line(cls, line: str) -> Record:
        # Inverse of _build_line: fields precede the first colon; everything
        # after it (minus trailing whitespace) is the maildir filename.
        before, filename = line.split(':', 1)
        fields: Dict[str, str] = {}
        data = before.split(' ')
        num = int(data[0])
        for col in data[1:]:
            if col:
                fields[col[0]] = col[1:]
        return Record(num, fields, filename.rstrip())

    @classmethod
    def _read_guid_hex(cls, field: str) -> bytes:
        # The GUID is two 64-bit halves hex-encoded back to back; split the
        # string at its midpoint and decode each half.
        split = len(field) // 2
        left, right = int(field[0:split], 16), int(field[split:], 16)
        return struct.pack('=QQ', left, right)

    @classmethod
    def _read_header(cls: Type[_UDT], base_dir: str, line: str) -> _UDT:
        """Parse the version-3 header line into a new UID list object.

        Raises:
            ValueError: The header is not version 3 or a field is missing.
        """
        data = line.split()
        if data[0] != '3':
            raise ValueError(line)
        uid_validity: Optional[int] = None
        next_uid: Optional[int] = None
        global_uid: Optional[bytes] = None
        for field in data[1:]:
            if field[0] == 'V':
                uid_validity = int(field[1:])
            elif field[0] == 'N':
                next_uid = int(field[1:])
            elif field[0] == 'G':
                global_uid = cls._read_guid_hex(field[1:])
        if uid_validity is None or next_uid is None or global_uid is None:
            raise ValueError(line)
        return cls(base_dir, uid_validity, next_uid, global_uid)

    def _create_guid(self, base_dir: str) -> bytes:
        # Deterministic 128-bit GUID: SHA-256 of the mailbox path plus the
        # UID validity, truncated to 16 bytes.
        ret = hashlib.sha256()
        ret.update(base_dir.encode('utf-8', 'replace'))
        ret.update(struct.pack('=L', self.uid_validity))
        return ret.digest()[0:16]

    def _get_guid_hex(self) -> str:
        left, right = struct.unpack('=QQ', self.global_uid)
        # Zero-pad each 64-bit half to 16 hex digits. Without padding, a half
        # with leading zero bytes produced a short string, which
        # _read_guid_hex() would then split at the wrong midpoint on re-read.
        return format(left, '016x') + format(right, '016x')

    def _build_header(self) -> str:
        return ''.join(['3 V', str(self.uid_validity),
                        ' N', str(self.next_uid),
                        ' G', self._get_guid_hex(), '\r\n'])

    @classmethod
    def get_file(cls) -> str:
        return cls.FILE_NAME

    @classmethod
    def get_lock(cls) -> str:
        return cls.LOCK_FILE

    def get_dir(self) -> str:
        return self._base_dir

    @classmethod
    def get_default(cls: Type[_UDT], base_dir: str) -> _UDT:
        """Return an empty UID list with a fresh UID validity and next UID 1."""
        return cls(base_dir, MailboxSnapshot.new_uid_validity(), 1)

    def write(self, fp: IO[str]) -> None:
        """Write the header followed by every record to *fp*."""
        fp.write(self._build_header())
        for rec in self.records:
            fp.write(self._build_line(rec))

    @classmethod
    def open(cls: Type[_UDT], base_dir: str, fp: IO[str]) -> _UDT:
        """Create a UID list from the header line of *fp*; records are not read."""
        header = fp.readline()
        ret = cls._read_header(base_dir, header)
        return ret

    def read(self, fp: IO[str]) -> None:
        """Read all remaining record lines from *fp* into this UID list."""
        for line in fp:
            self.set(self._read_line(line))
import struct
from collections import OrderedDict
from typing import IO, Any, Optional, Iterable, Mapping, Dict, \
NamedTuple, ClassVar, TypeVar, Type
from pymap.mailbox import MailboxSnapshot
from .io import FileWriteable
__all__ = ['Record', 'UidList']
_UDT = TypeVar('_UDT', bound='UidList')
class Record(NamedTuple):
"""Defines a single record read from the UID list file.
Args:
uid: The message UID of the record.
fields: The metadata fields of the record.
filename: The filename of the record.
"""
uid: int
fields: Mapping[str, Any]
filename: str
@property
def key(self) -> str:
"""The :class:`~mailbox.Maildir` key value."""
return self.filename.split(':', 1)[0]
class UidList(FileWriteable):
"""Maintains the file with UID mapping to maildir files.
Args:
base_dir: The directory of the file.
uid_validity: The UID validity value.
next_uid: The next assignable message UID value.
global_uid: The 128-bit global mailbox UID.
"""
#: The UID list file name, stored in the mailbox directory.
FILE_NAME: ClassVar[str] = 'dovecot-uidlist'
#: The UID list lock file, stored adjacent to the UID list file.
LOCK_FILE: ClassVar[str] = 'dovecot-uidlist.lock'
def __init__(self, base_dir: str, uid_validity: int,
next_uid: int, global_uid: bytes = None) -> None:
super().__init__()
self._base_dir = base_dir
self.uid_validity = uid_validity
self.next_uid = next_uid
self.global_uid = global_uid or self._create_guid(base_dir)
self._records: Dict[int, Record] = OrderedDict()
@property
def records(self) -> Iterable[Record]:
"""The records contained in the UID list file."""
return self._records.values()
def get(self, uid: int) -> Record:
"""Get a single record by its UID.
Args:
uid: The message UID.
Raises:
KeyError: The UID does not exist.
"""
return self._records[uid]
def get_all(self, uids: Iterable[int]) -> Mapping[int, Record]:
"""Get records by a set of UIDs.
Args:
uids: The message UIDs.
"""
return {uid: self._records[uid] for uid in uids
if uid in self._records}
def set(self, rec: Record) -> None:
"""Add or update the record in the UID list file."""
self._records[rec.uid] = rec
def remove(self, uid: int) -> None:
"""Remove the record from the UID list file.
Raises:
KeyError: The UID does not exist.
"""
del self._records[uid]
@classmethod
def _build_line(cls, rec: Record) -> str:
parts = ['%d' % rec.uid]
for key, val in sorted(rec.fields.items()):
parts.append(' ')
parts.append(key[0:1])
parts.append(str(val))
parts.append(' :')
parts.append(rec.filename)
parts.append('\r\n')
return ''.join(parts)
@classmethod
def _read_line(cls, line: str) -> Record:
before, filename = line.split(':', 1)
fields: Dict[str, str] = {}
data = before.split(' ')
num = int(data[0])
for col in data[1:]:
if col:
fields[col[0]] = col[1:]
return Record(num, fields, filename.rstrip())
@classmethod
def _read_guid_hex(cls, field: str) -> bytes:
split = int(len(field) / 2)
left, right = int(field[0:split], 16), int(field[split:], 16)
return struct.pack('=QQ', left, right)
@classmethod
def _read_header(cls: Type[_UDT], base_dir: str, line: str) -> _UDT:
data = line.split()
if data[0] != '3':
raise ValueError(line)
uid_validity: Optional[int] = None
next_uid: Optional[int] = None
global_uid: Optional[bytes] = None
for field in data[1:]:
if field[0] == 'V':
uid_validity = int(field[1:])
elif field[0] == 'N':
next_uid = int(field[1:])
elif field[0] == 'G':
global_uid = cls._read_guid_hex(field[1:])
if uid_validity is None or next_uid is None or global_uid is None:
raise ValueError(line)
return cls(base_dir, uid_validity, next_uid, global_uid)
def _create_guid(self, base_dir: str) -> bytes:
ret = hashlib.sha256()
ret.update(base_dir.encode('utf-8', 'replace'))
ret.update(struct.pack('=L', self.uid_validity))
return ret.digest()[0:16]
def _get_guid_hex(self) -> str:
left, right = struct.unpack('=QQ', self.global_uid)
return format(left, 'x') + format(right, 'x')
def _build_header(self) -> str:
return ''.join(['3 V', str(self.uid_validity),
' N', str(self.next_uid),
' G', self._get_guid_hex(), '\r\n'])
@classmethod
def get_file(cls) -> str:
return cls.FILE_NAME
@classmethod
def get_lock(cls) -> str:
return cls.LOCK_FILE
def get_dir(self) -> str:
return self._base_dir
@classmethod
def get_default(cls: Type[_UDT], base_dir: str) -> _UDT:
return cls(base_dir, MailboxSnapshot.new_uid_validity(), 1)
def write(self, fp: IO[str]) -> None:
fp.write(self._build_header())
for rec in self.records:
fp.write(self._build_line(rec))
@classmethod
def open(cls: Type[_UDT], base_dir: str, fp: IO[str]) -> _UDT:
header = fp.readline()
ret = cls._read_header(base_dir, header)
return ret
def read(self, fp: IO[str]) -> None:
for line in fp:
self.set(self._read_line(line)) | 0.890223 | 0.247993 |
from typing import Optional
import traceback
import os
import sqlite3
import time
import re
from threading import Thread
from flask import render_template, jsonify
import requests
from framework import path_data, scheduler, app, db, celery
from framework.common.plugin import LogicModuleBase, default_route_socketio
from .plugin import P
from .logic_queue import LogicQueue
from .model import ModelScheduler
from .api_youtube_dl import APIYoutubeDL
# Short module-level aliases into the plugin-support object P.
logger = P.logger
package_name = P.package_name
ModelSetting = P.ModelSetting
class LogicMain(LogicModuleBase):
db_default = {
'db_version': '2',
f'{package_name}_interval': '* * * * *',
f'{package_name}_auto_start': 'False',
'default_save_path': os.path.join(path_data, 'download', package_name),
'default_filename': '%(title)s.%(id)s.%(ext)s',
'cookiefile_path': ''
}
    def __init__(self, p):
        """Register this module with the plugin framework and its socket.io routes."""
        super(LogicMain, self).__init__(p, None, scheduler_desc='V LIVE 새로운 영상 다운로드')
        self.name = package_name  # module name
        default_route_socketio(p, self)
    def plugin_load(self):
        """Restore the pending download queue when the plugin is loaded."""
        try:
            LogicQueue.queue_load()
        except Exception as e:
            logger.error('Exception:%s', e)
            logger.error(traceback.format_exc())
    def process_menu(self, sub, req):
        """Render the plugin page for menu *sub*; fall back to a sample page on error."""
        try:
            # NOTE(review): 'sub' is set to package_name rather than the
            # requested sub — looks intentional for template routing; confirm.
            arg = {
                'package_name': package_name,
                'sub': package_name,
                'template_name': f'{package_name}_{sub}'
            }
            if sub == 'setting':
                # Settings page also reports scheduler registration/run state.
                arg.update(ModelSetting.to_dict())
                job_id = f'{self.P.package_name}_{self.name}'
                arg['scheduler'] = str(scheduler.is_include(job_id))
                arg['is_running'] = str(scheduler.is_running(job_id))
                arg['path_data'] = path_data
            elif sub == 'recent':
                arg['url'] = req.args.get('url', '')
                arg['recent_html'] = LogicMain.get_recent_html()
                arg['save_path'] = ModelSetting.get('default_save_path')
                arg['filename'] = ModelSetting.get('default_filename')
            elif sub == 'scheduler':
                arg['save_path'] = ModelSetting.get('default_save_path')
                arg['filename'] = ModelSetting.get('default_filename')
            return render_template(f'{package_name}_{sub}.html', arg=arg)
        except Exception as e:
            logger.error('Exception:%s', e)
            logger.error(traceback.format_exc())
            return render_template('sample.html', title=f'{package_name} - {sub}')
    def process_ajax(self, sub, req):
        """Handle AJAX calls from the plugin pages; always returns a JSON payload."""
        try:
            logger.debug('AJAX: %s, %s', sub, req.values)
            ret = {'ret': 'success'}
            if sub == 'add_download':
                # Korean message: "added N item(s) to the queue"
                ret['msg'] = f'{LogicMain.download(req.form)}개를 큐에 추가하였습니다.'
            elif sub == 'list_scheduler':
                ret['data'] = LogicMain.get_scheduler()
            elif sub == 'add_scheduler':
                if LogicMain.add_scheduler(req.form):
                    ret['msg'] = '스케줄을 저장하였습니다.'  # "schedule saved"
                else:
                    ret['ret'] = 'warning'
                    # "failed to analyze the V LIVE channel"
                    ret['msg'] = 'V LIVE 채널을 분석하지 못했습니다.'
            elif sub == 'del_scheduler':
                LogicMain.del_scheduler(req.form['id'])
                ret['msg'] = '삭제하였습니다.'  # "deleted"
            return jsonify(ret)
        except Exception as e:
            logger.error('Exception:%s', e)
            logger.error(traceback.format_exc())
            return jsonify({'ret': 'danger', 'msg': str(e)})
    def scheduler_function(self):
        """Run the scheduled task, via Celery when enabled, otherwise inline."""
        if app.config['config']['use_celery']:
            result = LogicMain.task.apply_async()
            result.get()  # block until the celery task finishes
        else:
            LogicMain.task()
    def migration(self):
        """Migrate the plugin settings DB to schema version 2.

        Version 2 renames the 'interval' and 'auto_start' setting keys to
        their package-prefixed equivalents, copying the old values over.
        """
        try:
            db_version = ModelSetting.get_int('db_version')
            # Operates on the raw sqlite file directly rather than via the ORM.
            connect = sqlite3.connect(os.path.join(path_data, 'db', f'{package_name}.db'))
            if db_version < 2:
                cursor = connect.cursor()
                cursor.execute(f"SELECT * FROM {package_name}_setting WHERE key = 'interval'")
                interval = cursor.fetchone()[2]
                cursor.execute(f"UPDATE {package_name}_setting SET value = ? WHERE key = '{package_name}_interval'",
                               (interval,))
                cursor.execute(f"DELETE FROM {package_name}_setting WHERE key = 'interval'")
                cursor.execute(f"SELECT * FROM {package_name}_setting WHERE key = 'auto_start'")
                auto_start = cursor.fetchone()[2]
                cursor.execute(f"UPDATE {package_name}_setting SET value = ? WHERE key = '{package_name}_auto_start'",
                               (auto_start,))
                cursor.execute(f"DELETE FROM {package_name}_setting WHERE key = 'auto_start'")
                connect.commit()
            connect.close()
            ModelSetting.set('db_version', LogicMain.db_default['db_version'])
            db.session.flush()
        except Exception as e:
            logger.error('Exception:%s', e)
            logger.error(traceback.format_exc())
    @staticmethod
    @celery.task
    def task():
        """Scheduler job: download the newest live video for every live-enabled schedule."""
        try:
            for entity in ModelScheduler.get_list():
                if not entity.is_live:
                    continue
                logger.debug('scheduler download %s', entity.url)
                video_url = LogicMain.get_first_live_video(entity.url)  # first video
                # Skip when nothing was found or this URL is already downloading.
                if video_url is None or video_url in LogicMain.download_list:
                    continue
                download = APIYoutubeDL.download(package_name, entity.key, video_url, filename=entity.filename,
                                                 save_path=entity.save_path, start=True,
                                                 cookiefile=ModelSetting.get('cookiefile_path'))
                entity.update(LogicMain.get_count_video(entity.url))  # temporary
                if download['errorCode'] == 0:
                    # Track the in-flight URL and verify the download really started.
                    LogicMain.download_list.add(video_url)
                    Thread(target=LogicMain.download_check_function,
                           args=(video_url, download['index'], entity.key)).start()
                    entity.update()
                else:
                    logger.debug('scheduler download fail %s', download['errorCode'])
        except Exception as e:
            logger.error('Exception:%s', e)
            logger.error(traceback.format_exc())
# URLs currently being downloaded by the scheduler; shared between the
# scheduler task and the status-check threads it spawns
download_list = set()
@staticmethod
def download_check_function(url: str, index: int, key: str):
    """Verify, after a grace period, that a queued download actually started.

    :param url: video URL registered in LogicMain.download_list
    :param index: youtube-dl job index returned by APIYoutubeDL.download()
    :param key: scheduler entity key used to query the job status
    """
    time.sleep(10)  # wait 10 seconds before checking the job status
    status = APIYoutubeDL.status(package_name, index, key)
    if status['status'] == 'ERROR':
        # drop the URL so the scheduler can retry later; discard() (instead
        # of remove()) avoids a KeyError if it was already removed elsewhere
        LogicMain.download_list.discard(url)
@staticmethod
def download(form) -> int:
    """Queue every URL selected on the 'recent' page.

    :param form: Flask form with 'save_path', 'filename' and a
                 'download[]' multi-value list of video URLs
    :returns: number of URLs added to the queue
    """
    options = {
        'save_path': form['save_path'],
        'filename': form['filename'],
    }
    # fetch the list once instead of once per use
    urls = form.getlist('download[]')
    for url in urls:
        LogicQueue.add_queue(url, options)
    return len(urls)
@staticmethod
def get_scheduler() -> list:
    """Return all schedule entries as dicts prepared for the web UI."""
    def _decorate(entry):
        # render the timestamp and the combined target path for display
        entry['last_time'] = entry['last_time'].strftime('%m-%d %H:%M:%S')
        entry['path'] = os.path.join(entry['save_path'], entry['filename'])
        return entry
    return [_decorate(entry) for entry in ModelScheduler.get_list(True)]
@staticmethod
def add_scheduler(form) -> bool:
    """Create or update a channel schedule from the web form.

    If 'db_id' is present, the existing entry is updated; otherwise the
    channel is analyzed first and a new entry is created.

    :returns: False when a new channel could not be analyzed, True otherwise
    """
    # fields shared by both the update and the create path; builds the dict
    # once instead of duplicating it in each branch.
    # NOTE: is_live is currently forced to True (the form checkbox is ignored).
    data = {
        'save_path': form['save_path'],
        'filename': form['filename'],
        'is_live': True
    }
    if form['db_id']:
        ModelScheduler.find(form['db_id']).update(data)
        return True
    info_dict = LogicMain.get_channel_info(form['url'])
    if info_dict is None:
        # channel analysis failed
        return False
    data.update({
        'webpage_url': info_dict['webpage_url'],
        'title': info_dict['title'],
        'count': info_dict['count'],
    })
    ModelScheduler.create(data)
    return True
@staticmethod
def del_scheduler(db_id: int):
    """Delete the schedule entry identified by *db_id*."""
    logger.debug('del_scheduler %s', db_id)
    entity = ModelScheduler.find(db_id)
    entity.delete()
@staticmethod
def get_channel_info(channel_url: str) -> Optional[dict]:
    """Query the V LIVE API for a channel's official name and video count.

    :param channel_url: channel page URL ending in the channel id
    :returns: dict with 'webpage_url', 'title' and 'count',
              or None when the channel id cannot be resolved
    """
    channel_id = channel_url.split('/')[-1]
    url = f'https://www.vlive.tv/globalv-web/vam-web/member/v1.0/channel-{channel_id}/officialProfiles'
    params = {
        'appId': '8c6cc7b45d2568fb668be6e05b6e5a3b',
        'fields': 'officialName',
        'types': 'STAR',
        'gcc': 'KR',
        'locale': 'ko_KR'
    }
    headers = {
        'Referer': 'https://www.vlive.tv/'
    }
    try:
        # 'profile' instead of the original 'json', which shadowed the stdlib module
        profile = requests.get(url, params=params, headers=headers).json()[0]
    except (IndexError, KeyError):
        # the API returns an unexpected payload for an invalid channel_id
        return None
    return {
        'webpage_url': f'https://www.vlive.tv/channel/{channel_id}',
        'title': profile['officialName'],
        'count': LogicMain.get_count_video(channel_url),
    }
@staticmethod
def get_first_live_video(channel_url: str) -> Optional[str]:
    """Return the URL of the channel's most recent LIVE video post, if any.

    Scans the latest starPosts (up to 5) and returns the first post that is
    a VIDEO whose official video type is LIVE; None when nothing is live.
    """
    channel_id = channel_url.split('/')[-1]
    url = f'https://www.vlive.tv/globalv-web/vam-web/post/v1.0/channel-{channel_id}/starPosts'
    params = {
        'appId': '8c6cc7b45d2568fb668be6e05b6e5a3b',
        'fields': 'contentType,officialVideo,title,url',
        'gcc': 'KR',
        'locale': 'ko_KR',
        'pageSize': 5
    }
    headers = {
        'Referer': 'https://www.vlive.tv/'
    }
    # 'payload' instead of the original 'json', which shadowed the stdlib module
    payload = requests.get(url, params=params, headers=headers).json()
    for post in payload['data']:
        if post['contentType'] == 'VIDEO' and post['officialVideo']['type'] == 'LIVE':
            return post['url']
    return None
@staticmethod
def get_count_video(channel_url: str) -> int:
    """Scrape the channel page for its total star-video count."""
    page_html = requests.get(channel_url).text
    counts = re.findall(r'"videoCountOfStar":(\d+)', page_html)
    return int(counts[0])
@staticmethod
def get_recent_html() -> str:
    """Fetch the V LIVE 'recent videos' fragment and rewrite it for embedding.

    Links are made absolute and the site's GA click handlers are replaced
    with the plugin's own link handler; onerror attributes are stripped.
    """
    response = requests.get(
        'https://www.vlive.tv/home/video/more',
        params={'viewType': 'recent', 'pageSize': 20, 'pageNo': 1},
        headers={'Accept-Language': 'ko-KR,ko;q=0.8,en-US;q=0.5,en;q=0.3'},
    )
    html = response.text
    # make relative links absolute
    html = re.sub(r'href="(.+?)"', r'href="https://www.vlive.tv\1"', html)
    # route video clicks through the plugin instead of V LIVE's GA handler
    html = re.sub(r'onclick="vlive.tv.common.videoGa\(this\);"', r'onclick="link_click(this); return false;"', html)
    # strip channel GA handlers and onerror attributes
    html = re.sub(r'onclick="vlive.tv.common.chGa\(this\);"|onerror="(.+?)"', '', html)
    return html
import traceback
import os
import sqlite3
import time
import re
from threading import Thread
from flask import render_template, jsonify
import requests
from framework import path_data, scheduler, app, db, celery
from framework.common.plugin import LogicModuleBase, default_route_socketio
from .plugin import P
from .logic_queue import LogicQueue
from .model import ModelScheduler
from .api_youtube_dl import APIYoutubeDL
# convenience aliases for the plugin framework objects
logger = P.logger              # plugin-scoped logger
package_name = P.package_name  # used for DB table names and setting keys
ModelSetting = P.ModelSetting  # key/value settings model
class LogicMain(LogicModuleBase):
    """V LIVE plugin main module: recent-video browsing plus scheduled
    downloading of live broadcasts via youtube-dl."""

    # default settings written to the DB on first run
    db_default = {
        'db_version': '2',
        f'{package_name}_interval': '* * * * *',         # scheduler cron expression
        f'{package_name}_auto_start': 'False',
        'default_save_path': os.path.join(path_data, 'download', package_name),
        'default_filename': '%(title)s.%(id)s.%(ext)s',  # youtube-dl output template
        'cookiefile_path': ''
    }

    def __init__(self, p):
        super(LogicMain, self).__init__(p, None, scheduler_desc='V LIVE 새로운 영상 다운로드')
        self.name = package_name  # module name
        default_route_socketio(p, self)
def plugin_load(self):
    """Restore the persisted download queue when the plugin starts."""
    try:
        LogicQueue.queue_load()
    except Exception as exc:
        logger.error('Exception:%s', exc)
        logger.error(traceback.format_exc())
def process_menu(self, sub, req):
    """Render one of the plugin pages ('setting', 'recent' or 'scheduler').

    Builds the template argument dict per page; falls back to the generic
    sample template when rendering fails.
    """
    try:
        arg = {
            'package_name': package_name,
            'sub': package_name,
            'template_name': f'{package_name}_{sub}'
        }
        if sub == 'setting':
            arg.update(ModelSetting.to_dict())
            # expose scheduler job state to the settings page
            job_id = f'{self.P.package_name}_{self.name}'
            arg['scheduler'] = str(scheduler.is_include(job_id))
            arg['is_running'] = str(scheduler.is_running(job_id))
            arg['path_data'] = path_data
        elif sub == 'recent':
            arg['url'] = req.args.get('url', '')
            arg['recent_html'] = LogicMain.get_recent_html()
            arg['save_path'] = ModelSetting.get('default_save_path')
            arg['filename'] = ModelSetting.get('default_filename')
        elif sub == 'scheduler':
            arg['save_path'] = ModelSetting.get('default_save_path')
            arg['filename'] = ModelSetting.get('default_filename')
        return render_template(f'{package_name}_{sub}.html', arg=arg)
    except Exception as e:
        logger.error('Exception:%s', e)
        logger.error(traceback.format_exc())
        return render_template('sample.html', title=f'{package_name} - {sub}')
def process_ajax(self, sub, req):
    """Dispatch an AJAX sub-command from the web UI and return a JSON response.

    :param sub: command name: 'add_download', 'list_scheduler',
                'add_scheduler' or 'del_scheduler'
    :param req: Flask request object carrying the form data
    :returns: flask JSON response; 'ret' is 'success', 'warning' or 'danger'
    """
    try:
        logger.debug('AJAX: %s, %s', sub, req.values)
        ret = {'ret': 'success'}
        if sub == 'add_download':
            # message reports how many items were queued
            ret['msg'] = f'{LogicMain.download(req.form)}개를 큐에 추가하였습니다.'
        elif sub == 'list_scheduler':
            ret['data'] = LogicMain.get_scheduler()
        elif sub == 'add_scheduler':
            if LogicMain.add_scheduler(req.form):
                ret['msg'] = '스케줄을 저장하였습니다.'
            else:
                # channel analysis failed -> warn instead of erroring out
                ret['ret'] = 'warning'
                ret['msg'] = 'V LIVE 채널을 분석하지 못했습니다.'
        elif sub == 'del_scheduler':
            LogicMain.del_scheduler(req.form['id'])
            ret['msg'] = '삭제하였습니다.'
        return jsonify(ret)
    except Exception as e:
        # top-level AJAX boundary: log the traceback and surface the error
        logger.error('Exception:%s', e)
        logger.error(traceback.format_exc())
        return jsonify({'ret': 'danger', 'msg': str(e)})
def scheduler_function(self):
    """Scheduler entry point: run the download task via celery when enabled,
    otherwise synchronously in-process."""
    if not app.config['config']['use_celery']:
        LogicMain.task()
        return
    # celery path: dispatch asynchronously, then block until completion
    async_result = LogicMain.task.apply_async()
    async_result.get()
def migration(self):
    """Upgrade this plugin's settings table to db_version 2.

    Version 1 stored scheduler settings under the generic keys
    'interval' / 'auto_start'; version 2 uses package-prefixed keys.
    Old values are copied over and the old rows deleted, then
    'db_version' is bumped.
    """
    try:
        db_version = ModelSetting.get_int('db_version')
        if db_version < 2:
            # Open the DB only when a migration is actually needed, and
            # always close it (the original opened unconditionally and
            # only closed inside the branch, leaking the connection when
            # no migration ran or an error occurred).
            connect = sqlite3.connect(os.path.join(path_data, 'db', f'{package_name}.db'))
            try:
                cursor = connect.cursor()
                # move 'interval' -> '<package>_interval'
                cursor.execute(f"SELECT * FROM {package_name}_setting WHERE key = 'interval'")
                interval = cursor.fetchone()[2]
                cursor.execute(f"UPDATE {package_name}_setting SET value = ? WHERE key = '{package_name}_interval'",
                               (interval,))
                cursor.execute(f"DELETE FROM {package_name}_setting WHERE key = 'interval'")
                # move 'auto_start' -> '<package>_auto_start'
                cursor.execute(f"SELECT * FROM {package_name}_setting WHERE key = 'auto_start'")
                auto_start = cursor.fetchone()[2]
                cursor.execute(f"UPDATE {package_name}_setting SET value = ? WHERE key = '{package_name}_auto_start'",
                               (auto_start,))
                cursor.execute(f"DELETE FROM {package_name}_setting WHERE key = 'auto_start'")
                connect.commit()
            finally:
                connect.close()
        # writing the current version is idempotent, so do it unconditionally
        ModelSetting.set('db_version', LogicMain.db_default['db_version'])
        db.session.flush()
    except Exception as e:
        logger.error('Exception:%s', e)
        logger.error(traceback.format_exc())
@staticmethod
@celery.task
def task():
    """Scheduler task: queue the first LIVE video of every enabled schedule.

    For each schedule entity with is_live set, look up the channel's first
    LIVE video and hand it to youtube-dl, unless that URL is already being
    downloaded (tracked in LogicMain.download_list).
    """
    try:
        for entity in ModelScheduler.get_list():
            if not entity.is_live:
                continue
            logger.debug('scheduler download %s', entity.url)
            video_url = LogicMain.get_first_live_video(entity.url)  # first live video
            if video_url is None or video_url in LogicMain.download_list:
                # nothing live right now, or download already in progress
                continue
            download = APIYoutubeDL.download(package_name, entity.key, video_url, filename=entity.filename,
                                             save_path=entity.save_path, start=True,
                                             cookiefile=ModelSetting.get('cookiefile_path'))
            entity.update(LogicMain.get_count_video(entity.url))  # temporary: refresh video count
            if download['errorCode'] == 0:
                # remember the URL and verify later that the job really started
                LogicMain.download_list.add(video_url)
                Thread(target=LogicMain.download_check_function,
                       args=(video_url, download['index'], entity.key)).start()
                entity.update()
            else:
                logger.debug('scheduler download fail %s', download['errorCode'])
    except Exception as e:
        logger.error('Exception:%s', e)
        logger.error(traceback.format_exc())
# URLs currently being downloaded by the scheduler; shared between the
# scheduler task and the status-check threads it spawns
download_list = set()
@staticmethod
def download_check_function(url: str, index: int, key: str):
    """Verify, after a grace period, that a queued download actually started.

    :param url: video URL registered in LogicMain.download_list
    :param index: youtube-dl job index returned by APIYoutubeDL.download()
    :param key: scheduler entity key used to query the job status
    """
    time.sleep(10)  # wait 10 seconds before checking the job status
    status = APIYoutubeDL.status(package_name, index, key)
    if status['status'] == 'ERROR':
        # drop the URL so the scheduler can retry later; discard() (instead
        # of remove()) avoids a KeyError if it was already removed elsewhere
        LogicMain.download_list.discard(url)
@staticmethod
def download(form) -> int:
    """Queue every URL selected on the 'recent' page.

    :param form: Flask form with 'save_path', 'filename' and a
                 'download[]' multi-value list of video URLs
    :returns: number of URLs added to the queue
    """
    options = {
        'save_path': form['save_path'],
        'filename': form['filename'],
    }
    # fetch the list once instead of once per use
    urls = form.getlist('download[]')
    for url in urls:
        LogicQueue.add_queue(url, options)
    return len(urls)
@staticmethod
def get_scheduler() -> list:
    """Return all schedule entries as dicts prepared for the web UI."""
    def _decorate(entry):
        # render the timestamp and the combined target path for display
        entry['last_time'] = entry['last_time'].strftime('%m-%d %H:%M:%S')
        entry['path'] = os.path.join(entry['save_path'], entry['filename'])
        return entry
    return [_decorate(entry) for entry in ModelScheduler.get_list(True)]
@staticmethod
def add_scheduler(form) -> bool:
    """Create or update a channel schedule from the web form.

    If 'db_id' is present, the existing entry is updated; otherwise the
    channel is analyzed first and a new entry is created.

    :returns: False when a new channel could not be analyzed, True otherwise
    """
    # fields shared by both the update and the create path; builds the dict
    # once instead of duplicating it in each branch.
    # NOTE: is_live is currently forced to True (the form checkbox is ignored).
    data = {
        'save_path': form['save_path'],
        'filename': form['filename'],
        'is_live': True
    }
    if form['db_id']:
        ModelScheduler.find(form['db_id']).update(data)
        return True
    info_dict = LogicMain.get_channel_info(form['url'])
    if info_dict is None:
        # channel analysis failed
        return False
    data.update({
        'webpage_url': info_dict['webpage_url'],
        'title': info_dict['title'],
        'count': info_dict['count'],
    })
    ModelScheduler.create(data)
    return True
@staticmethod
def del_scheduler(db_id: int):
    """Delete the schedule entry identified by *db_id*."""
    logger.debug('del_scheduler %s', db_id)
    entity = ModelScheduler.find(db_id)
    entity.delete()
@staticmethod
def get_channel_info(channel_url: str) -> Optional[dict]:
    """Query the V LIVE API for a channel's official name and video count.

    :param channel_url: channel page URL ending in the channel id
    :returns: dict with 'webpage_url', 'title' and 'count',
              or None when the channel id cannot be resolved
    """
    channel_id = channel_url.split('/')[-1]
    url = f'https://www.vlive.tv/globalv-web/vam-web/member/v1.0/channel-{channel_id}/officialProfiles'
    params = {
        'appId': '8c6cc7b45d2568fb668be6e05b6e5a3b',
        'fields': 'officialName',
        'types': 'STAR',
        'gcc': 'KR',
        'locale': 'ko_KR'
    }
    headers = {
        'Referer': 'https://www.vlive.tv/'
    }
    try:
        # 'profile' instead of the original 'json', which shadowed the stdlib module
        profile = requests.get(url, params=params, headers=headers).json()[0]
    except (IndexError, KeyError):
        # the API returns an unexpected payload for an invalid channel_id
        return None
    return {
        'webpage_url': f'https://www.vlive.tv/channel/{channel_id}',
        'title': profile['officialName'],
        'count': LogicMain.get_count_video(channel_url),
    }
@staticmethod
def get_first_live_video(channel_url: str) -> Optional[str]:
    """Return the URL of the channel's most recent LIVE video post, if any.

    Scans the latest starPosts (up to 5) and returns the first post that is
    a VIDEO whose official video type is LIVE; None when nothing is live.
    """
    channel_id = channel_url.split('/')[-1]
    url = f'https://www.vlive.tv/globalv-web/vam-web/post/v1.0/channel-{channel_id}/starPosts'
    params = {
        'appId': '8c6cc7b45d2568fb668be6e05b6e5a3b',
        'fields': 'contentType,officialVideo,title,url',
        'gcc': 'KR',
        'locale': 'ko_KR',
        'pageSize': 5
    }
    headers = {
        'Referer': 'https://www.vlive.tv/'
    }
    # 'payload' instead of the original 'json', which shadowed the stdlib module
    payload = requests.get(url, params=params, headers=headers).json()
    for post in payload['data']:
        if post['contentType'] == 'VIDEO' and post['officialVideo']['type'] == 'LIVE':
            return post['url']
    return None
@staticmethod
def get_count_video(channel_url: str) -> int:
    """Scrape the channel page for its total star-video count."""
    page_html = requests.get(channel_url).text
    counts = re.findall(r'"videoCountOfStar":(\d+)', page_html)
    return int(counts[0])
@staticmethod
def get_recent_html() -> str:
    """Fetch the V LIVE 'recent videos' fragment and rewrite it for embedding.

    Links are made absolute and the site's GA click handlers are replaced
    with the plugin's own link handler; onerror attributes are stripped.
    """
    response = requests.get(
        'https://www.vlive.tv/home/video/more',
        params={'viewType': 'recent', 'pageSize': 20, 'pageNo': 1},
        headers={'Accept-Language': 'ko-KR,ko;q=0.8,en-US;q=0.5,en;q=0.3'},
    )
    html = response.text
    # make relative links absolute
    html = re.sub(r'href="(.+?)"', r'href="https://www.vlive.tv\1"', html)
    # route video clicks through the plugin instead of V LIVE's GA handler
    html = re.sub(r'onclick="vlive.tv.common.videoGa\(this\);"', r'onclick="link_click(this); return false;"', html)
    # strip channel GA handlers and onerror attributes
    html = re.sub(r'onclick="vlive.tv.common.chGa\(this\);"|onerror="(.+?)"', '', html)
    return html
from msrest.service_client import SDKClient
from msrest import Serializer, Deserializer
from msrestazure import AzureConfiguration
from .version import VERSION
from .operations.clusters_operations import ClustersOperations
from .operations.cluster_versions_operations import ClusterVersionsOperations
from .operations.operations import Operations
from .operations.application_types_operations import ApplicationTypesOperations
from .operations.application_type_versions_operations import ApplicationTypeVersionsOperations
from .operations.applications_operations import ApplicationsOperations
from .operations.services_operations import ServicesOperations
from . import models
class ServiceFabricManagementClientConfiguration(AzureConfiguration):
    """Configuration for ServiceFabricManagementClient.

    All parameters used to create this instance are kept as instance
    attributes.

    :param credentials: Credentials needed for the client to connect to Azure.
    :type credentials: :mod:`A msrestazure Credentials
     object<msrestazure.azure_active_directory>`
    :param subscription_id: The customer subscription identifier.
    :type subscription_id: str
    :param str base_url: Service URL
    """

    def __init__(self, credentials, subscription_id, base_url=None):
        # both credentials and subscription_id are mandatory
        for name, value in (('credentials', credentials),
                            ('subscription_id', subscription_id)):
            if value is None:
                raise ValueError("Parameter '{}' must not be None.".format(name))
        # fall back to the public Azure management endpoint
        super(ServiceFabricManagementClientConfiguration, self).__init__(
            base_url or 'https://management.azure.com')

        self.add_user_agent('azure-mgmt-servicefabric/{}'.format(VERSION))
        self.add_user_agent('Azure-SDK-For-Python')

        self.credentials = credentials
        self.subscription_id = subscription_id
class ServiceFabricManagementClient(SDKClient):
    """Service Fabric Management Client.

    :ivar config: Configuration for client.
    :vartype config: ServiceFabricManagementClientConfiguration

    :ivar clusters: Clusters operations
    :ivar cluster_versions: ClusterVersions operations
    :ivar operations: Operations operations
    :ivar application_types: ApplicationTypes operations
    :ivar application_type_versions: ApplicationTypeVersions operations
    :ivar applications: Applications operations
    :ivar services: Services operations

    :param credentials: Credentials needed for the client to connect to Azure.
    :type credentials: :mod:`A msrestazure Credentials
     object<msrestazure.azure_active_directory>`
    :param subscription_id: The customer subscription identifier.
    :type subscription_id: str
    :param str base_url: Service URL
    """

    def __init__(self, credentials, subscription_id, base_url=None):
        self.config = ServiceFabricManagementClientConfiguration(
            credentials, subscription_id, base_url)
        super(ServiceFabricManagementClient, self).__init__(
            self.config.credentials, self.config)

        # expose every model class defined in .models to the (de)serializers
        client_models = {
            name: cls for name, cls in models.__dict__.items() if isinstance(cls, type)
        }
        self.api_version = '2019-03-01-preview'
        self._serialize = Serializer(client_models)
        self._deserialize = Deserializer(client_models)

        # every operation group takes the same four collaborators
        op_args = (self._client, self.config, self._serialize, self._deserialize)
        self.clusters = ClustersOperations(*op_args)
        self.cluster_versions = ClusterVersionsOperations(*op_args)
        self.operations = Operations(*op_args)
        self.application_types = ApplicationTypesOperations(*op_args)
        self.application_type_versions = ApplicationTypeVersionsOperations(*op_args)
        self.applications = ApplicationsOperations(*op_args)
        self.services = ServicesOperations(*op_args)
from msrest.service_client import SDKClient
from msrest import Serializer, Deserializer
from msrestazure import AzureConfiguration
from .version import VERSION
from .operations.clusters_operations import ClustersOperations
from .operations.cluster_versions_operations import ClusterVersionsOperations
from .operations.operations import Operations
from .operations.application_types_operations import ApplicationTypesOperations
from .operations.application_type_versions_operations import ApplicationTypeVersionsOperations
from .operations.applications_operations import ApplicationsOperations
from .operations.services_operations import ServicesOperations
from . import models
class ServiceFabricManagementClientConfiguration(AzureConfiguration):
    """Configuration for ServiceFabricManagementClient

    Note that all parameters used to create this instance are saved as instance
    attributes.

    :param credentials: Credentials needed for the client to connect to Azure.
    :type credentials: :mod:`A msrestazure Credentials
     object<msrestazure.azure_active_directory>`
    :param subscription_id: The customer subscription identifier.
    :type subscription_id: str
    :param str base_url: Service URL
    """

    def __init__(
            self, credentials, subscription_id, base_url=None):
        # both credentials and subscription_id are mandatory
        if credentials is None:
            raise ValueError("Parameter 'credentials' must not be None.")
        if subscription_id is None:
            raise ValueError("Parameter 'subscription_id' must not be None.")
        if not base_url:
            # default to the public Azure management endpoint
            base_url = 'https://management.azure.com'
        super(ServiceFabricManagementClientConfiguration, self).__init__(base_url)

        # identify this SDK (and its version) in outgoing requests
        self.add_user_agent('azure-mgmt-servicefabric/{}'.format(VERSION))
        self.add_user_agent('Azure-SDK-For-Python')

        self.credentials = credentials
        self.subscription_id = subscription_id
class ServiceFabricManagementClient(SDKClient):
    """Service Fabric Management Client.

    :ivar config: Configuration for client.
    :vartype config: ServiceFabricManagementClientConfiguration

    :ivar clusters: Clusters operations
    :ivar cluster_versions: ClusterVersions operations
    :ivar operations: Operations operations
    :ivar application_types: ApplicationTypes operations
    :ivar application_type_versions: ApplicationTypeVersions operations
    :ivar applications: Applications operations
    :ivar services: Services operations

    :param credentials: Credentials needed for the client to connect to Azure.
    :type credentials: :mod:`A msrestazure Credentials
     object<msrestazure.azure_active_directory>`
    :param subscription_id: The customer subscription identifier.
    :type subscription_id: str
    :param str base_url: Service URL
    """

    def __init__(self, credentials, subscription_id, base_url=None):
        self.config = ServiceFabricManagementClientConfiguration(
            credentials, subscription_id, base_url)
        super(ServiceFabricManagementClient, self).__init__(
            self.config.credentials, self.config)

        # expose every model class defined in .models to the (de)serializers
        client_models = {
            name: cls for name, cls in models.__dict__.items() if isinstance(cls, type)
        }
        self.api_version = '2019-03-01-preview'
        self._serialize = Serializer(client_models)
        self._deserialize = Deserializer(client_models)

        # every operation group takes the same four collaborators
        op_args = (self._client, self.config, self._serialize, self._deserialize)
        self.clusters = ClustersOperations(*op_args)
        self.cluster_versions = ClusterVersionsOperations(*op_args)
        self.operations = Operations(*op_args)
        self.application_types = ApplicationTypesOperations(*op_args)
        self.application_type_versions = ApplicationTypeVersionsOperations(*op_args)
        self.applications = ApplicationsOperations(*op_args)
        self.services = ServicesOperations(*op_args)
import collections
import os.path as osp
import time
import warnings
import torch
from torch.nn.parallel import DataParallel, DistributedDataParallel
from ..core.label_generators import LabelGenerator
from ..core.metrics.accuracy import accuracy
from ..data import build_train_dataloader, build_val_dataloader
from ..utils import bcolors
from ..utils.dist_utils import get_dist_info, synchronize
from ..utils.meters import Meters
from ..utils.torch_utils import copy_state_dict, load_checkpoint, save_checkpoint
from .test import val_reid
from .train import batch_processor, set_random_seed
class BaseRunner(object):
    """
    Base Runner

    Drives the epoch loop for re-ID training: optional pseudo-label
    generation, one training pass per epoch, periodic validation and
    checkpointing.
    """

    def __init__(
        self,
        cfg,
        model,
        optimizer,
        criterions,
        train_loader,
        train_sets=None,
        lr_scheduler=None,
        meter_formats=None,
        print_freq=10,
        reset_optim=True,
        label_generator=None,
    ):
        super(BaseRunner, self).__init__()
        set_random_seed(cfg.TRAIN.seed, cfg.TRAIN.deterministic)
        if meter_formats is None:
            meter_formats = {"Time": ":.3f", "Acc@1": ":.2%"}
        self.cfg = cfg
        self.model = model            # single model or list of models
        self.optimizer = optimizer
        self.criterions = criterions  # dict: loss name -> criterion module
        self.lr_scheduler = lr_scheduler
        self.print_freq = print_freq
        self.reset_optim = reset_optim
        self.label_generator = label_generator
        # pseudo-label mode is active when the config declares PSEUDO_LABELS
        # and at least one dataset is marked as unsupervised
        self.is_pseudo = (
            "PSEUDO_LABELS" in self.cfg.TRAIN
            and self.cfg.TRAIN.unsup_dataset_indexes is not None
        )
        if self.is_pseudo:
            if self.label_generator is None:
                self.label_generator = LabelGenerator(self.cfg, self.model)
        self._rank, self._world_size, self._is_dist = get_dist_info()
        self._epoch, self._start_epoch = 0, 0
        self._best_mAP = 0
        # build data loaders
        self.train_loader, self.train_sets = train_loader, train_sets
        self.val_loader, self.val_set = build_val_dataloader(cfg)
        # save training variables
        # NOTE(review): this mutates a caller-supplied meter_formats dict
        # in place -- confirm that is intended
        for key in criterions.keys():
            meter_formats[key] = ":.3f"
        self.train_progress = Meters(
            meter_formats, self.cfg.TRAIN.iters, prefix="Train: "
        )
def run(self):
    """Run the full training schedule from _start_epoch to cfg.TRAIN.epochs."""
    # the whole process for training
    for ep in range(self._start_epoch, self.cfg.TRAIN.epochs):
        self._epoch = ep
        # generate pseudo labels every PSEUDO_LABELS.freq epochs, and always
        # on the first epoch (also covers resuming mid-schedule)
        if self.is_pseudo:
            if (
                ep % self.cfg.TRAIN.PSEUDO_LABELS.freq == 0
                or ep == self._start_epoch
            ):
                self.update_labels()
                synchronize()
        # train
        self.train()
        synchronize()
        # validate (every val_freq epochs and on the final epoch)
        if (ep + 1) % self.cfg.TRAIN.val_freq == 0 or (
            ep + 1
        ) == self.cfg.TRAIN.epochs:
            mAP = self.val()
            self.save(mAP)
        # update learning rate
        if self.lr_scheduler is not None:
            self.lr_scheduler.step()
        # synchronize distributed processes
        synchronize()
def update_labels(self):
    """Re-cluster pseudo labels, rebuild the train loader and reset classifier centers.

    Called at the start of every PSEUDO_LABELS.freq-th epoch; updates the
    cross-entropy class count and optionally resets the optimizer state.
    """
    sep = "*************************"
    print(f"\n{sep} Start updating pseudo labels on epoch {self._epoch} {sep}\n")

    # generate pseudo labels
    pseudo_labels, label_centers = self.label_generator(
        self._epoch, print_freq=self.print_freq
    )

    # update train loader with the freshly clustered labels
    self.train_loader, self.train_sets = build_train_dataloader(
        self.cfg, pseudo_labels, self.train_sets, self._epoch,
    )

    # the classifier size follows the new cluster count
    if "cross_entropy" in self.criterions.keys():
        self.criterions[
            "cross_entropy"
        ].num_classes = self.train_loader.loader.dataset.num_pids

    # reset optim (optional)
    if self.reset_optim:
        self.optimizer.state = collections.defaultdict(dict)

    # update classifier centers; class ids are offset per dataset
    start_cls_id = 0
    for idx in range(len(self.cfg.TRAIN.datasets)):
        if idx in self.cfg.TRAIN.unsup_dataset_indexes:
            labels = torch.arange(
                start_cls_id, start_cls_id + self.train_sets[idx].num_pids
            )
            centers = label_centers[self.cfg.TRAIN.unsup_dataset_indexes.index(idx)]
            # normalize to a list so single- and multi-model setups share
            # one code path (the original duplicated the unwrap+init logic)
            model_list = self.model if isinstance(self.model, list) else [self.model]
            for model in model_list:
                # unwrap (Distributed)DataParallel before touching the module
                if isinstance(model, (DataParallel, DistributedDataParallel)):
                    model = model.module
                model.initialize_centers(centers, labels)
        start_cls_id += self.train_sets[idx].num_pids

    # BUG FIX: the original printed a literal trailing "n" ("{sep}n"),
    # a typo for the newline escape "\n"
    print(f"\n{sep} Finished updating pseudo label {sep}\n")
def train(self):
    """Run one epoch of training over cfg.TRAIN.iters iterations."""
    # put the model(s) into training mode
    if isinstance(self.model, list):
        for model in self.model:
            model.train()
    else:
        self.model.train()

    self.train_progress.reset(prefix="Epoch: [{}]".format(self._epoch))

    # tell the loader(s) which epoch this is (affects sampling/shuffling)
    if isinstance(self.train_loader, list):
        for loader in self.train_loader:
            loader.new_epoch(self._epoch)
    else:
        self.train_loader.new_epoch(self._epoch)

    end = time.time()
    # renamed from the original "iter", which shadowed the builtin
    for step in range(self.cfg.TRAIN.iters):
        if isinstance(self.train_loader, list):
            batch = [loader.next() for loader in self.train_loader]
        else:
            batch = self.train_loader.next()

        loss = self.train_step(step, batch)
        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()

        self.train_progress.update({"Time": time.time() - end})
        end = time.time()

        if step % self.print_freq == 0:
            self.train_progress.display(step)
def train_step(self, iter, batch):
    """Single forward pass and loss computation; returns the total loss tensor.

    Subclasses that train multiple models must override this method.

    :param iter: current iteration index within the epoch
    :param batch: batch (or list of batches) from the train loader
    :returns: weighted sum of all configured losses (a torch tensor)
    """
    # need to be re-written case by case
    assert not isinstance(
        self.model, list
    ), "please re-write 'train_step()' to support list of models"

    data = batch_processor(batch, self.cfg.MODEL.dsbn)
    if len(data["img"]) > 1:
        warnings.warn(
            "please re-write the 'runner.train_step()' function to make use of "
            "mutual transformer."
        )

    inputs = data["img"][0].cuda()
    targets = data["id"].cuda()

    results = self.model(inputs)
    # keep only the logits of currently valid classes (the pseudo-label
    # cluster count can change between epochs)
    if "prob" in results.keys():
        results["prob"] = results["prob"][
            :, : self.train_loader.loader.dataset.num_pids
        ]

    total_loss = 0
    meters = {}
    for key in self.criterions.keys():
        loss = self.criterions[key](results, targets)
        # weight each loss by its configured coefficient
        total_loss += loss * float(self.cfg.TRAIN.LOSS.losses[key])
        meters[key] = loss.item()

    if "prob" in results.keys():
        acc = accuracy(results["prob"].data, targets.data)
        meters["Acc@1"] = acc[0]

    self.train_progress.update(meters)

    return total_loss
def val(self):
    """Validate every model and return the best mAP among them."""
    model_list = self.model if isinstance(self.model, list) else [self.model]
    best_mAP = 0
    for idx, model in enumerate(model_list):
        if len(model_list) > 1:
            print("==> Val on the no.{} model".format(idx))
        cmc, mAP = val_reid(
            self.cfg,
            model,
            self.val_loader[0],
            self.val_set[0],
            self._epoch,
            self.cfg.TRAIN.val_dataset,
            self._rank,
            print_freq=self.print_freq,
        )
        best_mAP = max(best_mAP, mAP)
    return best_mAP
def save(self, mAP):
    """Track the best mAP so far and write a checkpoint (rank 0 only).

    :param mAP: validation mAP of the current epoch
    """
    is_best = mAP > self._best_mAP
    self._best_mAP = max(self._best_mAP, mAP)
    # green summary line; trailing " *" marks a new best
    print(
        bcolors.OKGREEN
        + "\n * Finished epoch {:3d} mAP: {:5.1%} best: {:5.1%}{}\n".format(
            self._epoch, mAP, self._best_mAP, " *" if is_best else ""
        )
        + bcolors.ENDC
    )

    fpath = osp.join(self.cfg.work_dir, "checkpoint.pth")
    if self._rank == 0:
        # only on cuda:0
        self.save_model(is_best, fpath)
def save_model(self, is_best, fpath):
    """Serialize all model state dicts plus progress counters to *fpath*."""
    model_list = self.model if isinstance(self.model, list) else [self.model]
    # one entry per model: state_dict_1, state_dict_2, ...
    state_dict = {
        "state_dict_" + str(idx + 1): model.state_dict()
        for idx, model in enumerate(model_list)
    }
    state_dict["epoch"] = self._epoch + 1
    state_dict["best_mAP"] = self._best_mAP

    save_checkpoint(state_dict, is_best, fpath=fpath)
def resume(self, path):
    """Resume from a training checkpoint written by save_model (not a source pretrain)."""
    checkpoint = load_checkpoint(path)
    self.load_model(checkpoint)
    synchronize()
def load_model(self, state_dict):
    """Copy checkpoint weights into the model(s) and restore progress counters."""
    model_list = self.model if isinstance(self.model, list) else [self.model]
    for idx, model in enumerate(model_list):
        copy_state_dict(state_dict["state_dict_" + str(idx + 1)], model)

    self._start_epoch = state_dict["epoch"]
    self._best_mAP = state_dict["best_mAP"]
@property
def epoch(self):
    """int: Current epoch."""
    return self._epoch

@property
def rank(self):
    """int: Rank of current process. (distributed training)"""
    return self._rank

@property
def world_size(self):
    """int: Number of processes participating in the job.
    (distributed training)"""
    # FIX: removed dataset-dump junk that was fused onto this return line
    return self._world_size
import collections
import os.path as osp
import time
import warnings
import torch
from torch.nn.parallel import DataParallel, DistributedDataParallel
from ..core.label_generators import LabelGenerator
from ..core.metrics.accuracy import accuracy
from ..data import build_train_dataloader, build_val_dataloader
from ..utils import bcolors
from ..utils.dist_utils import get_dist_info, synchronize
from ..utils.meters import Meters
from ..utils.torch_utils import copy_state_dict, load_checkpoint, save_checkpoint
from .test import val_reid
from .train import batch_processor, set_random_seed
class BaseRunner(object):
    """Base runner driving the full re-ID training schedule.

    Per epoch: optionally regenerate pseudo labels (unsupervised mode),
    train for ``cfg.TRAIN.iters`` iterations, validate at ``val_freq``,
    checkpoint, and step the LR scheduler. Distributed processes are
    synchronized between phases.

    Parameters
    ----------
    cfg : config node with TRAIN / MODEL sections.
    model : nn.Module or list of nn.Module (mutual-learning setups).
    optimizer : torch optimizer shared by all models.
    criterions : dict name -> loss module; weighted by cfg.TRAIN.LOSS.losses.
    train_loader, train_sets : training loader(s) and dataset(s).
    lr_scheduler : optional LR scheduler stepped once per epoch.
    meter_formats : format strings for progress meters (defaults added).
    print_freq : iterations between progress prints.
    reset_optim : clear optimizer state after each pseudo-label update.
    label_generator : optional pseudo-label generator (built lazily).
    """

    def __init__(
        self,
        cfg,
        model,
        optimizer,
        criterions,
        train_loader,
        train_sets=None,
        lr_scheduler=None,
        meter_formats=None,
        print_freq=10,
        reset_optim=True,
        label_generator=None,
    ):
        super(BaseRunner, self).__init__()
        set_random_seed(cfg.TRAIN.seed, cfg.TRAIN.deterministic)
        if meter_formats is None:
            meter_formats = {"Time": ":.3f", "Acc@1": ":.2%"}

        self.cfg = cfg
        self.model = model
        self.optimizer = optimizer
        self.criterions = criterions
        self.lr_scheduler = lr_scheduler
        self.print_freq = print_freq
        self.reset_optim = reset_optim
        self.label_generator = label_generator

        # Pseudo-label mode requires both a PSEUDO_LABELS section and
        # configured unsupervised dataset indexes.
        self.is_pseudo = (
            "PSEUDO_LABELS" in self.cfg.TRAIN
            and self.cfg.TRAIN.unsup_dataset_indexes is not None
        )
        if self.is_pseudo and self.label_generator is None:
            self.label_generator = LabelGenerator(self.cfg, self.model)

        self._rank, self._world_size, self._is_dist = get_dist_info()
        self._epoch, self._start_epoch = 0, 0
        self._best_mAP = 0

        # build data loaders
        self.train_loader, self.train_sets = train_loader, train_sets
        self.val_loader, self.val_set = build_val_dataloader(cfg)

        # one running meter per loss term, on top of the defaults above
        for key in criterions.keys():
            meter_formats[key] = ":.3f"
        self.train_progress = Meters(
            meter_formats, self.cfg.TRAIN.iters, prefix="Train: "
        )

    def run(self):
        """Run the whole training process from _start_epoch to the end."""
        for ep in range(self._start_epoch, self.cfg.TRAIN.epochs):
            self._epoch = ep

            # generate pseudo labels (always on the resumed first epoch)
            if self.is_pseudo and (
                ep % self.cfg.TRAIN.PSEUDO_LABELS.freq == 0
                or ep == self._start_epoch
            ):
                self.update_labels()
                synchronize()

            # train
            self.train()
            synchronize()

            # validate
            if (ep + 1) % self.cfg.TRAIN.val_freq == 0 or (
                ep + 1
            ) == self.cfg.TRAIN.epochs:
                mAP = self.val()
                self.save(mAP)

            # update learning rate
            if self.lr_scheduler is not None:
                self.lr_scheduler.step()

            # synchronize distributed processes
            synchronize()

    def update_labels(self):
        """Regenerate pseudo labels, then rebuild the train loader,
        criterion class counts, optimizer state and classifier centers."""
        sep = "*************************"
        print(f"\n{sep} Start updating pseudo labels on epoch {self._epoch} {sep}\n")

        # generate pseudo labels
        pseudo_labels, label_centers = self.label_generator(
            self._epoch, print_freq=self.print_freq
        )

        # update train loader
        self.train_loader, self.train_sets = build_train_dataloader(
            self.cfg, pseudo_labels, self.train_sets, self._epoch,
        )

        # update criterions: the class count may change with new clusters
        if "cross_entropy" in self.criterions.keys():
            self.criterions[
                "cross_entropy"
            ].num_classes = self.train_loader.loader.dataset.num_pids

        # reset optim (optional): momenta from the old label space are stale
        if self.reset_optim:
            self.optimizer.state = collections.defaultdict(dict)

        # update classifier centers: class ids are offset per dataset
        start_cls_id = 0
        for idx in range(len(self.cfg.TRAIN.datasets)):
            if idx in self.cfg.TRAIN.unsup_dataset_indexes:
                labels = torch.arange(
                    start_cls_id, start_cls_id + self.train_sets[idx].num_pids
                )
                centers = label_centers[self.cfg.TRAIN.unsup_dataset_indexes.index(idx)]
                if isinstance(self.model, list):
                    for model in self.model:
                        if isinstance(model, (DataParallel, DistributedDataParallel)):
                            model = model.module
                        model.initialize_centers(centers, labels)
                else:
                    model = self.model
                    if isinstance(model, (DataParallel, DistributedDataParallel)):
                        model = model.module
                    model.initialize_centers(centers, labels)
            start_cls_id += self.train_sets[idx].num_pids

        # BUGFIX: was '{sep}n' (printed a literal 'n') instead of '{sep}\n'.
        print(f"\n{sep} Finished updating pseudo label {sep}\n")

    def train(self):
        """Train for one epoch (cfg.TRAIN.iters optimizer steps)."""
        if isinstance(self.model, list):
            for model in self.model:
                model.train()
        else:
            self.model.train()
        self.train_progress.reset(prefix="Epoch: [{}]".format(self._epoch))

        if isinstance(self.train_loader, list):
            for loader in self.train_loader:
                loader.new_epoch(self._epoch)
        else:
            self.train_loader.new_epoch(self._epoch)

        end = time.time()
        # renamed from `iter` so the builtin is not shadowed locally
        for it in range(self.cfg.TRAIN.iters):
            if isinstance(self.train_loader, list):
                batch = [loader.next() for loader in self.train_loader]
            else:
                batch = self.train_loader.next()

            loss = self.train_step(it, batch)

            self.optimizer.zero_grad()
            loss.backward()
            self.optimizer.step()

            self.train_progress.update({"Time": time.time() - end})
            end = time.time()

            if it % self.print_freq == 0:
                self.train_progress.display(it)

    def train_step(self, iter, batch):
        """One forward pass + loss computation; returns the total loss.

        Needs to be re-written case by case for multi-model setups.
        (`iter` kept as the parameter name for subclass compatibility,
        although it shadows the builtin.)
        """
        assert not isinstance(
            self.model, list
        ), "please re-write 'train_step()' to support list of models"

        data = batch_processor(batch, self.cfg.MODEL.dsbn)
        if len(data["img"]) > 1:
            warnings.warn(
                "please re-write the 'runner.train_step()' function to make use of "
                "mutual transformer."
            )

        inputs = data["img"][0].cuda()
        targets = data["id"].cuda()

        results = self.model(inputs)
        if "prob" in results.keys():
            # keep only logits of currently-known identities
            results["prob"] = results["prob"][
                :, : self.train_loader.loader.dataset.num_pids
            ]

        total_loss = 0
        meters = {}
        for key in self.criterions.keys():
            loss = self.criterions[key](results, targets)
            total_loss += loss * float(self.cfg.TRAIN.LOSS.losses[key])
            meters[key] = loss.item()

        if "prob" in results.keys():
            acc = accuracy(results["prob"].data, targets.data)
            meters["Acc@1"] = acc[0]

        self.train_progress.update(meters)
        return total_loss

    def val(self):
        """Validate every model; return the best mAP among them."""
        if not isinstance(self.model, list):
            model_list = [self.model]
        else:
            model_list = self.model

        better_mAP = 0
        for idx in range(len(model_list)):
            if len(model_list) > 1:
                print("==> Val on the no.{} model".format(idx))
            cmc, mAP = val_reid(
                self.cfg,
                model_list[idx],
                self.val_loader[0],
                self.val_set[0],
                self._epoch,
                self.cfg.TRAIN.val_dataset,
                self._rank,
                print_freq=self.print_freq,
            )
            better_mAP = max(better_mAP, mAP)
        return better_mAP

    def save(self, mAP):
        """Track the best mAP and checkpoint (rank 0 only)."""
        is_best = mAP > self._best_mAP
        self._best_mAP = max(self._best_mAP, mAP)
        print(
            bcolors.OKGREEN
            + "\n * Finished epoch {:3d} mAP: {:5.1%} best: {:5.1%}{}\n".format(
                self._epoch, mAP, self._best_mAP, " *" if is_best else ""
            )
            + bcolors.ENDC
        )

        fpath = osp.join(self.cfg.work_dir, "checkpoint.pth")
        if self._rank == 0:
            # only on cuda:0
            self.save_model(is_best, fpath)

    def save_model(self, is_best, fpath):
        """Save all model state dicts plus epoch/best-mAP bookkeeping."""
        if not isinstance(self.model, list):
            model_list = [self.model]
        else:
            model_list = self.model

        state_dict = {}
        for idx, model in enumerate(model_list):
            state_dict["state_dict_" + str(idx + 1)] = model.state_dict()
        state_dict["epoch"] = self._epoch + 1
        state_dict["best_mAP"] = self._best_mAP

        save_checkpoint(state_dict, is_best, fpath=fpath)

    def resume(self, path):
        """Resume from a training checkpoint (not a source pretrain)."""
        state_dict = load_checkpoint(path)
        self.load_model(state_dict)
        synchronize()

    def load_model(self, state_dict):
        """Load model weights and restore epoch/best-mAP counters."""
        if not isinstance(self.model, list):
            model_list = [self.model]
        else:
            model_list = self.model
        for idx, model in enumerate(model_list):
            copy_state_dict(state_dict["state_dict_" + str(idx + 1)], model)

        self._start_epoch = state_dict["epoch"]
        self._best_mAP = state_dict["best_mAP"]

    @property
    def epoch(self):
        """int: Current epoch."""
        return self._epoch

    @property
    def rank(self):
        """int: Rank of current process. (distributed training)"""
        return self._rank

    @property
    def world_size(self):
        """int: Number of processes participating in the job.
        (distributed training)"""
        return self._world_size
import sys, os
from datetime import datetime
pasta = 'c:'
#print(os.path.isfile('caixa.txt'))
def main():
    """Handle one sale: ask for the price, then process the payment."""
    valor = preco()
    dinheiro(valor)
def preco():
    """Prompt for a sale price until a valid number is entered.

    Accepts comma decimals ("1,50") and arithmetic expressions ("2+3").
    Non-numeric input is treated as a command: c/cancelar re-prompts,
    f/fechar closes the till, s/sair exits the program.

    Returns
    -------
    float
        The parsed price.
    """
    while 1:
        preco = raw_input('Preço: ')
        try:
            preco = preco.replace(',', '.')
            # SECURITY: eval() on raw user input executes arbitrary code;
            # kept for expression support, acceptable only for a trusted
            # operator at the till.
            preco = eval(preco)
            print(preco)
            # (removed an unreachable `break` that followed this return)
            return float(preco)
        except:
            # Parsing failed: interpret the raw text as a command.
            if preco in ('c', 'cancelar'):
                continue
            elif preco in ('f', 'fechar'):
                fechar_caixa()
            elif preco in ('s', 'sair'):
                sair()
            else:
                continue
def dinheiro(preco):
    """Prompt for the cash received, log the sale and print the change.

    Parameters
    ----------
    preco : float
        Sale price returned by preco().

    Appends the price to <pasta>entrada.txt and restarts via main().
    Commands while prompting: c/cancelar restarts the sale, f/fechar
    closes the till, s/sair exits.
    """
    arq = open(pasta + 'entrada.txt', 'a')
    texto = []
    while 1:
        dinheiro = raw_input('Dinheiro: ')
        try:
            dinheiro = float(dinheiro)
            if dinheiro < preco:
                # negative amount = how much is still missing
                print('%.2f, falta dinheiro.' % (dinheiro - preco))
                continue
            break
        except:
            if dinheiro in ('c', 'cancelar'):
                main()
            # BUGFIX: the original tested `preco` (the numeric price) here,
            # so typing "f"/"fechar" at this prompt could never close the till.
            elif dinheiro in ('f', 'fechar'):
                fechar_caixa()
            elif dinheiro in ('s', 'sair'):
                sair()
            else:
                continue
    troco = dinheiro - preco
    print('Troco: %.2f\n' % troco)
    texto.append('%s\n' % preco)
    arq.writelines(texto)
    arq.close()
    main()
def sair():
    """Terminate the program with exit status 0."""
    raise SystemExit(0)
def fechar_caixa():
    # Close the till: sum all prices logged in entrada.txt, append the
    # total with a timestamp to fechamento.txt, then truncate entrada.txt.
    # Answering anything but 's' restarts the sale loop via main().
    d = raw_input('Deseja fechar o caixa: s/n? ')
    if (d == 's'):
        lido = []
        soma = 0
        arq = open(pasta+'entrada.txt', 'r')
        arq2 = open(pasta+'fechamento.txt', 'a')
        lido = arq.readlines()
        for linha in lido:
            print(linha)
            soma = soma+float(linha)  # one price per line
        total = 'Total: R$'+str(soma)
        print(total)
        # date and time of the closing
        today = datetime.now()
        data = today.strftime("%d/%m/%y")
        hora = today.strftime("%I:%M%p")
        data = '\n'+data+'--'+hora
        print(data)
        # save the closing record
        arq2.writelines(data+' - '+total)
        arq2.close()
        arq.close()
        # clear the sales log (reopening in 'w' truncates the file)
        arq = open(pasta+'entrada.txt', 'w')
        arq.close()
    else: main()
if __name__ == "__main__":
main() | python-examples-master/caixa.py | import sys, os
from datetime import datetime
pasta = 'c:'
#print(os.path.isfile('caixa.txt'))
def main():
dinheiro(preco())
def preco():
while 1:
preco = raw_input('Preço: ')
try:
preco = preco.replace(',','.')
preco = eval(preco)
print(preco)
preco = float(preco)
return(preco)
break
except:
if (preco=='c') or (preco=='cancelar'):
continue
elif (preco=='f') or (preco=='fechar'):
fechar_caixa()
elif (preco=='s') or (preco=='sair'):
sair()
else: continue
def dinheiro(preco):
arq = open(pasta+'entrada.txt', 'a')
texto = []
while 1:
dinheiro = raw_input('Dinheiro: ')
try:
dinheiro = float(dinheiro)
if (dinheiro < preco):
print('%.2f, falta dinheiro.' % (dinheiro-preco))
continue
break
except:
if (dinheiro=='c') or (dinheiro=='cancelar'):
main()
elif (preco=='f') or (preco=='fechar'):
fechar_caixa()
elif (dinheiro=='s') or (dinheiro=='sair'):
sair()
else: continue
troco = dinheiro - preco
print('Troco: %.2f\n' % troco)
texto.append('%s\n' % preco)
arq.writelines(texto)
arq.close()
main()
def sair():
sys.exit(0)
def fechar_caixa():
d = raw_input('Deseja fechar o caixa: s/n? ')
if (d == 's'):
lido = []
soma = 0
arq = open(pasta+'entrada.txt', 'r')
arq2 = open(pasta+'fechamento.txt', 'a')
lido = arq.readlines()
for linha in lido:
print(linha)
soma = soma+float(linha)
total = 'Total: R$'+str(soma)
print(total)
#data e hora
today = datetime.now()
data = today.strftime("%d/%m/%y")
hora = today.strftime("%I:%M%p")
data = '\n'+data+'--'+hora
print(data)
#salva
arq2.writelines(data+' - '+total)
arq2.close()
arq.close()
#limpa arquivo
arq = open(pasta+'entrada.txt', 'w')
arq.close()
else: main()
if __name__ == "__main__":
main() | 0.03816 | 0.066782 |
import chk,calc,test,cnst,trig
import sys # Surely I am allowed to get command line options
#NOTE: infix operator control characters must be one character
# Operator tables consumed by the parser in calc.py. Outer list groups:
# [0] infix, [1] prefix, [2] postfix, [3] bracketed functions (unused).
# Each entry is [list-of-spellings, implementation].
OPERATORS = [
    [
        # Infix (order presumably encodes precedence, highest first -
        # confirm against the evaluation order in calc.py)
        [['^'],lambda a,b: a**b],
        [['C'],trig.comb],  # combinations, e.g. 5C2
        [['x','*'],lambda a,b: a*b],
        [['/'],lambda a,b: a/b],
        [['+'],lambda a,b: a+b],
        [['-'],lambda a,b: a-b],
    ],[
        # Prefix. Second highest priority behind postfix.
        # Using series expansions for trig
        # Radians only for trig. You can use fractions of tau. Using the inferior circle constant will exit the calculator
        [['asin','arcsin'],trig.arcsin],
        [['acos','arccos'],trig.arccos],
        [['atan','arctan'],trig.arctan],
        [['sin'],trig.sin],
        [['cos'],trig.cos],
        [['tan'],trig.tan],
    ],[
        # Postfix. Highest priority
        [['!'],trig.fact]
    ],[
        # Bracketed multi parameter functions. Not implemented. Format name, number of operands, function
        [['log'],2,""],
        [['']]
    ]
]
# Matching bracket pairs accepted interchangeably by the parser.
BRACKETS = [
    ['{','}'],
    ['[',']'],
    ['(',')'],
]
# Flattened lookups derived from the tables above:
brk = "".join("".join(a) for a in BRACKETS)         # every bracket character
ops = "".join("".join(a[0]) for a in OPERATORS[0])  # every infix operator char
pfx = sum([a[0] for a in OPERATORS[1]],[])          # prefix function names
ptx = sum([a[0] for a in OPERATORS[2]],[])          # postfix operator names
if __name__ == "__main__":
    # Launch banner unless silenced on the command line.
    if not ("--quiet" in sys.argv or "-q" in sys.argv):
        print("""\
Type calculations exactly as you'd expect.
To switch between SCI and EXACT modes enter "mode".
For the last answer use "ans", to clear history "clr"
To kill this message on launch use the option -q or --quiet""")

    # Self-test before serving the REPL; refuse to run when broken.
    try:
        test.run_tests()
    except AssertionError:
        print("This program is broken, don't bother.")
        raise SystemExit

    exact = True
    # BUGFIX: narrowed a bare `except:` to OSError (missing/unreadable
    # history file is the only expected failure) and closed the handles.
    try:
        with open("history.txt") as hist_file:
            history = hist_file.read().split("\n")
    except OSError:
        with open("history.txt", "a") as hist_file:
            hist_file.write("")
        history = []

    # Read-eval-print loop.
    while True:
        try:
            inp = input("> ").lower()
            chk.make_sure_they_understand_which_circle_constant_is_correct(inp)
            if inp in ["q", "q()", "exit", "exit()", "quit", "quit()"]:
                raise SystemExit
            elif inp == "mode":
                exact = not exact
            elif inp == "clr":
                with open("history.txt", "w") as hist_file:
                    hist_file.write("1\n")
            else:
                if inp == "":
                    inp = "ans"  # empty line repeats the last answer
                inp = str(chk.history(inp))
                out = calc.parse_brackets(inp)
                if out[1] != "":  # don't want to print empty lines
                    try:
                        out[1].simplify()
                        if abs(float(out[1])) < 10**-15:
                            print(0)  # below the accuracy and is annoying
                        elif exact:
                            print(out[1])
                        else:
                            print(float(out[1]))
                    except (ValueError, AttributeError):
                        print(out[1])  # errors
        except (KeyboardInterrupt, EOFError):
            print()
            raise SystemExit
        except Exception as e:
            print("Unknown Error. Logged.")
            # BUGFIX: append instead of overwrite ("w" discarded every
            # previous error despite claiming "Logged."), and close the
            # handle deterministically.
            with open("errorlog.txt", "a") as log:
                print(e, file=log)
import sys # Surely I am allowed to get command line options
#NOTE: infix operator control characters must be one character
OPERATORS = [
[
# Infix
[['^'],lambda a,b: a**b],
[['C'],trig.comb],
[['x','*'],lambda a,b: a*b],
[['/'],lambda a,b: a/b],
[['+'],lambda a,b: a+b],
[['-'],lambda a,b: a-b],
],[
# Prefix. Second highest priority behind postfix.
# Using series expansions for trig
# Radians only for trig. You can use fractions of tau. Using the inferior circle constant will exit the calculator
[['asin','arcsin'],trig.arcsin],
[['acos','arccos'],trig.arccos],
[['atan','arctan'],trig.arctan],
[['sin'],trig.sin],
[['cos'],trig.cos],
[['tan'],trig.tan],
],[
# Postfix. Highest priority
[['!'],trig.fact]
],[
# Bracketed multi parameter functions. Not implemented. Format name, number of operands, function
[['log'],2,""],
[['']]
]
]
BRACKETS = [
['{','}'],
['[',']'],
['(',')'],
]
brk = "".join("".join(a) for a in BRACKETS)
ops = "".join("".join(a[0]) for a in OPERATORS[0])
pfx = sum([a[0] for a in OPERATORS[1]],[])
ptx = sum([a[0] for a in OPERATORS[2]],[])
if __name__ == "__main__":
if not ("--quiet" in sys.argv or "-q" in sys.argv):
print("""\
Type calculations exactly as you'd expect.
To switch between SCI and EXACT modes enter "mode".
For the last answer use "ans", to clear history "clr"
To kill this message on launch use the option -q or --quiet""")
try:
test.run_tests()
except AssertionError:
print("This program is broken, don't bother.")
raise SystemExit
exact = True
try:
history = open("history.txt").read().split("\n")
except:
open("history.txt","a").write("")
history = []
while True:
try:
inp = input("> ").lower()
chk.make_sure_they_understand_which_circle_constant_is_correct(inp)
if inp in ["q","q()","exit","exit()","quit","quit()"]: raise SystemExit
elif inp == "mode": exact = not exact
elif inp == "clr": open("history.txt","w").write("1\n")
else:
if inp == "": inp = "ans"
inp = str(chk.history(inp))
out = calc.parse_brackets(inp)
if out[1] != "": # don't want to print empty lines
try:
out[1].simplify()
if abs(float(out[1])) < 10**-15: print(0) # below the accuracy and is annoying
elif exact: print(out[1])
else: print(float(out[1]))
except (ValueError, AttributeError): print(out[1]) # errors
except (KeyboardInterrupt, EOFError):
print()
raise SystemExit
except Exception as e:
print("Unknown Error. Logged.")
print(e,file=open("errorlog.txt","w")) | 0.271252 | 0.473901 |
# Single-letter aliases produced by minification (kept verbatim):
l=None            # "empty clipboard" sentinel
k='group.txt'     # radio-group persistence file
j='0'             # default file contents (slot / group zero)
i=True
h=open
c='art'           # save-slot filename prefix, e.g. 'art0'
b='saveSlot.txt'  # current-slot persistence file
a=int
Z=range
I=str
from microbit import pin0 as N,pin1 as O,pin2 as U,button_a as P,button_b as Q,display as R,Image as Y,sleep as V
# W: blank 5x5 canvas (all pixels off)
W=[[0,0,0,0,0],[0,0,0,0,0],[0,0,0,0,0],[0,0,0,0,0],[0,0,0,0,0]]
def D(data):R.scroll(data)
def K(file,backup):
    # Read and return the file's contents; if it does not exist yet,
    # create it with `backup` (via J) and return `backup` unchanged.
    A=backup
    try:
        with h(file)as B:return B.read()
    except OSError:J(file,A);return A
def J(file,data):
    # Overwrite `file` with str(data); always returns True.
    with h(file,'w')as A:A.write(I(data));return i
def H(image):
    """Serialize a 5x5 grid of brightness ints into micro:bit Image
    string format: rows of five digits joined by ':'.

    Inverse of S(). De-minified: the unused per-row temporary was
    removed, and the module aliases (Z/I) replaced with builtins so the
    function stands alone.
    """
    rows = []
    for r in range(5):
        rows.append(''.join(str(image[r][c]) for c in range(5)))
    return ':'.join(rows)
def S(image):
    """Parse a micro:bit Image string ('ddddd:ddddd:...', five rows of
    five digits) back into a 5x5 list of ints. Inverse of H().

    De-minified: module aliases (Z/a) replaced with builtins so the
    function stands alone.
    """
    grid = [[0] * 5 for _ in range(5)]
    for r in range(5):
        for c in range(5):
            # consecutive rows start 6 characters apart (5 digits + ':')
            grid[r][c] = int(image[r * 6 + c])
    return grid
def L():V(200)
def T(coords):A=coords;R.set_pixel(A[0],A[1],3)
# --- persistent state --------------------------------------------------
E=a(K(b,j))        # current save-slot number (saveSlot.txt, default 0)
F=c+I(E)           # current art filename, e.g. 'art0'
B=S(K(F,'00000:00000:00000:00000:00000'))  # working 5x5 canvas
M=a(K(k,j))        # radio group number (group.txt, default 0)
from radio import on,config as d,send,receive as e
on()
d(group=M)
A=[0,0]            # cursor position [x, y]
G=0                # loop tick counter (drives cursor redraw / radio poll)
X=l                # clipboard (None until something is copied/received)
C=770              # analog threshold above which a pin counts as pressed
# (removed an unused `m=0` left over from minification)
while i:
    G+=1
    # pin1: cursor down (wraps 4 -> 0)
    if O.read_analog()>C:
        L()
        if not A[1]==4:A[1]+=1
        else:A[1]=0
        T(A);G=0
    # pin0: cursor up (wraps 0 -> 4)
    if N.read_analog()>C:
        L()
        if not A[1]==0:A[1]-=1
        else:A[1]=4
        T(A);G=0
    # button B: cursor right (wraps)
    if Q.is_pressed():
        L()
        if not A[0]==4:A[0]+=1
        else:A[0]=0
        T(A);G=0
    # button A: cursor left (wraps)
    if P.is_pressed():
        L()
        if not A[0]==0:A[0]-=1
        else:A[0]=4
        T(A);G=0
    if G%7==0:T(A)  # periodic cursor redraw
    # every 50 ticks: poll the radio; on a valid image, stash it in the
    # clipboard and flash a notification frame
    if G%50==0:
        f=e()
        try:g=S(f);X=g;R.show(Y('66666:66066:60606:60006:66666'));V(1000)
        except:pass
    # chorded commands:
    if U.read_analog()>C and N.read_analog()>C:J(F,H(B));D('saved')
    # BUGFIX: the backup must be the serialized form H(W) (as in the
    # slot-switch branches below) - passing the raw list W made S() crash
    # when the art file did not exist yet.
    if O.read_analog()>C and U.read_analog()>C:B=S(K(F,H(W)));D('loaded')
    if U.read_analog()>C and P.is_pressed():X=B;L();D('copied')
    if O.read_analog()>C and N.read_analog()>C:
        if not X==l:B=X
        L();D('pasted')
    # pin0 + button B/A: save current art, then switch to prev/next slot
    if N.read_analog()>C and Q.is_pressed():J(F,H(B));E-=1;D(I(E));F=c+I(E);B=S(K(F,H(W)));J(b,E)
    if N.read_analog()>C and P.is_pressed():J(F,H(B));E+=1;D(I(E));F=c+I(E);B=S(K(F,H(W)));J(b,E)
    if O.read_analog()>C and P.is_pressed():send(H(B));D('sent')
    # A + B together: cycle the radio group 0..255 and persist it
    if P.is_pressed()and Q.is_pressed():
        if M==255:M=0
        else:M+=1
        D(I(M));V(500);J(k,I(M))
    # pin1 + button B: clear the canvas after holding B to confirm
    if O.read_analog()>C and Q.is_pressed():
        G=0;D('Hold B to confirm.')
        if Q.is_pressed():B=W;D('screen cleared.')
        R.show(Y(H(B)))
    # pin2 alone: paint - cycle the pixel under the cursor 0..9 then 0
    if U.read_analog()>C:
        G=0
        if B[A[1]][A[0]]==9:B[A[1]][A[0]]=0
        else:B[A[1]][A[0]]=a(B[A[1]][A[0]])+1
        R.show(Y(H(B)));V(500)
k='group.txt'
j='0'
i=True
h=open
c='art'
b='saveSlot.txt'
a=int
Z=range
I=str
from microbit import pin0 as N,pin1 as O,pin2 as U,button_a as P,button_b as Q,display as R,Image as Y,sleep as V
W=[[0,0,0,0,0],[0,0,0,0,0],[0,0,0,0,0],[0,0,0,0,0],[0,0,0,0,0]]
def D(data):R.scroll(data)
def K(file,backup):
A=backup
try:
with h(file)as B:return B.read()
except OSError:J(file,A);return A
def J(file,data):
with h(file,'w')as A:A.write(I(data));return i
def H(image):
B=image;A=''
for C in Z(0,5):
F=B[C]
for D in Z(0,5):E=B[C][D];A+=I(E)
A+=':'
A=A[:-1];return A
def S(image):
A=[[0,0,0,0,0],[0,0,0,0,0],[0,0,0,0,0],[0,0,0,0,0],[0,0,0,0,0]];B=0
for C in Z(0,5):
for D in Z(0,5):B=C*6+D;A[C][D]=a(image[B])
return A
def L():V(200)
def T(coords):A=coords;R.set_pixel(A[0],A[1],3)
E=a(K(b,j))
F=c+I(E)
B=S(K(F,'00000:00000:00000:00000:00000'))
M=a(K(k,j))
from radio import on,config as d,send,receive as e
on()
d(group=M)
A=[0,0]
G=0
m=0
X=l
C=770
while i:
G+=1
if O.read_analog()>C:
L()
if not A[1]==4:A[1]+=1
else:A[1]=0
T(A);G=0
if N.read_analog()>C:
L()
if not A[1]==0:A[1]-=1
else:A[1]=4
T(A);G=0
if Q.is_pressed():
L()
if not A[0]==4:A[0]+=1
else:A[0]=0
T(A);G=0
if P.is_pressed():
L()
if not A[0]==0:A[0]-=1
else:A[0]=4
T(A);G=0
if G%7==0:T(A)
if G%50==0:
f=e()
try:g=S(f);X=g;R.show(Y('66666:66066:60606:60006:66666'));V(1000)
except:pass
if U.read_analog()>C and N.read_analog()>C:J(F,H(B));D('saved')
if O.read_analog()>C and U.read_analog()>C:B=S(K(F,W));D('loaded')
if U.read_analog()>C and P.is_pressed():X=B;L();D('copied')
if O.read_analog()>C and N.read_analog()>C:
if not X==l:B=X
L();D('pasted')
if N.read_analog()>C and Q.is_pressed():J(F,H(B));E-=1;D(I(E));F=c+I(E);B=S(K(F,H(W)));J(b,E)
if N.read_analog()>C and P.is_pressed():J(F,H(B));E+=1;D(I(E));F=c+I(E);B=S(K(F,H(W)));J(b,E)
if O.read_analog()>C and P.is_pressed():send(H(B));D('sent')
if P.is_pressed()and Q.is_pressed():
if M==255:M=0
else:M+=1
D(I(M));V(500);J(k,I(M))
if O.read_analog()>C and Q.is_pressed():
G=0;D('Hold B to confirm.')
if Q.is_pressed():B=W;D('screen cleared.')
R.show(Y(H(B)))
if U.read_analog()>C:
G=0
if B[A[1]][A[0]]==9:B[A[1]][A[0]]=0
else:B[A[1]][A[0]]=a(B[A[1]][A[0]])+1
R.show(Y(H(B)));V(500) | 0.056075 | 0.18374 |
from google.appengine.ext.appstats import recording
from third_party.prodeagle import counter_names, config
import datetime
import math
import re
class AppStatsExport():
    """Exports App Engine AppStats request summaries as ProdEagle counters."""

    def append(self, end_time, key, value, result):
        """Append `value` to result[key][slot], where slot is `end_time`
        rounded to the counter epoch; missing levels are created."""
        if key not in result:
            result[key] = {}
        slot = counter_names.getEpochRounded(end_time)
        if slot not in result[key]:
            result[key][slot] = []
        result[key][slot] += [value]

    def getCounters(self, from_time, until_time):
        """Collect AppStats summaries whose end time is in
        (from_time, until_time] into counters.

        Produces request counts, latencies (real / API / overhead) and RPC
        call counts - overall and, when APPSTATS_PER_REQUESTS is configured,
        per request path. Raw per-slot value lists are then reduced to sums,
        plus the configured percentile counters for latency/request keys.
        """
        result = {}
        summaries = recording.load_summary_protos()
        per_request_re = False
        if config.APPSTATS_PER_REQUESTS:
            per_request_re = re.compile(config.APPSTATS_PER_REQUESTS)
        for index, summary in enumerate(summaries):
            end_time = datetime.datetime.fromtimestamp(
                (summary.start_timestamp_milliseconds() +
                 summary.duration_milliseconds()) / 1000)
            if end_time <= from_time or end_time > until_time:
                continue
            path_key = summary.http_path()
            if config.APPSTATS_DIFFERENTIATE_BETWEEN_POST_AND_GET:
                path_key += "." + summary.http_method()
            if not per_request_re:
                path_key = False          # per-path counters disabled
            elif not per_request_re.match(path_key):
                path_key = "Other"        # bucket for unmatched paths
            self.append(end_time, "AppStats.Requests.All", 1, result)
            self.append(end_time, "AppStats.Latency.Real.All",
                        summary.duration_milliseconds(), result)
            self.append(end_time, "AppStats.Latency.API.All",
                        summary.api_milliseconds(),
                        result)
            self.append(end_time, "AppStats.Latency.Overhead.All",
                        summary.overhead_walltime_milliseconds(), result)
            if path_key:
                self.append(end_time, "AppStats.Requests.All." + path_key, 1, result)
                self.append(end_time, "AppStats.Latency.Real.All." + path_key,
                            summary.duration_milliseconds(), result)
                self.append(end_time, "AppStats.Latency.API.All." + path_key,
                            summary.api_milliseconds(), result)
            for x in summary.rpc_stats_list():
                rpc_key = x.service_call_name()
                value = x.total_amount_of_calls()
                self.append(end_time, "AppStats.RPC." + rpc_key, value, result)
                self.append(end_time, "AppStats.RPC.Total", value, result)
                if path_key:
                    self.append(end_time, "AppStats.RPC." + rpc_key + "." + path_key,
                                value, result)
                    self.append(end_time, "AppStats.RPC.Total." + path_key,
                                value, result)
        # Reduce the raw per-slot lists. BUGFIX: iterate over a snapshot of
        # the keys - the loop inserts the new percentile keys into `result`,
        # which raises RuntimeError under Python 3's dict views and would
        # otherwise re-process the freshly added scalar entries.
        for key in list(result.keys()):
            is_distribution = (key.startswith("AppStats.Latency") or
                               key.startswith("AppStats.Requests"))
            if is_distribution:
                for percentile in config.APPSTATS_PERCENTILES:
                    new_key = key.replace("All", str(percentile) + "thPercentile", 1)
                    result[new_key] = {}
            for slot in result[key]:
                if is_distribution:
                    # percentile counters: mean of the lowest N% of samples
                    result[key][slot].sort()
                    for percentile in config.APPSTATS_PERCENTILES:
                        len_percentile = int(math.ceil(len(result[key][slot]) / 100.0 *
                                                       percentile))
                        new_key = key.replace("All", str(percentile) + "thPercentile", 1)
                        result[new_key][slot] = \
                            int(sum(result[key][slot][:len_percentile]) / len_percentile)
                result[key][slot] = sum(result[key][slot])
        return result
from google.appengine.ext.appstats import recording
from third_party.prodeagle import counter_names, config
import datetime
import math
import re
class AppStatsExport():
def append(self, end_time, key, value, result):
if key not in result:
result[key] = {}
slot = counter_names.getEpochRounded(end_time)
if slot not in result[key]:
result[key][slot] = []
result[key][slot] += [value]
def getCounters(self, from_time, until_time):
result = {}
summaries = recording.load_summary_protos()
per_request_re = False
if config.APPSTATS_PER_REQUESTS:
per_request_re = re.compile(config.APPSTATS_PER_REQUESTS)
for index, summary in enumerate(summaries):
end_time = datetime.datetime.fromtimestamp(
(summary.start_timestamp_milliseconds() +
summary.duration_milliseconds()) / 1000)
if end_time <= from_time or end_time > until_time:
continue
path_key = summary.http_path()
if config.APPSTATS_DIFFERENTIATE_BETWEEN_POST_AND_GET:
path_key += "." + summary.http_method()
if not per_request_re:
path_key = False
elif not per_request_re.match(path_key):
path_key = "Other"
self.append(end_time, "AppStats.Requests.All", 1, result)
self.append(end_time, "AppStats.Latency.Real.All",
summary.duration_milliseconds(), result)
self.append(end_time, "AppStats.Latency.API.All",
summary.api_milliseconds(),
result)
self.append(end_time, "AppStats.Latency.Overhead.All",
summary.overhead_walltime_milliseconds(), result)
if path_key:
self.append(end_time, "AppStats.Requests.All." + path_key, 1, result)
self.append(end_time, "AppStats.Latency.Real.All." + path_key,
summary.duration_milliseconds(), result)
self.append(end_time, "AppStats.Latency.API.All." + path_key ,
summary.api_milliseconds(), result)
for x in summary.rpc_stats_list():
rpc_key = x.service_call_name()
value = x.total_amount_of_calls()
self.append(end_time, "AppStats.RPC." + rpc_key, value, result)
self.append(end_time, "AppStats.RPC.Total", value, result)
if path_key:
self.append(end_time, "AppStats.RPC." + rpc_key + "." + path_key,
value, result)
self.append(end_time, "AppStats.RPC.Total." + path_key,
value, result)
for key in result.keys():
if (key.startswith("AppStats.Latency") or
key.startswith("AppStats.Requests")):
for percentile in config.APPSTATS_PERCENTILES:
new_key = key.replace("All", str(percentile) + "thPercentile", 1)
result[new_key] = {}
for slot in result[key]:
if (key.startswith("AppStats.Latency") or
key.startswith("AppStats.Requests")):
result[key][slot].sort()
for percentile in config.APPSTATS_PERCENTILES:
len_percentile = int(math.ceil(len(result[key][slot]) / 100.0 *
percentile ))
new_key = key.replace("All", str(percentile) + "thPercentile", 1)
result[new_key][slot] = \
int(sum(result[key][slot][:len_percentile]) / len_percentile)
result[key][slot] = sum(result[key][slot])
return result | 0.271735 | 0.101589 |
def load_use_conditions(use_cond, zone_usage, data_class):
    """Load use conditions from JSON, according to DIN 18599,
    SIA2024; in addition some AixLib specific use conditions for central AHU
    are defined.

    Parameters
    ----------
    use_cond : UseConditions()
        Instance of TEASERs
        BuildingObjects.UseConditions
    zone_usage : str
        code list for zone_usage according to 18599
    data_class : DataClass()
        DataClass containing the bindings for TypeBuildingElement and
        Material (typically this is the data class stored in prj.data,
        but the user can individually change that.
    """
    conditions = data_class.conditions_bind[zone_usage]
    use_cond.usage = zone_usage
    # Every remaining attribute is a 1:1 copy from the JSON binding; the
    # name list (in the original assignment order, so any property setters
    # fire in the same sequence) replaces ~30 duplicated statements.
    for attr in (
        "typical_length",
        "typical_width",
        "with_heating",
        "T_threshold_heating",
        "T_threshold_cooling",
        "with_cooling",
        "fixed_heat_flow_rate_persons",
        "activity_degree_persons",
        "persons",
        "internal_gains_moisture_no_people",
        "ratio_conv_rad_persons",
        "machines",
        "ratio_conv_rad_machines",
        "lighting_power",
        "ratio_conv_rad_lighting",
        "use_constant_infiltration",
        "infiltration_rate",
        "max_user_infiltration",
        "max_overheating_infiltration",
        "max_summer_infiltration",
        "winter_reduction_infiltration",
        "min_ahu",
        "max_ahu",
        "with_ahu",
        "heating_profile",
        "cooling_profile",
        "persons_profile",
        "machines_profile",
        "lighting_profile",
        "with_ideal_thresholds",
    ):
        setattr(use_cond, attr, conditions[attr])
"""Load use conditions from JSON, according to DIN 18599,
SIA2024 in addition some AixLib specific use conditions for central AHU
are defined.
Parameters
----------
use_cond : UseConditions()
Instance of TEASERs
BuildingObjects.UseConditions
zone_usage : str
code list for zone_usage according to 18599
data_class : DataClass()
DataClass containing the bindings for TypeBuildingElement and
Material (typically this is the data class stored in prj.data,
but the user can individually change that.
"""
conditions_bind = data_class.conditions_bind
use_cond.usage = zone_usage
use_cond.typical_length = conditions_bind[zone_usage]["typical_length"]
use_cond.typical_width = conditions_bind[zone_usage]["typical_width"]
use_cond.with_heating = conditions_bind[zone_usage]["with_heating"]
use_cond.T_threshold_heating = conditions_bind[zone_usage]["T_threshold_heating"]
use_cond.T_threshold_cooling = conditions_bind[zone_usage]["T_threshold_cooling"]
use_cond.with_cooling = conditions_bind[zone_usage]["with_cooling"]
use_cond.fixed_heat_flow_rate_persons = conditions_bind[zone_usage][
"fixed_heat_flow_rate_persons"
]
use_cond.activity_degree_persons = conditions_bind[zone_usage][
"activity_degree_persons"
]
use_cond.persons = conditions_bind[zone_usage]["persons"]
use_cond.internal_gains_moisture_no_people = conditions_bind[zone_usage][
"internal_gains_moisture_no_people"
]
use_cond.ratio_conv_rad_persons = conditions_bind[zone_usage][
"ratio_conv_rad_persons"
]
use_cond.machines = conditions_bind[zone_usage]["machines"]
use_cond.ratio_conv_rad_machines = conditions_bind[zone_usage][
"ratio_conv_rad_machines"
]
use_cond.lighting_power = conditions_bind[zone_usage]["lighting_power"]
use_cond.ratio_conv_rad_lighting = conditions_bind[zone_usage][
"ratio_conv_rad_lighting"
]
use_cond.use_constant_infiltration = conditions_bind[zone_usage][
"use_constant_infiltration"
]
use_cond.infiltration_rate = conditions_bind[zone_usage]["infiltration_rate"]
use_cond.max_user_infiltration = conditions_bind[zone_usage][
"max_user_infiltration"
]
use_cond.max_overheating_infiltration = conditions_bind[zone_usage][
"max_overheating_infiltration"
]
use_cond.max_summer_infiltration = conditions_bind[zone_usage][
"max_summer_infiltration"
]
use_cond.winter_reduction_infiltration = conditions_bind[zone_usage][
"winter_reduction_infiltration"
]
use_cond.min_ahu = conditions_bind[zone_usage]["min_ahu"]
use_cond.max_ahu = conditions_bind[zone_usage]["max_ahu"]
use_cond.with_ahu = conditions_bind[zone_usage]["with_ahu"]
use_cond.heating_profile = conditions_bind[zone_usage]["heating_profile"]
use_cond.cooling_profile = conditions_bind[zone_usage]["cooling_profile"]
use_cond.persons_profile = conditions_bind[zone_usage]["persons_profile"]
use_cond.machines_profile = conditions_bind[zone_usage]["machines_profile"]
use_cond.lighting_profile = conditions_bind[zone_usage]["lighting_profile"]
use_cond.with_ideal_thresholds = conditions_bind[zone_usage][
"with_ideal_thresholds"
] | 0.774455 | 0.243654 |
import boto3
import os
from boto3.dynamodb.conditions import Key, Attr
# event["userId", "rank1", "rank2", "createdAt"]
def lambda_handler(event, context):
    """Swap the ranks of two of a user's draft picks.

    Expected event keys: userId, rank1, rank2, createdAt.

    Returns the user's updated pick list on success, or a single-element
    list whose `pickId` field carries an error message when one of the
    affected players has already signed or no longer exists.
    """
    print(event)
    dynamo = boto3.resource('dynamodb')
    picks_table = dynamo.Table(os.environ["picks_table"])
    available_players_table = dynamo.Table(os.environ["available_players_table"])
    return_error = {
        "playerId": "",
        "overUnder": "",
        "rank": 0,
        "userId": "",
        "pickId": ""
    }
    print('userId', event["userId"])
    response = picks_table.query(KeyConditionExpression=Key('userId').eq(event["userId"]))
    print('response items', response['Items'])
    # Picks currently occupying either of the two ranks (0, 1 or 2 items).
    picks_to_switch = [a for a in response['Items']
                       if int(a["rank"]) == int(event["rank1"]) or int(a["rank"]) == int(event["rank2"])]
    # Refuse to move a pick whose player has already signed or vanished.
    for pick in picks_to_switch:
        response = available_players_table.query(KeyConditionExpression=Key('id').eq(pick["playerId"]))
        items = response['Items']
        if len(items) > 0:
            if 'signedAt' in items[0]:
                return_error["pickId"] = items[0]["first_name"] + " " + items[0]["last_name"] + " has already signed a contract"
                return [return_error]
        else:
            return_error["pickId"] = pick["playerId"] + " does not exist"
            return [return_error]
    print('picks_to_switch', picks_to_switch)
    if (len(picks_to_switch) == 2):
        # Both ranks occupied: exchange them.
        picks_table.update_item(
            Key={ 'playerId': picks_to_switch[0]["playerId"], 'userId': event["userId"] },
            UpdateExpression='Set #rank = :rank, createdAt = :createdAt',
            ExpressionAttributeNames = { "#rank": "rank" },
            ExpressionAttributeValues={':rank': int(picks_to_switch[1]["rank"]), ':createdAt': event["createdAt"]}
        )
        picks_table.update_item(
            Key={ 'playerId': picks_to_switch[1]["playerId"], 'userId': event["userId"] },
            UpdateExpression='Set #rank = :rank, createdAt = :createdAt',
            ExpressionAttributeNames = { "#rank": "rank" },
            ExpressionAttributeValues={':rank': int(picks_to_switch[0]["rank"]), ':createdAt': event["createdAt"]}
        )
    elif (len(picks_to_switch) == 1):
        # Only one rank occupied: move that pick to the other rank.
        # BUGFIX: normalize with int() before comparing/storing, matching the
        # two-pick branch and the filter above - DynamoDB returns Decimal
        # ranks while the event may carry strings, so the untyped comparison
        # could silently never match and the raw event value was written
        # back as the new rank.
        new_rank = int(event["rank1"])
        if int(picks_to_switch[0]["rank"]) == new_rank:
            new_rank = int(event["rank2"])
        picks_table.update_item(
            Key={ 'playerId': picks_to_switch[0]["playerId"], 'userId': event["userId"] },
            UpdateExpression='Set #rank = :rank, createdAt = :createdAt',
            ExpressionAttributeNames = { "#rank": "rank" },
            ExpressionAttributeValues={':rank': new_rank, ':createdAt': event["createdAt"]}
        )
    response = picks_table.query(KeyConditionExpression=Key('userId').eq(event["userId"]))
    return response["Items"]
import os
from boto3.dynamodb.conditions import Key, Attr
# event["userId", "rank1", "rank2", "createdAt"]
def lambda_handler(event, context):
print(event)
dynamo = boto3.resource('dynamodb')
picks_table = dynamo.Table(os.environ["picks_table"])
available_players_table = dynamo.Table(os.environ["available_players_table"])
return_error = {
"playerId": "",
"overUnder": "",
"rank": 0,
"userId": "",
"pickId": ""
}
print('userId', event["userId"])
response = picks_table.query(KeyConditionExpression=Key('userId').eq(event["userId"]))
print('response items', response['Items'])
picks_to_switch = [a for a in response['Items'] if int(a["rank"]) == int(event["rank1"]) or int(a["rank"]) == int(event["rank2"])]
for pick in picks_to_switch:
response = available_players_table.query(KeyConditionExpression=Key('id').eq(pick["playerId"]))
items = response['Items']
if len(items) > 0:
if 'signedAt' in items[0]:
return_error["pickId"] = items[0]["first_name"] + " " + items[0]["last_name"] + " has already signed a contract"
return [return_error]
else:
return_error["pickId"] = pick["playerId"] + " does not exist"
return [return_error]
print('picks_to_switch', picks_to_switch)
if (len(picks_to_switch) == 2):
picks_table.update_item(
Key={ 'playerId': picks_to_switch[0]["playerId"], 'userId': event["userId"] },
UpdateExpression='Set #rank = :rank, createdAt = :createdAt',
ExpressionAttributeNames = { "#rank": "rank" },
ExpressionAttributeValues={':rank': int(picks_to_switch[1]["rank"]), ':createdAt': event["createdAt"]}
)
picks_table.update_item(
Key={ 'playerId': picks_to_switch[1]["playerId"], 'userId': event["userId"] },
UpdateExpression='Set #rank = :rank, createdAt = :createdAt',
ExpressionAttributeNames = { "#rank": "rank" },
ExpressionAttributeValues={':rank': int(picks_to_switch[0]["rank"]), ':createdAt': event["createdAt"]}
)
elif (len(picks_to_switch) == 1):
new_rank = event["rank1"]
if picks_to_switch[0]["rank"] == new_rank:
new_rank = event["rank2"]
picks_table.update_item(
Key={ 'playerId': picks_to_switch[0]["playerId"], 'userId': event["userId"] },
UpdateExpression='Set #rank = :rank, createdAt = :createdAt',
ExpressionAttributeNames = { "#rank": "rank" },
ExpressionAttributeValues={':rank': new_rank, ':createdAt': event["createdAt"]}
)
response = picks_table.query(KeyConditionExpression=Key('userId').eq(event["userId"]))
return response["Items"] | 0.288068 | 0.161883 |
import numpy as np
import pytest
from scipy.stats import norm as nm
import orbitize.priors as priors
threshold = 1e-1
initialization_inputs = {
priors.GaussianPrior : [1000., 1.],
priors.LogUniformPrior : [1., 2.],
priors.UniformPrior : [0., 1.],
priors.SinPrior : [],
priors.LinearPrior : [-2., 2.]
}
expected_means_mins_maxes = {
priors.GaussianPrior : (1000.,0.,np.inf),
priors.LogUniformPrior : (1/np.log(2),1., 2.),
priors.UniformPrior : (0.5, 0., 1.),
priors.SinPrior : (np.pi/2., 0., np.pi),
priors.LinearPrior : (1./3.,0.,1.0)
}
lnprob_inputs = {
priors.GaussianPrior : np.array([-3.0, np.inf, 1000., 999.]),
priors.LogUniformPrior : np.array([-1., 0., 1., 1.5, 2., 2.5]),
priors.UniformPrior : np.array([0., 0.5, 1., -1., 2.]),
priors.SinPrior : np.array([0., np.pi/2., np.pi, 10., -1.]),
priors.LinearPrior : np.array([0., 0.5, 1., 2., -1.])
}
expected_probs = {
priors.GaussianPrior : np.array([0., 0., nm(1000.,1.).pdf(1000.), nm(1000.,1.).pdf(999.)]),
priors.LogUniformPrior : np.array([0., 0., 1., 2./3., 0.5, 0.])/np.log(2),
priors.UniformPrior : np.array([1., 1., 1., 0., 0.]),
priors.SinPrior : np.array([0., 0.5, 0., 0., 0.]),
priors.LinearPrior : np.array([2., 1., 0., 0., 0.])
}
def test_draw_samples():
"""
Test basic functionality of `draw_samples()` method of each `Prior` class.
"""
for Prior in initialization_inputs.keys():
inputs = initialization_inputs[Prior]
TestPrior = Prior(*inputs)
samples = TestPrior.draw_samples(10000)
exp_mean, exp_min, exp_max = expected_means_mins_maxes[Prior]
assert np.mean(samples) == pytest.approx(exp_mean, abs=threshold)
assert np.min(samples) > exp_min
assert np.max(samples) < exp_max
def test_compute_lnprob():
"""
Test basic functionality of `compute_lnprob()` method of each `Prior` class.
"""
for Prior in initialization_inputs.keys():
inputs = initialization_inputs[Prior]
TestPrior = Prior(*inputs)
values2test = lnprob_inputs[Prior]
lnprobs = TestPrior.compute_lnprob(values2test)
assert np.log(expected_probs[Prior]) == pytest.approx(lnprobs, abs=threshold)
if __name__=='__main__':
test_compute_lnprob()
test_draw_samples() | tests/test_priors.py | import numpy as np
import pytest
from scipy.stats import norm as nm
import orbitize.priors as priors
threshold = 1e-1
initialization_inputs = {
priors.GaussianPrior : [1000., 1.],
priors.LogUniformPrior : [1., 2.],
priors.UniformPrior : [0., 1.],
priors.SinPrior : [],
priors.LinearPrior : [-2., 2.]
}
expected_means_mins_maxes = {
priors.GaussianPrior : (1000.,0.,np.inf),
priors.LogUniformPrior : (1/np.log(2),1., 2.),
priors.UniformPrior : (0.5, 0., 1.),
priors.SinPrior : (np.pi/2., 0., np.pi),
priors.LinearPrior : (1./3.,0.,1.0)
}
lnprob_inputs = {
priors.GaussianPrior : np.array([-3.0, np.inf, 1000., 999.]),
priors.LogUniformPrior : np.array([-1., 0., 1., 1.5, 2., 2.5]),
priors.UniformPrior : np.array([0., 0.5, 1., -1., 2.]),
priors.SinPrior : np.array([0., np.pi/2., np.pi, 10., -1.]),
priors.LinearPrior : np.array([0., 0.5, 1., 2., -1.])
}
expected_probs = {
priors.GaussianPrior : np.array([0., 0., nm(1000.,1.).pdf(1000.), nm(1000.,1.).pdf(999.)]),
priors.LogUniformPrior : np.array([0., 0., 1., 2./3., 0.5, 0.])/np.log(2),
priors.UniformPrior : np.array([1., 1., 1., 0., 0.]),
priors.SinPrior : np.array([0., 0.5, 0., 0., 0.]),
priors.LinearPrior : np.array([2., 1., 0., 0., 0.])
}
def test_draw_samples():
"""
Test basic functionality of `draw_samples()` method of each `Prior` class.
"""
for Prior in initialization_inputs.keys():
inputs = initialization_inputs[Prior]
TestPrior = Prior(*inputs)
samples = TestPrior.draw_samples(10000)
exp_mean, exp_min, exp_max = expected_means_mins_maxes[Prior]
assert np.mean(samples) == pytest.approx(exp_mean, abs=threshold)
assert np.min(samples) > exp_min
assert np.max(samples) < exp_max
def test_compute_lnprob():
"""
Test basic functionality of `compute_lnprob()` method of each `Prior` class.
"""
for Prior in initialization_inputs.keys():
inputs = initialization_inputs[Prior]
TestPrior = Prior(*inputs)
values2test = lnprob_inputs[Prior]
lnprobs = TestPrior.compute_lnprob(values2test)
assert np.log(expected_probs[Prior]) == pytest.approx(lnprobs, abs=threshold)
if __name__=='__main__':
test_compute_lnprob()
test_draw_samples() | 0.676086 | 0.800614 |
from gbdxtools import Interface
from gbdxtools.workflow import Workflow
from auth_mock import get_mock_gbdx_session
import vcr
import unittest
import os
import json
"""
How to use the mock_gbdx_session and vcr to create unit tests:
1. Add a new test that is dependent upon actually hitting GBDX APIs.
2. Decorate the test with @vcr appropriately, supply a yaml file path to gbdxtools/tests/unit/cassettes
note: a yaml file will be created after the test is run
3. Replace "dummytoken" with a real gbdx token after running test successfully
4. Run the tests (existing test shouldn't be affected by use of a real token). This will record a "cassette".
5. Replace the real gbdx token with "dummytoken" again
6. Edit the cassette to remove any possibly sensitive information (s3 creds for example)
"""
class WorkflowTests(unittest.TestCase):
@classmethod
def setUpClass(cls):
# create mock session, replace dummytoken with real token to create cassette
mock_gbdx_session = get_mock_gbdx_session(token="dummytoken")
cls.gbdx = Interface(gbdx_connection=mock_gbdx_session)
# setup mock data paths
cls.data_path = os.path.abspath(os.path.join(os.path.dirname(os.path.realpath(__file__)), "data"))
def setUp(self):
pass
def tearDown(self):
pass
def test_init(self):
wf = Workflow()
self.assertTrue(isinstance(wf, Workflow))
self.assertTrue(wf.s3 is not None)
self.assertTrue(wf.gbdx_connection is not None)
@vcr.use_cassette('tests/unit/cassettes/test_batch_workflows.yaml', filter_headers=['authorization'])
def test_batch_workflows(self):
"""
tests all 3 endpoints for batch workflows, create, fetch, and cancel
:return:
"""
wf = Workflow()
with open(os.path.join(self.data_path, "batch_workflow.json")) as json_file:
self.batch_workflow_json = json.loads(json_file.read())
# test create
batch_workflow_id = wf.launch_batch_workflow(self.batch_workflow_json)
# test status
batch_workflow_status = wf.batch_workflow_status(batch_workflow_id)
self.assertEqual(batch_workflow_id, batch_workflow_status.get("batch_workflow_id"))
# test cancel
batch_workflow_status = wf.batch_workflow_cancel(batch_workflow_id)
workflows = batch_workflow_status.get('workflows')
for workflow in workflows:
self.assertTrue(workflow.get('state') in ["canceling", "canceled"])
@vcr.use_cassette('tests/unit/cassettes/test_workflow_get.yaml', filter_headers=['authorization'])
def test_workflow_get(self):
"""
test gbdx.workflows.get(<workflow_id>)
"""
wf = Workflow()
output = wf.get('4488969848362445219')
self.assertTrue('id' in output.keys())
self.assertTrue('owner' in output.keys())
self.assertTrue('submitted_time' in output.keys())
self.assertTrue('state' in output.keys())
self.assertTrue('callback' in output.keys())
self.assertTrue('tasks' in output.keys())
@vcr.use_cassette('tests/unit/cassettes/test_task_get_stdout.yaml', filter_headers=['authorization'])
def test_task_get_stdout(self):
"""
test gbdx.workflows.get_stdout(<workflow_id>,<task_id>)
"""
wf = Workflow()
output = wf.get_stdout('4488969848362445219','4488969848354891944')
self.assertTrue(len(output) > 0)
@vcr.use_cassette('tests/unit/cassettes/test_task_get_stderr.yaml', filter_headers=['authorization'])
def test_task_get_stderr(self):
"""
test gbdx.workflows.get_stdout(<workflow_id>,<task_id>)
"""
wf = Workflow()
output = wf.get_stderr('4488969848362445219','4488969848354891944')
self.assertEquals('<empty>', output)
@vcr.use_cassette('tests/unit/cassettes/test_workflow_search.yaml', filter_headers=['authorization'])
def test_workflow_search(self):
"""
test gbdx.workflow.search(lookback_h=<hours>, state=<state>, owner=<owner>)
"""
wf = Workflow()
output = wf.search(lookback_h=12, state='all')
self.assertTrue(len(output), 0) | tests/unit/test_workflow.py | from gbdxtools import Interface
from gbdxtools.workflow import Workflow
from auth_mock import get_mock_gbdx_session
import vcr
import unittest
import os
import json
"""
How to use the mock_gbdx_session and vcr to create unit tests:
1. Add a new test that is dependent upon actually hitting GBDX APIs.
2. Decorate the test with @vcr appropriately, supply a yaml file path to gbdxtools/tests/unit/cassettes
note: a yaml file will be created after the test is run
3. Replace "dummytoken" with a real gbdx token after running test successfully
4. Run the tests (existing test shouldn't be affected by use of a real token). This will record a "cassette".
5. Replace the real gbdx token with "dummytoken" again
6. Edit the cassette to remove any possibly sensitive information (s3 creds for example)
"""
class WorkflowTests(unittest.TestCase):
@classmethod
def setUpClass(cls):
# create mock session, replace dummytoken with real token to create cassette
mock_gbdx_session = get_mock_gbdx_session(token="dummytoken")
cls.gbdx = Interface(gbdx_connection=mock_gbdx_session)
# setup mock data paths
cls.data_path = os.path.abspath(os.path.join(os.path.dirname(os.path.realpath(__file__)), "data"))
def setUp(self):
pass
def tearDown(self):
pass
def test_init(self):
wf = Workflow()
self.assertTrue(isinstance(wf, Workflow))
self.assertTrue(wf.s3 is not None)
self.assertTrue(wf.gbdx_connection is not None)
@vcr.use_cassette('tests/unit/cassettes/test_batch_workflows.yaml', filter_headers=['authorization'])
def test_batch_workflows(self):
"""
tests all 3 endpoints for batch workflows, create, fetch, and cancel
:return:
"""
wf = Workflow()
with open(os.path.join(self.data_path, "batch_workflow.json")) as json_file:
self.batch_workflow_json = json.loads(json_file.read())
# test create
batch_workflow_id = wf.launch_batch_workflow(self.batch_workflow_json)
# test status
batch_workflow_status = wf.batch_workflow_status(batch_workflow_id)
self.assertEqual(batch_workflow_id, batch_workflow_status.get("batch_workflow_id"))
# test cancel
batch_workflow_status = wf.batch_workflow_cancel(batch_workflow_id)
workflows = batch_workflow_status.get('workflows')
for workflow in workflows:
self.assertTrue(workflow.get('state') in ["canceling", "canceled"])
@vcr.use_cassette('tests/unit/cassettes/test_workflow_get.yaml', filter_headers=['authorization'])
def test_workflow_get(self):
"""
test gbdx.workflows.get(<workflow_id>)
"""
wf = Workflow()
output = wf.get('4488969848362445219')
self.assertTrue('id' in output.keys())
self.assertTrue('owner' in output.keys())
self.assertTrue('submitted_time' in output.keys())
self.assertTrue('state' in output.keys())
self.assertTrue('callback' in output.keys())
self.assertTrue('tasks' in output.keys())
@vcr.use_cassette('tests/unit/cassettes/test_task_get_stdout.yaml', filter_headers=['authorization'])
def test_task_get_stdout(self):
"""
test gbdx.workflows.get_stdout(<workflow_id>,<task_id>)
"""
wf = Workflow()
output = wf.get_stdout('4488969848362445219','4488969848354891944')
self.assertTrue(len(output) > 0)
@vcr.use_cassette('tests/unit/cassettes/test_task_get_stderr.yaml', filter_headers=['authorization'])
def test_task_get_stderr(self):
"""
test gbdx.workflows.get_stdout(<workflow_id>,<task_id>)
"""
wf = Workflow()
output = wf.get_stderr('4488969848362445219','4488969848354891944')
self.assertEquals('<empty>', output)
@vcr.use_cassette('tests/unit/cassettes/test_workflow_search.yaml', filter_headers=['authorization'])
def test_workflow_search(self):
"""
test gbdx.workflow.search(lookback_h=<hours>, state=<state>, owner=<owner>)
"""
wf = Workflow()
output = wf.search(lookback_h=12, state='all')
self.assertTrue(len(output), 0) | 0.564339 | 0.408749 |
import sys
import json, re
def load(fn, handle_index=True, handle_v=False):
fp=open(fn, 'r')
return handel_value(handle_index_on_key(json.load(fp), handle_index), handle_v)
def loads(s, handle_index=True, handle_v=False):
return handel_value(handle_index_on_key(json.loads(s), handle_index), handle_v)
def dump(s, fn, indent=4):
fp=open(fn, 'w')
json.dump(merge_index_on_key(s), fp, indent=indent, sort_keys=True, ensure_ascii=False)
return fn
def dumps(s, indent=4):
return json.dumps(merge_index_on_key(s), indent=indent, sort_keys=True, ensure_ascii=False)
def handel_value(d, handle=False):
if not handle:
return d
for key, value in d.iteritems():
if type(value) is dict:
value = handel_value(value, handle)
else:
if key=='value':
if type(value) in [str, unicode] and len(value.split('\n')) > 1:
value = [ re.sub('^\s+', '', i) for i in value.split('\n') ]
else:
value = value
d[key] = value
return d
def handle_index_on_key(d, handle_index=True):
if not handle_index:
return d
new = {}
for key, value in d.iteritems():
#print key, value
m = re.match(r'^(\d+)\.(\S+.*)', key)
if m:
index = m.group(1)
new_key = m.group(2)
new[new_key] = {}
new[new_key]['index'] = index
else:
new_key = key
new[new_key] = {}
if type(value) is dict:
value = handle_index_on_key(value, handle_index)
new[new_key].update(value)
else:
new[new_key] = value
return new
def merge_index_on_key(d, merge_index=True):
if not merge_index:
return d
if type(d) != dict:
return d
new = {}
for key, value in d.iteritems():
#print key, value
new_key = key
if type(value) == dict:
if value.has_key('index'):
index = value['index']
new_key = '%s.%s' % (index, key)
del value['index']
else:
new_key = key
new[new_key] = {}
if type(value) is dict:
value = merge_index_on_key(value, merge_index)
new[new_key].update(value)
else:
new[new_key] = value
return new
def itervalue(d):
new = {}
for key, value in d.iteritems():
#print key, value
if type(value) == dict:
if value.has_key('value'):
new[key] = value['value']
value = itervalue(value)
new.update(value)
return new
if __name__ == "__main__":
# sys.path.append("../")
# a = ['a', 'b', 0]
# print dumps(a)
# print dumps(load('/proj/OP4/TEMPLATES/config/400_flow_control.json', handle_v=True))
d= load('/media/sf_depot/onepiece4/examples/200_user_setup.json')
print itervalue(d)
# for ff in iterfind('../').file('*.conf'):
# print "INFO: find config", ff
# conf = config()
# conf.read(ff)
# dict = conf.config_dic()
# #print json.dumps(dict, indent=4, sort_keys=True, ensure_ascii=False)
# json_file = '%s.%s' % (os.path.splitext(ff)[0], "json")
# print json_file
# fp = open(json_file, 'w')
# json.dump(dict, fp, indent=4, sort_keys=True, ensure_ascii=False) | pyop4/opbase/opjson.py | import sys
import json, re
def load(fn, handle_index=True, handle_v=False):
fp=open(fn, 'r')
return handel_value(handle_index_on_key(json.load(fp), handle_index), handle_v)
def loads(s, handle_index=True, handle_v=False):
return handel_value(handle_index_on_key(json.loads(s), handle_index), handle_v)
def dump(s, fn, indent=4):
fp=open(fn, 'w')
json.dump(merge_index_on_key(s), fp, indent=indent, sort_keys=True, ensure_ascii=False)
return fn
def dumps(s, indent=4):
return json.dumps(merge_index_on_key(s), indent=indent, sort_keys=True, ensure_ascii=False)
def handel_value(d, handle=False):
if not handle:
return d
for key, value in d.iteritems():
if type(value) is dict:
value = handel_value(value, handle)
else:
if key=='value':
if type(value) in [str, unicode] and len(value.split('\n')) > 1:
value = [ re.sub('^\s+', '', i) for i in value.split('\n') ]
else:
value = value
d[key] = value
return d
def handle_index_on_key(d, handle_index=True):
if not handle_index:
return d
new = {}
for key, value in d.iteritems():
#print key, value
m = re.match(r'^(\d+)\.(\S+.*)', key)
if m:
index = m.group(1)
new_key = m.group(2)
new[new_key] = {}
new[new_key]['index'] = index
else:
new_key = key
new[new_key] = {}
if type(value) is dict:
value = handle_index_on_key(value, handle_index)
new[new_key].update(value)
else:
new[new_key] = value
return new
def merge_index_on_key(d, merge_index=True):
if not merge_index:
return d
if type(d) != dict:
return d
new = {}
for key, value in d.iteritems():
#print key, value
new_key = key
if type(value) == dict:
if value.has_key('index'):
index = value['index']
new_key = '%s.%s' % (index, key)
del value['index']
else:
new_key = key
new[new_key] = {}
if type(value) is dict:
value = merge_index_on_key(value, merge_index)
new[new_key].update(value)
else:
new[new_key] = value
return new
def itervalue(d):
new = {}
for key, value in d.iteritems():
#print key, value
if type(value) == dict:
if value.has_key('value'):
new[key] = value['value']
value = itervalue(value)
new.update(value)
return new
if __name__ == "__main__":
# sys.path.append("../")
# a = ['a', 'b', 0]
# print dumps(a)
# print dumps(load('/proj/OP4/TEMPLATES/config/400_flow_control.json', handle_v=True))
d= load('/media/sf_depot/onepiece4/examples/200_user_setup.json')
print itervalue(d)
# for ff in iterfind('../').file('*.conf'):
# print "INFO: find config", ff
# conf = config()
# conf.read(ff)
# dict = conf.config_dic()
# #print json.dumps(dict, indent=4, sort_keys=True, ensure_ascii=False)
# json_file = '%s.%s' % (os.path.splitext(ff)[0], "json")
# print json_file
# fp = open(json_file, 'w')
# json.dump(dict, fp, indent=4, sort_keys=True, ensure_ascii=False) | 0.053539 | 0.158826 |
import json
import GlobalTools
def getRivalsScores(hash):
with GlobalTools.dbLock:
GlobalTools.cur.execute('''
SELECT rr.name AS name, rr.id AS id, ss.clear AS clear, ss.notes AS notes,
ss.pg*2+ss.gr AS score, ss.minbp AS minbp, rr.active AS active
FROM rivals AS rr INNER JOIN scores AS ss ON rr.id=ss.id
WHERE rr.active>0 AND ss.hash=?
ORDER BY score DESC
''',(hash,))
played=GlobalTools.cur.fetchall()
GlobalTools.cur.execute('''
SELECT name, id, 0 AS clear, 0 AS notes,
0 AS score, 0 AS minbp, active
FROM rivals
WHERE active>0 AND NOT id IN(
SELECT id FROM scores WHERE hash=?
)
''',(hash,))
notplayed=GlobalTools.cur.fetchall()
scores=played+notplayed
p_lamp=0
p_score=0
p_bp=0
for score in scores:
if score['active'] == 2:
p_lamp=score['clear']
p_score=score['score']
p_bp=score['minbp']
break
result=[]
prev_score=99999999
prev_ranking=0
cnt=0
for score in scores:
cnt+=1
rr={}
rr['id']=str(score['id'])
rr['name']= GlobalTools.convertHTMLEntities(score['name'])
rr['name_class']=''
if score['active']==2 : rr['name_class']=' pid'
rr['lamp']=str(score['clear'])
rr['lamp_class']=' NO'
if score['clear'] == 1 : rr['lamp_class']=' FA'
elif score['clear'] == 2 : rr['lamp_class']=' EC'
elif score['clear'] == 3 : rr['lamp_class']=' CL'
elif score['clear'] == 4 : rr['lamp_class']=' HC'
elif score['clear'] == 5 : rr['lamp_class']=' FC'
rr['challenge']=''
rr['challenge_class']=''
if p_lamp > 0 and score['active']!=2:
rr['challenge']='+'
rr['challenge_class']=' add'
rr['bp']=''
rr['bp_class']=''
rr['bpdiff']=''
rr['bpdiff_class']=''
rr['score']=''
rr['score_class']=''
rr['score_rate']=''
rr['scorediff']=''
rr['scorediff_class']=''
rr['ranking']=''
rr['ranking_class']=''
if score['clear'] > 0:
rr['bp']=str(score['minbp'])
if score['minbp']==0 : rr['bp_class']=' bp0'
if p_lamp > 0:
temp=p_bp-score['minbp']
rr['bpdiff']=str(temp)
rr['bpdiff_class']=' TIE'
if temp > 0 :
rr['bpdiff_class']=' LOSE'
rr['bpdiff']='+'+rr['bpdiff']
elif temp < 0 : rr['bpdiff_class']=' WIN'
else: rr['bpdiff']='-'
sc=score['score']
total=score['notes']*2
rr['score']=' ('+str(sc)+')'
rr['score_class']=' BF'
if sc*9>=total*9 : rr['score_class']=' MAX'
elif sc*9>=total*8 : rr['score_class']=' AAA'
elif sc*9>=total*7 : rr['score_class']=' AA'
elif sc*9>=total*6 : rr['score_class']=' A'
rr['score_rate']='%.2f%%' % (float(sc)/float(total)*100.0)
if p_lamp > 0:
temp=p_score-sc
rr['scorediff']=str(temp)
rr['scorediff_class']=' TIE'
if temp > 0 :
rr['scorediff_class']=' WIN'
rr['scorediff']='+'+rr['scorediff']
elif temp < 0 : rr['scorediff_class']=' LOSE'
else: rr['scorediff']='-'
temp=sc - prev_score
if temp < 0 :
prev_ranking=cnt
prev_score=sc
rr['ranking']=str(prev_ranking)
if prev_ranking == 1: rr['ranking_class']=' TOP1'
elif prev_ranking == 2: rr['ranking_class']=' TOP2'
elif prev_ranking == 3: rr['ranking_class']=' TOP3'
else: rr['lamp']=''
result.append(rr)
return result
def handleRankingRequest(q_dict):
res=GlobalTools.SimpleHTTPResponse()
if 'hash' in q_dict: scores=getRivalsScores(q_dict['hash'])
else: scores=[]
if len(scores)==0:
rr='<div class="small-title">Failed to load data.</div>'
else:
try:
rr='<table style="width:465px">'
rr+='''<thead>
<tr>
<th class="ranking leftborder2">#</th>
<th class="name">Name</th>
<th class="lamp leftborder">L</th>
<th class="bp">BP</th>
<th class="bpdiff">Diff</th>
<th class="score">Score</th>
<th class="scorediff rightborder2">Diff</th>
</tr>
</thead>'''
rr+='<tbody>'
for s in scores:
rr+='<tr>'
rr+='<td class="ranking leftborder%s">%s</td>'%(s['ranking_class'],s['ranking'])
rr+='<td class="name%s"><a target="_blank" href="http://www.dream-pro.info/~lavalse/LR2IR/search.cgi?mode=mypage&playerid=%s">%s</a></td>'%(s['name_class'],s['id'],s['name'])
rr+='<td class="lamp leftborder%s"><span style="display:none">%s</span></td>'%(s['lamp_class'],s['lamp'])
rr+='<td class="bp%s%s">%s</td>'%(s['bp_class'],s['lamp_class'],s['bp'])
rr+='<td class="bpdiff%s%s">%s</td>'%(s['bpdiff_class'],s['lamp_class'],s['bpdiff'])
rr+='<td class="score%s"><div class="%s" style="width:%s"> %s%s</div></td>'%(s['lamp_class'],s['score_class'],s['score_rate'],s['score_rate'],s['score'])
rr+='<td class="scorediff rightborder%s%s">%s</td>'%(s['scorediff_class'],s['lamp_class'],s['scorediff'])
rr+='</tr>'
rr+='</tbody></table>'
except Exception as e: rr='<div class="small-title">Failed to load data.</div>'
body=rr.encode('utf-8')
return res,body | tools/RankingRequestHandler.py | import json
import GlobalTools
def getRivalsScores(hash):
with GlobalTools.dbLock:
GlobalTools.cur.execute('''
SELECT rr.name AS name, rr.id AS id, ss.clear AS clear, ss.notes AS notes,
ss.pg*2+ss.gr AS score, ss.minbp AS minbp, rr.active AS active
FROM rivals AS rr INNER JOIN scores AS ss ON rr.id=ss.id
WHERE rr.active>0 AND ss.hash=?
ORDER BY score DESC
''',(hash,))
played=GlobalTools.cur.fetchall()
GlobalTools.cur.execute('''
SELECT name, id, 0 AS clear, 0 AS notes,
0 AS score, 0 AS minbp, active
FROM rivals
WHERE active>0 AND NOT id IN(
SELECT id FROM scores WHERE hash=?
)
''',(hash,))
notplayed=GlobalTools.cur.fetchall()
scores=played+notplayed
p_lamp=0
p_score=0
p_bp=0
for score in scores:
if score['active'] == 2:
p_lamp=score['clear']
p_score=score['score']
p_bp=score['minbp']
break
result=[]
prev_score=99999999
prev_ranking=0
cnt=0
for score in scores:
cnt+=1
rr={}
rr['id']=str(score['id'])
rr['name']= GlobalTools.convertHTMLEntities(score['name'])
rr['name_class']=''
if score['active']==2 : rr['name_class']=' pid'
rr['lamp']=str(score['clear'])
rr['lamp_class']=' NO'
if score['clear'] == 1 : rr['lamp_class']=' FA'
elif score['clear'] == 2 : rr['lamp_class']=' EC'
elif score['clear'] == 3 : rr['lamp_class']=' CL'
elif score['clear'] == 4 : rr['lamp_class']=' HC'
elif score['clear'] == 5 : rr['lamp_class']=' FC'
rr['challenge']=''
rr['challenge_class']=''
if p_lamp > 0 and score['active']!=2:
rr['challenge']='+'
rr['challenge_class']=' add'
rr['bp']=''
rr['bp_class']=''
rr['bpdiff']=''
rr['bpdiff_class']=''
rr['score']=''
rr['score_class']=''
rr['score_rate']=''
rr['scorediff']=''
rr['scorediff_class']=''
rr['ranking']=''
rr['ranking_class']=''
if score['clear'] > 0:
rr['bp']=str(score['minbp'])
if score['minbp']==0 : rr['bp_class']=' bp0'
if p_lamp > 0:
temp=p_bp-score['minbp']
rr['bpdiff']=str(temp)
rr['bpdiff_class']=' TIE'
if temp > 0 :
rr['bpdiff_class']=' LOSE'
rr['bpdiff']='+'+rr['bpdiff']
elif temp < 0 : rr['bpdiff_class']=' WIN'
else: rr['bpdiff']='-'
sc=score['score']
total=score['notes']*2
rr['score']=' ('+str(sc)+')'
rr['score_class']=' BF'
if sc*9>=total*9 : rr['score_class']=' MAX'
elif sc*9>=total*8 : rr['score_class']=' AAA'
elif sc*9>=total*7 : rr['score_class']=' AA'
elif sc*9>=total*6 : rr['score_class']=' A'
rr['score_rate']='%.2f%%' % (float(sc)/float(total)*100.0)
if p_lamp > 0:
temp=p_score-sc
rr['scorediff']=str(temp)
rr['scorediff_class']=' TIE'
if temp > 0 :
rr['scorediff_class']=' WIN'
rr['scorediff']='+'+rr['scorediff']
elif temp < 0 : rr['scorediff_class']=' LOSE'
else: rr['scorediff']='-'
temp=sc - prev_score
if temp < 0 :
prev_ranking=cnt
prev_score=sc
rr['ranking']=str(prev_ranking)
if prev_ranking == 1: rr['ranking_class']=' TOP1'
elif prev_ranking == 2: rr['ranking_class']=' TOP2'
elif prev_ranking == 3: rr['ranking_class']=' TOP3'
else: rr['lamp']=''
result.append(rr)
return result
def handleRankingRequest(q_dict):
res=GlobalTools.SimpleHTTPResponse()
if 'hash' in q_dict: scores=getRivalsScores(q_dict['hash'])
else: scores=[]
if len(scores)==0:
rr='<div class="small-title">Failed to load data.</div>'
else:
try:
rr='<table style="width:465px">'
rr+='''<thead>
<tr>
<th class="ranking leftborder2">#</th>
<th class="name">Name</th>
<th class="lamp leftborder">L</th>
<th class="bp">BP</th>
<th class="bpdiff">Diff</th>
<th class="score">Score</th>
<th class="scorediff rightborder2">Diff</th>
</tr>
</thead>'''
rr+='<tbody>'
for s in scores:
rr+='<tr>'
rr+='<td class="ranking leftborder%s">%s</td>'%(s['ranking_class'],s['ranking'])
rr+='<td class="name%s"><a target="_blank" href="http://www.dream-pro.info/~lavalse/LR2IR/search.cgi?mode=mypage&playerid=%s">%s</a></td>'%(s['name_class'],s['id'],s['name'])
rr+='<td class="lamp leftborder%s"><span style="display:none">%s</span></td>'%(s['lamp_class'],s['lamp'])
rr+='<td class="bp%s%s">%s</td>'%(s['bp_class'],s['lamp_class'],s['bp'])
rr+='<td class="bpdiff%s%s">%s</td>'%(s['bpdiff_class'],s['lamp_class'],s['bpdiff'])
rr+='<td class="score%s"><div class="%s" style="width:%s"> %s%s</div></td>'%(s['lamp_class'],s['score_class'],s['score_rate'],s['score_rate'],s['score'])
rr+='<td class="scorediff rightborder%s%s">%s</td>'%(s['scorediff_class'],s['lamp_class'],s['scorediff'])
rr+='</tr>'
rr+='</tbody></table>'
except Exception as e: rr='<div class="small-title">Failed to load data.</div>'
body=rr.encode('utf-8')
return res,body | 0.055797 | 0.078678 |
import django.contrib.gis.db.models.fields
import django.db.models.deletion
from django.conf import settings
from django.db import migrations
from django.db import models
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
("air_quality", "0005_auto_20190614_2307"),
]
operations = [
migrations.CreateModel(
name="Calibration",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("calibrated_at", models.DateTimeField(null=True)),
],
),
migrations.CreateModel(
name="Collection",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("starts_at", models.DateTimeField()),
("ends_at", models.DateTimeField()),
("route", models.CharField(max_length=256, null=True)),
],
),
migrations.CreateModel(
name="CollectionFile",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("file", models.FileField(default="", upload_to="data")),
("uploaded_at", models.DateTimeField(auto_now_add=True)),
("processor_version", models.CharField(max_length=256)),
("processed_at", models.DateTimeField(auto_now=True)),
(
"collection",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
to="air_quality.Collection",
),
),
],
),
migrations.CreateModel(
name="Pollutant",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("name", models.CharField(max_length=256)),
("description", models.CharField(max_length=1024)),
],
),
migrations.CreateModel(
name="PollutantValue",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("value", models.FloatField()),
(
"collection_file",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
to="air_quality.CollectionFile",
),
),
(
"pollutant",
models.ForeignKey(
null=True,
on_delete=django.db.models.deletion.SET_NULL,
to="air_quality.Pollutant",
),
),
],
),
migrations.CreateModel(
name="TimeGeo",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("location", django.contrib.gis.db.models.fields.PointField(srid=4326)),
("time", models.DateTimeField()),
(
"collection_file",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
to="air_quality.CollectionFile",
),
),
],
),
migrations.RemoveField(model_name="data", name="session"),
migrations.RemoveField(model_name="session", name="collected_by"),
migrations.RemoveField(model_name="session", name="route"),
migrations.AlterUniqueTogether(name="sessiondata", unique_together=set()),
migrations.RemoveField(model_name="sessiondata", name="sensor"),
migrations.RemoveField(model_name="sessiondata", name="session"),
migrations.RemoveField(model_name="sessiondata", name="uploaded_by"),
migrations.RenameField(
model_name="device", old_name="firmware_version", new_name="firmware"
),
migrations.RenameField(
model_name="device", old_name="manufacturer", new_name="serial"
),
migrations.RemoveField(model_name="device", name="calibration_date"),
migrations.RemoveField(model_name="device", name="model_number"),
migrations.RemoveField(model_name="device", name="serial_number"),
migrations.AlterField(
model_name="sensor",
name="device",
field=models.ForeignKey(
null=True,
on_delete=django.db.models.deletion.SET_NULL,
to="air_quality.Device",
),
),
migrations.AlterField(
model_name="sensor",
name="unit",
field=models.CharField(
choices=[
("mg/m3", "mg/m3"),
("ppm", "ppm"),
("g/m3", "g/m3"),
("PM10", "PM10"),
("PM2.5", "PM2.5"),
("μg/m3", "μg/m3"),
("latlon", "latlon"),
],
help_text="Measurement unit, e.g., mg/m3, ppm, etc.",
max_length=256,
),
),
migrations.DeleteModel(name="Data"),
migrations.DeleteModel(name="Route"),
migrations.DeleteModel(name="Session"),
migrations.DeleteModel(name="SessionData"),
migrations.AddField(
model_name="pollutantvalue",
name="time_geo",
field=models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE, to="air_quality.TimeGeo"
),
),
migrations.AddField(
model_name="collectionfile",
name="sensor",
field=models.ForeignKey(
null=True,
on_delete=django.db.models.deletion.SET_NULL,
to="air_quality.Sensor",
),
),
migrations.AddField(
model_name="collectionfile",
name="user",
field=models.ForeignKey(
null=True,
on_delete=django.db.models.deletion.SET_NULL,
to=settings.AUTH_USER_MODEL,
),
),
migrations.AddField(
model_name="calibration",
name="sensor",
field=models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE, to="air_quality.Sensor"
),
),
migrations.AddField(
model_name="calibration",
name="user",
field=models.ForeignKey(
null=True,
on_delete=django.db.models.deletion.SET_NULL,
to=settings.AUTH_USER_MODEL,
),
),
migrations.AddField(
model_name="sensor",
name="pollutant",
field=models.ForeignKey(
null=True,
on_delete=django.db.models.deletion.SET_NULL,
to="air_quality.Pollutant",
),
),
] | api/woeip/apps/air_quality/migrations/0006_auto_20190726_1709.py | import django.contrib.gis.db.models.fields
import django.db.models.deletion
from django.conf import settings
from django.db import migrations
from django.db import models
class Migration(migrations.Migration):
    """Schema migration 0006 for the air_quality app.

    Introduces the Calibration, Collection, CollectionFile, Pollutant,
    PollutantValue and TimeGeo models, retires the legacy
    Data/Route/Session/SessionData models, and reworks several Device and
    Sensor fields.
    """

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ("air_quality", "0005_auto_20190614_2307"),
    ]

    operations = [
        # --- New models --------------------------------------------------
        migrations.CreateModel(
            name="Calibration",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                ("calibrated_at", models.DateTimeField(null=True)),
            ],
        ),
        migrations.CreateModel(
            name="Collection",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                ("starts_at", models.DateTimeField()),
                ("ends_at", models.DateTimeField()),
                ("route", models.CharField(max_length=256, null=True)),
            ],
        ),
        migrations.CreateModel(
            name="CollectionFile",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                ("file", models.FileField(default="", upload_to="data")),
                ("uploaded_at", models.DateTimeField(auto_now_add=True)),
                ("processor_version", models.CharField(max_length=256)),
                ("processed_at", models.DateTimeField(auto_now=True)),
                (
                    "collection",
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.CASCADE,
                        to="air_quality.Collection",
                    ),
                ),
            ],
        ),
        migrations.CreateModel(
            name="Pollutant",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                ("name", models.CharField(max_length=256)),
                ("description", models.CharField(max_length=1024)),
            ],
        ),
        migrations.CreateModel(
            name="PollutantValue",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                ("value", models.FloatField()),
                (
                    "collection_file",
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.CASCADE,
                        to="air_quality.CollectionFile",
                    ),
                ),
                (
                    "pollutant",
                    models.ForeignKey(
                        null=True,
                        on_delete=django.db.models.deletion.SET_NULL,
                        to="air_quality.Pollutant",
                    ),
                ),
            ],
        ),
        migrations.CreateModel(
            name="TimeGeo",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                # GeoDjango point in WGS84 (srid=4326).
                ("location", django.contrib.gis.db.models.fields.PointField(srid=4326)),
                ("time", models.DateTimeField()),
                (
                    "collection_file",
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.CASCADE,
                        to="air_quality.CollectionFile",
                    ),
                ),
            ],
        ),
        # --- Dismantle the legacy Session/Route/Data schema --------------
        migrations.RemoveField(model_name="data", name="session"),
        migrations.RemoveField(model_name="session", name="collected_by"),
        migrations.RemoveField(model_name="session", name="route"),
        migrations.AlterUniqueTogether(name="sessiondata", unique_together=set()),
        migrations.RemoveField(model_name="sessiondata", name="sensor"),
        migrations.RemoveField(model_name="sessiondata", name="session"),
        migrations.RemoveField(model_name="sessiondata", name="uploaded_by"),
        # --- Device / Sensor field rework ---------------------------------
        migrations.RenameField(
            model_name="device", old_name="firmware_version", new_name="firmware"
        ),
        migrations.RenameField(
            model_name="device", old_name="manufacturer", new_name="serial"
        ),
        migrations.RemoveField(model_name="device", name="calibration_date"),
        migrations.RemoveField(model_name="device", name="model_number"),
        migrations.RemoveField(model_name="device", name="serial_number"),
        migrations.AlterField(
            model_name="sensor",
            name="device",
            field=models.ForeignKey(
                null=True,
                on_delete=django.db.models.deletion.SET_NULL,
                to="air_quality.Device",
            ),
        ),
        migrations.AlterField(
            model_name="sensor",
            name="unit",
            field=models.CharField(
                choices=[
                    ("mg/m3", "mg/m3"),
                    ("ppm", "ppm"),
                    ("g/m3", "g/m3"),
                    ("PM10", "PM10"),
                    ("PM2.5", "PM2.5"),
                    ("μg/m3", "μg/m3"),
                    ("latlon", "latlon"),
                ],
                help_text="Measurement unit, e.g., mg/m3, ppm, etc.",
                max_length=256,
            ),
        ),
        migrations.DeleteModel(name="Data"),
        migrations.DeleteModel(name="Route"),
        migrations.DeleteModel(name="Session"),
        migrations.DeleteModel(name="SessionData"),
        # --- Wire up foreign keys on the new models -----------------------
        migrations.AddField(
            model_name="pollutantvalue",
            name="time_geo",
            field=models.ForeignKey(
                on_delete=django.db.models.deletion.CASCADE, to="air_quality.TimeGeo"
            ),
        ),
        migrations.AddField(
            model_name="collectionfile",
            name="sensor",
            field=models.ForeignKey(
                null=True,
                on_delete=django.db.models.deletion.SET_NULL,
                to="air_quality.Sensor",
            ),
        ),
        migrations.AddField(
            model_name="collectionfile",
            name="user",
            field=models.ForeignKey(
                null=True,
                on_delete=django.db.models.deletion.SET_NULL,
                to=settings.AUTH_USER_MODEL,
            ),
        ),
        migrations.AddField(
            model_name="calibration",
            name="sensor",
            field=models.ForeignKey(
                on_delete=django.db.models.deletion.CASCADE, to="air_quality.Sensor"
            ),
        ),
        migrations.AddField(
            model_name="calibration",
            name="user",
            field=models.ForeignKey(
                null=True,
                on_delete=django.db.models.deletion.SET_NULL,
                to=settings.AUTH_USER_MODEL,
            ),
        ),
        migrations.AddField(
            model_name="sensor",
            name="pollutant",
            field=models.ForeignKey(
                null=True,
                on_delete=django.db.models.deletion.SET_NULL,
                to="air_quality.Pollutant",
            ),
        ),
    ] | 0.48121 | 0.136752
from __future__ import annotations
import pathlib
from typing import Union
def create_parent_symlink(
    destination_path: Union[str, pathlib.Path],
    symlink_name: str,
    *,
    levels: int = 2,
    overwrite_symlink: bool = False,
) -> bool:
    """Place a symbolic link *levels* directory levels above its destination.

    The link is created in the ancestor directory ``levels`` levels above
    ``destination_path`` and points (relatively) back down at it.  An
    existing symlink at that location may be replaced; a regular file at
    that location is never touched.

    :param destination_path: The full (absolute) path the link points to.
    :param symlink_name: File name of the symbolic link to create.
    :param levels: How many directory levels above the destination the link
        should live (must be at least 1).
    :param overwrite_symlink: Replace a pre-existing symlink if one exists.
    :return: True if successful, False otherwise.
    """
    target = pathlib.Path(destination_path)
    assert target.is_absolute()
    assert levels > 0, "symlink must be in parent directory or above"
    # Directory that will hold the link, `levels` levels above the target.
    parent_dir = target.parents[levels - 1]
    # Relative path from that directory back down to the target.
    relative_target = pathlib.Path(*target.parts[-levels:])
    return symlink_to(
        relative_target,
        parent_dir / symlink_name,
        overwrite_symlink=overwrite_symlink,
    )
def symlink_to(
    link_destination: pathlib.Path,
    link_path: pathlib.Path,
    *,
    target_is_directory: bool = False,
    overwrite_symlink: bool = False,
) -> bool:
    """Create a symbolic link.

    This function works analogous to os.symlink, but optionally allows
    overwriting symbolic links, and instead of raising exceptions returns
    True on success and False on failure.

    :param link_destination: Path the new link should point to.
    :param link_path: Location of the link itself.
    :param target_is_directory: Forwarded to Path.symlink_to (only relevant
        on Windows).
    :param overwrite_symlink: Replace an existing symlink at link_path.
    :return: True if the link was created, False otherwise.
    """
    # Bail if the destination is a symbolic link and we do not overwrite links
    if link_path.is_symlink():
        # Python 3.9+: Could use .readlink() here and return True if correct
        if not overwrite_symlink:
            return False
    elif link_path.exists():
        # If it is not a symbolic link AND exists, then also bail.
        return False
    # Symlinks can't be directly overwritten, so create a temporary symlink next
    # to where it should go, and then rename on top of a potentially existing link.
    tmp_link = link_path.parent / f".tmp.{link_path.name}"
    tmp_link.symlink_to(link_destination, target_is_directory=target_is_directory)
    try:
        tmp_link.replace(link_path)
    except PermissionError as e:
        if getattr(e, "winerror", None) == 5:
            # Windows can't rename on top, so delete and retry
            link_path.unlink()
            tmp_link.replace(link_path)
        else:
            raise
    return True | src/zocalo/util/symlink.py | from __future__ import annotations
import pathlib
from typing import Union
def create_parent_symlink(
    destination_path: Union[str, pathlib.Path],
    symlink_name: str,
    *,
    levels: int = 2,
    overwrite_symlink: bool = False,
) -> bool:
    """Create a symbolic link in a parent directory,
    $levels levels above the link destination.
    If a link already exists in that location it can be overwritten.
    If a file with the symlink name exists in the location it is left
    untouched.
    :param destination_path: The full path that is the symlink destination.
    :param symlink_name: The name of the symbolic link to be created.
    :param levels: The number of levels above the destination path where the
        symlink should be created.
    :param overwrite_symlink: If the destination exists and is a symlink,
        whether it should be overwritten.
    :return: True if successful, False otherwise.
    """
    destination_path = pathlib.Path(destination_path)
    # Only absolute destinations are supported; the relative link target is
    # derived from the trailing path components below.
    assert destination_path.is_absolute()
    assert levels > 0, "symlink must be in parent directory or above"
    # Generate path to the symbolic link
    link_path = destination_path.parents[levels - 1].joinpath(symlink_name)
    # Construct the (relative) destination of the symlink
    rel_destination = pathlib.Path(*destination_path.parts[-levels:])
    return symlink_to(rel_destination, link_path, overwrite_symlink=overwrite_symlink)
def symlink_to(
    link_destination: pathlib.Path,
    link_path: pathlib.Path,
    *,
    target_is_directory: bool = False,
    overwrite_symlink: bool = False,
) -> bool:
    """Create a symbolic link.

    This function works analogous to os.symlink, but optionally allows
    overwriting symbolic links, and instead of raising exceptions returns
    True on success and False on failure.

    :param link_destination: Path the new link should point to.
    :param link_path: Location of the link itself.
    :param target_is_directory: Forwarded to Path.symlink_to (only relevant
        on Windows).
    :param overwrite_symlink: Replace an existing symlink at link_path.
    :return: True if the link was created, False otherwise.
    """
    # Bail if the destination is a symbolic link and we do not overwrite links
    if link_path.is_symlink():
        # Python 3.9+: Could use .readlink() here and return True if correct
        if not overwrite_symlink:
            return False
    elif link_path.exists():
        # If it is not a symbolic link AND exists, then also bail.
        return False
    # Symlinks can't be directly overwritten, so create a temporary symlink next
    # to where it should go, and then rename on top of a potentially existing link.
    tmp_link = link_path.parent / f".tmp.{link_path.name}"
    tmp_link.symlink_to(link_destination, target_is_directory=target_is_directory)
    try:
        tmp_link.replace(link_path)
    except PermissionError as e:
        if getattr(e, "winerror", None) == 5:
            # Windows can't rename on top, so delete and retry
            link_path.unlink()
            tmp_link.replace(link_path)
        else:
            raise
    return True | 0.9079 | 0.548432
import streamlit as st
import os
import requests
from streamlit_folium import folium_static
import folium
def render_streamlit():
    """Render the "islands" page of the Streamlit dashboard.

    Shows penguin photos, a folium map of the Palmer Archipelago islands
    (marker coordinates fetched from the backend API), and a form that asks
    the API for the island nearest to a user-supplied latitude/longitude.

    NOTE(review): original indentation was lost in extraction; the nesting
    of the statements after ``with cols[1]:`` is reconstructed — confirm
    against the upstream repository.
    """
    # Backend API base URL comes from the environment.
    url_api = os.getenv('URL_API')
    cols = st.columns((1, 3, 1))
    cols[1].title("More Antartic penguins...")
    st.write(
        "*Check out this to know more!* 🐧 [link](https://es.wikipedia.org/wiki/Spheniscidae)")
    cols[1].image([os.path.join(os.path.dirname(__file__),
                                "../assets/Emperor penguin.jpg"),
                   os.path.join(os.path.dirname(__file__),
                                "../assets/King penguin.jpg"),
                   os.path.join(os.path.dirname(__file__),
                                '../assets/Macaroni penguin.jpeg'),
                   os.path.join(os.path.dirname(__file__),
                                '../assets/rockhopper penguin.jpg'),
                   os.path.join(os.path.dirname(__file__),
                                '../assets/Royal penguin.jpg'),
                   os.path.join(os.path.dirname(__file__),
                                '../assets/rare penguin.jpg')
                   ], width=300, caption=["Emperor", "King", "Macaroni", "rockhopper", "Royal", "rare"],)
    # Geolocation of the Palmer Archipelago islands.
    cols[1].header("**Where penguins live?**")
    initial_location = []
    folium_markers = []

    def icon():
        # Custom penguin icon used for every map marker.
        return folium.features.CustomIcon(
            'https://d29fhpw069ctt2.cloudfront.net/icon/image/49037/preview.svg', icon_size=(30, 30))

    Torgersen = requests.get(
        f"{url_api}/location?name=Torgersen%20Island,%20Antarctica").json()
    initial_location = [-64.968089, -63.551734]
    folium_markers.append(
        folium.Marker(
            [Torgersen["latitude"], Torgersen["longitude"]], popup="Torgersen Island", tooltip="Torgersen Island", icon=icon()
        )
    )
    Biscoe = requests.get(
        f"{url_api}/location?name=Biscoe%20Islands,%20Antarctica").json()
    folium_markers.append(
        folium.Marker(
            [Biscoe["latitude"], Biscoe["longitude"]], popup="Biscoe Island", tooltip="Biscoe Island", icon=icon()
        )
    )
    # For Dream Island the geocoding API cannot locate it correctly,
    # so folium is forced to use the real coordinates.
    folium_markers.append(
        folium.Marker(
            [-64.7333323, -64.2420877], popup="Dream Island", tooltip="Dream Island", icon=icon()
        )
    )
    m = folium.Map(location=initial_location, zoom_start=7)
    for elem in folium_markers:
        elem.add_to(m)
    with cols[1]:
        folium_static(m)
        st.header("**Which is nearest island from my ubication?**")
        form = st.form(key='my-form')
        coordinates = form.text_input('Enter latitude,longitude')
        submit = form.form_submit_button('Submit')
        st.write('*Press submit to have your nearest island printed below*')
        if submit:
            response = requests.get(
                f"{url_api}/islands?latlon={coordinates}").json()
            st.write(f'*The nearest island is {response["name"]}*') | src/streamlit_dashboard/src/islands.py | import streamlit as st
import os
import requests
from streamlit_folium import folium_static
import folium
def render_streamlit():
    """Render the "islands" dashboard page: photos, island map, nearest-island form.

    NOTE(review): original indentation was lost in extraction; the nesting
    of the statements after ``with cols[1]:`` is reconstructed — confirm
    against the upstream repository.
    """
    # Backend API base URL comes from the environment.
    url_api = os.getenv('URL_API')
    cols = st.columns((1, 3, 1))
    cols[1].title("More Antartic penguins...")
    st.write(
        "*Check out this to know more!* 🐧 [link](https://es.wikipedia.org/wiki/Spheniscidae)")
    cols[1].image([os.path.join(os.path.dirname(__file__),
                                "../assets/Emperor penguin.jpg"),
                   os.path.join(os.path.dirname(__file__),
                                "../assets/King penguin.jpg"),
                   os.path.join(os.path.dirname(__file__),
                                '../assets/Macaroni penguin.jpeg'),
                   os.path.join(os.path.dirname(__file__),
                                '../assets/rockhopper penguin.jpg'),
                   os.path.join(os.path.dirname(__file__),
                                '../assets/Royal penguin.jpg'),
                   os.path.join(os.path.dirname(__file__),
                                '../assets/rare penguin.jpg')
                   ], width=300, caption=["Emperor", "King", "Macaroni", "rockhopper", "Royal", "rare"],)
    # Geolocation of the Palmer Archipelago islands.
    cols[1].header("**Where penguins live?**")
    initial_location = []
    folium_markers = []

    def icon():
        # Custom penguin icon used for every map marker.
        return folium.features.CustomIcon(
            'https://d29fhpw069ctt2.cloudfront.net/icon/image/49037/preview.svg', icon_size=(30, 30))

    Torgersen = requests.get(
        f"{url_api}/location?name=Torgersen%20Island,%20Antarctica").json()
    initial_location = [-64.968089, -63.551734]
    folium_markers.append(
        folium.Marker(
            [Torgersen["latitude"], Torgersen["longitude"]], popup="Torgersen Island", tooltip="Torgersen Island", icon=icon()
        )
    )
    Biscoe = requests.get(
        f"{url_api}/location?name=Biscoe%20Islands,%20Antarctica").json()
    folium_markers.append(
        folium.Marker(
            [Biscoe["latitude"], Biscoe["longitude"]], popup="Biscoe Island", tooltip="Biscoe Island", icon=icon()
        )
    )
    # For Dream Island the geocoding API cannot locate it correctly,
    # so folium is forced to use the real coordinates.
    folium_markers.append(
        folium.Marker(
            [-64.7333323, -64.2420877], popup="Dream Island", tooltip="Dream Island", icon=icon()
        )
    )
    m = folium.Map(location=initial_location, zoom_start=7)
    for elem in folium_markers:
        elem.add_to(m)
    with cols[1]:
        folium_static(m)
        st.header("**Which is nearest island from my ubication?**")
        form = st.form(key='my-form')
        coordinates = form.text_input('Enter latitude,longitude')
        submit = form.form_submit_button('Submit')
        st.write('*Press submit to have your nearest island printed below*')
        if submit:
            response = requests.get(
                f"{url_api}/islands?latlon={coordinates}").json()
            st.write(f'*The nearest island is {response["name"]}*') | 0.47926 | 0.198258
import torch
import torch.nn as nn
import torchvision.models as models
class EncoderCNN(nn.Module):
    """CNN encoder: frozen pretrained ResNet-152 plus a trainable linear embedding."""

    def __init__(self, embed_size):
        """Load the pretrained ResNet152 and replace top fc layer.

        :param embed_size: Dimensionality of the output image embedding.
        """
        super(EncoderCNN, self).__init__()
        resnet = models.resnet152(pretrained=True)
        # Freeze the backbone; only the embedding layer below is trained.
        for param in resnet.parameters():
            param.requires_grad_(False)
        # Drop the final fully-connected classification layer.
        modules = list(resnet.children())[:-1]
        self.resnet = nn.Sequential(*modules)
        self.embed = nn.Linear(resnet.fc.in_features, embed_size)
        self.init_weights()

    def forward(self, images):
        """Extract the image feature vectors."""
        features = self.resnet(images)
        # Flatten the pooled feature map to (batch, resnet.fc.in_features).
        features = features.view(features.size(0), -1)
        features = self.embed(features)
        return features

    def init_weights(self):
        """Initialize the weights of the embedding layer.

        BUG FIX: the original referenced the nonexistent ``self.self.linear``
        / ``self.linear`` (the trainable layer is ``self.embed``), which
        raised AttributeError during __init__.
        """
        self.embed.weight.data.normal_(0.0, 0.02)
        self.embed.bias.data.fill_(0)
class DecoderRNN(nn.Module):
    """LSTM decoder that generates caption token scores from encoded image features."""

    def __init__(self, embed_size, hidden_size, vocab_size, num_layers=1):
        """
        Set the hyper-parameters and build the layers.
        Parameters
        ----------
        - embed_size : Dimensionality of image and word embeddings
        - hidden_size : number of features in hidden state of the RNN decoder
        - vocab_size : The size of vocabulary or output size
        - num_layers : Number of layers
        """
        super(DecoderRNN, self).__init__()
        self.hidden_size = hidden_size
        # embedding layer that turns words into a vector of a specified size
        self.word_embeddings = nn.Embedding(vocab_size, embed_size)
        # The LSTM takes embedded vectors as inputs
        # and outputs hidden states of hidden_size
        self.lstm = nn.LSTM(input_size=embed_size,
                            hidden_size=hidden_size,
                            num_layers=num_layers,
                            batch_first=True)
        # the linear layer that maps the hidden state output dimension
        self.linear = nn.Linear(hidden_size, vocab_size)
        self.init_weights()

    def forward(self, features, captions):
        """Decode image features into per-step vocabulary scores.

        :param features: Image embeddings of shape (batch, embed_size).
        :param captions: Token-id tensor of shape (batch, seq_len).
        :return: Scores of shape (batch, seq_len, vocab_size).
        """
        # Drop the final token: the decoder predicts each *next* word.
        captions = captions[:, :-1]
        embeds = self.word_embeddings(captions)
        # Prepend the image feature as the first "word" of the sequence.
        inputs = torch.cat((features.unsqueeze(1), embeds), 1)
        lstm_out, hidden = self.lstm(inputs)
        outputs = self.linear(lstm_out)
        return outputs

    def init_weights(self):
        """Initialize weights.

        BUG FIX: the embedding layer is ``word_embeddings``, not ``embed``;
        the original raised AttributeError during __init__.
        """
        self.word_embeddings.weight.data.uniform_(-0.1, 0.1)
        self.linear.weight.data.uniform_(-0.1, 0.1)
        self.linear.bias.data.fill_(0)

    def sample(self, inputs, states=None, max_len=20):
        """
        Greedy search:
        Samples captions for pre-processed image tensor (inputs)
        and returns predicted sentence (list of tensor ids of length max_len)
        """
        predicted_sentence = []
        for i in range(max_len):
            lstm_out, states = self.lstm(inputs, states)
            # (batch, 1, hidden) -> (batch, hidden).  The original applied
            # squeeze(1) twice; the second call was a no-op and was removed.
            lstm_out = lstm_out.squeeze(1)
            outputs = self.linear(lstm_out)
            # Get maximum probabilities
            target = outputs.max(1)[1]
            # Append result into predicted_sentence list
            predicted_sentence.append(target.item())
            # Update the input for next iteration
            inputs = self.word_embeddings(target).unsqueeze(1)
        return predicted_sentence
import torch.nn as nn
import torchvision.models as models
class EncoderCNN(nn.Module):
    """Image encoder built from a frozen pretrained ResNet-152 and a linear head."""

    def __init__(self, embed_size):
        """Load the pretrained ResNet152 and replace top fc layer.

        :param embed_size: Dimensionality of the output image embedding.
        """
        super(EncoderCNN, self).__init__()
        resnet = models.resnet152(pretrained=True)
        # Backbone weights stay fixed; only the linear head is trained.
        for param in resnet.parameters():
            param.requires_grad_(False)
        # Remove the classification layer, keep everything up to the pool.
        modules = list(resnet.children())[:-1]
        self.resnet = nn.Sequential(*modules)
        self.embed = nn.Linear(resnet.fc.in_features, embed_size)
        self.init_weights()

    def forward(self, images):
        """Extract the image feature vectors."""
        features = self.resnet(images)
        # Flatten pooled features to (batch, resnet.fc.in_features).
        features = features.view(features.size(0), -1)
        features = self.embed(features)
        return features

    def init_weights(self):
        """Initialize the embedding-layer weights.

        BUG FIX: the original wrote ``self.self.linear`` and referenced a
        nonexistent ``self.linear``; the trainable layer is ``self.embed``.
        """
        self.embed.weight.data.normal_(0.0, 0.02)
        self.embed.bias.data.fill_(0)
class DecoderRNN(nn.Module):
    """LSTM caption decoder: image feature + word embeddings -> vocabulary scores."""

    def __init__(self, embed_size, hidden_size, vocab_size, num_layers=1):
        """
        Set the hyper-parameters and build the layers.
        Parameters
        ----------
        - embed_size : Dimensionality of image and word embeddings
        - hidden_size : number of features in hidden state of the RNN decoder
        - vocab_size : The size of vocabulary or output size
        - num_layers : Number of layers
        """
        super(DecoderRNN, self).__init__()
        self.hidden_size = hidden_size
        # embedding layer that turns words into a vector of a specified size
        self.word_embeddings = nn.Embedding(vocab_size, embed_size)
        # The LSTM takes embedded vectors as inputs
        # and outputs hidden states of hidden_size
        self.lstm = nn.LSTM(input_size=embed_size,
                            hidden_size=hidden_size,
                            num_layers=num_layers,
                            batch_first=True)
        # the linear layer that maps the hidden state output dimension
        self.linear = nn.Linear(hidden_size, vocab_size)
        self.init_weights()

    def forward(self, features, captions):
        """Decode image features into per-step vocabulary scores.

        :param features: Image embeddings of shape (batch, embed_size).
        :param captions: Token-id tensor of shape (batch, seq_len).
        :return: Scores of shape (batch, seq_len, vocab_size).
        """
        # Drop the final token: the decoder predicts each *next* word.
        captions = captions[:, :-1]
        embeds = self.word_embeddings(captions)
        # Prepend the image feature as the first "word" of the sequence.
        inputs = torch.cat((features.unsqueeze(1), embeds), 1)
        lstm_out, hidden = self.lstm(inputs)
        outputs = self.linear(lstm_out)
        return outputs

    def init_weights(self):
        """Initialize weights.

        BUG FIX: the embedding layer is ``word_embeddings``, not ``embed``;
        the original raised AttributeError during __init__.
        """
        self.word_embeddings.weight.data.uniform_(-0.1, 0.1)
        self.linear.weight.data.uniform_(-0.1, 0.1)
        self.linear.bias.data.fill_(0)

    def sample(self, inputs, states=None, max_len=20):
        """
        Greedy search:
        Samples captions for pre-processed image tensor (inputs)
        and returns predicted sentence (list of tensor ids of length max_len)
        """
        predicted_sentence = []
        for i in range(max_len):
            lstm_out, states = self.lstm(inputs, states)
            # (batch, 1, hidden) -> (batch, hidden); the duplicated second
            # squeeze(1) in the original was a no-op and was removed.
            lstm_out = lstm_out.squeeze(1)
            outputs = self.linear(lstm_out)
            # Get maximum probabilities
            target = outputs.max(1)[1]
            # Append result into predicted_sentence list
            predicted_sentence.append(target.item())
            # Update the input for next iteration
            inputs = self.word_embeddings(target).unsqueeze(1)
        return predicted_sentence
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import fit
from models import calc_cs, calc_ffs, calc_ge_gm, calc_rho, dipole_ffs, get_b2, hbarc
# Configure matplotlib to render all text with LaTeX / Latin Modern fonts.
matplotlib.rcParams["text.usetex"] = True
matplotlib.rcParams["font.size"] = 13
matplotlib.rcParams["font.family"] = "lmodern"
matplotlib.rcParams["text.latex.preamble"] = r"\usepackage{lmodern}"
matplotlib.rcParams["xtick.labelsize"] = 12
matplotlib.rcParams["ytick.labelsize"] = 12
# Number of samples to use when generating statistical uncertainty bands
N_SAMPLES = 1000
def read_Rosenbluth_data():
    """Read data for G_E and G_M from "Rosenbluth.dat".

    :return: DataFrame with columns Q2, GE, delta_GE, GM, delta_GM
        (the first five header lines of the file are skipped).
    """
    columns = ["Q2", "GE", "delta_GE", "GM", "delta_GM"]
    return pd.read_csv("data/Rosenbluth.dat", sep=" ", skiprows=5, names=columns)
def calc_interval(calc_func, x_range, param_list, order):
    """Calculate 68% ("1 sigma") percentile interval from param sample.

    :param calc_func: Callable (x_range, params, order) -> array-like.
    :param x_range: Points at which the quantity is evaluated.
    :param param_list: Sample of parameter vectors.
    :param order: Model order, forwarded to calc_func.
    :return: Array of the (15.9, 84.1) percentiles along the sample axis.
    """
    samples = np.array([calc_func(x_range, p, order) for p in param_list])
    return np.percentile(samples, (15.9, 84.1), 0)
def calc_params(data, order, reg_param):
    """Run fit and get model parameters and covariance.

    The normalization parameters produced by the fit are stripped off,
    leaving only the model coefficients and the matching covariance block.
    """
    fit_result = fit.fit(data, data, order, reg_param)
    params, cov = fit_result[0], fit_result[4]
    n = fit.N_NORM_PARAMS
    return params[n:], cov[n:, n:]
def calc_sys_bands(calc_func, x_range, data, order, reg_param):
    """Calculate systematic error bands for given quantity.

    Refits the data under four systematic variations (Q^2 cut min/max,
    normalization up/low), takes the per-point envelope of each pair of
    shifts, and combines cut- and normalization-shifts in quadrature.

    :return: (f1_up, f1_low, f2_up, f2_low) band half-widths.
    """
    params, _ = calc_params(data, order, reg_param)
    f1, f2 = calc_func(x_range, params, order)

    def evaluate_variant(name):
        # Refit with one systematic variation and evaluate the quantity.
        variant_params = fit.fit_systematic_variant(name, data, order, reg_param)[0]
        return calc_func(x_range, variant_params, order)

    mincut_f1, mincut_f2 = evaluate_variant("cs_mincut")
    maxcut_f1, maxcut_f2 = evaluate_variant("cs_maxcut")
    sysup_f1, sysup_f2 = evaluate_variant("cs_sysup")
    syslow_f1, syslow_f2 = evaluate_variant("cs_syslow")

    def shifts(a, b, base):
        # Positive (upper) and negative (lower) envelope of the two shifts.
        up = np.clip(np.maximum(a - base, b - base), 0, None)
        low = np.clip(np.minimum(a - base, b - base), None, 0)
        return up, low

    f1_cut_up, f1_cut_low = shifts(mincut_f1, maxcut_f1, f1)
    f1_sys_up, f1_sys_low = shifts(sysup_f1, syslow_f1, f1)
    f2_cut_up, f2_cut_low = shifts(mincut_f2, maxcut_f2, f2)
    f2_sys_up, f2_sys_low = shifts(sysup_f2, syslow_f2, f2)

    # Add two systematic "errors" in quadrature:
    f1_up = np.sqrt(f1_cut_up ** 2 + f1_sys_up ** 2)
    f1_low = np.sqrt(f1_cut_low ** 2 + f1_sys_low ** 2)
    f2_up = np.sqrt(f2_cut_up ** 2 + f2_sys_up ** 2)
    f2_low = np.sqrt(f2_cut_low ** 2 + f2_sys_low ** 2)
    return f1_up, f1_low, f2_up, f2_low
def fill_between(x_range, y_up, y_low, color, hbarc_scale=False):
    """Plot confidence interval.

    :param hbarc_scale: When True, convert the x axis to fm and the y
        values to fm^-2 using hbar*c.
    """
    if hbarc_scale:
        x_range = x_range * hbarc
        y_up = y_up / (hbarc * hbarc)
        y_low = y_low / (hbarc * hbarc)
    plt.fill_between(x_range, y_up, y_low, color=color, lw=0, alpha=0.7)
def plot_f1_f2(data, order, reg_param):
    """Plot the Dirac and Pauli form factors.

    Draws the best-fit F1 and F2 curves with statistical (light) and
    systematic (dark) bands, plus the slope of F1 at Q^2 = 0.

    :param data: Cross-section dataset used for the fit.
    :param order: Model order N.
    :param reg_param: Regularization parameter lambda.
    """
    params, cov = calc_params(data, order, reg_param)
    Q2_range = np.linspace(0, 1, 100)
    F1, F2 = calc_ffs(Q2_range, params, order)
    # Transverse charge radius and the slope of F1:
    b2, _ = get_b2(params, cov)
    slope_x = np.linspace(0, 0.15, 10)
    slope_y = 1 - slope_x * b2 / 4
    # Plot the form factor slope:
    plt.plot(slope_x, slope_y, ls="--", color="black", lw=1)
    if fit.covariance_bad(cov):
        print("Warning: Covariance ill-conditioned, will not plot confidence intervals")
        draw_confidence = False
    else:
        draw_confidence = True
    if draw_confidence:
        # Calculate statistical uncertainties:
        params = np.random.multivariate_normal(params, cov, size=N_SAMPLES)
        interval = calc_interval(calc_ffs, Q2_range, params, order)
        # Calculate systematic uncertainties:
        f1_up, f1_low, f2_up, f2_low = calc_sys_bands(calc_ffs, Q2_range, data, order, reg_param)
        # Plot the systematic band for F2:
        fill_between(Q2_range, interval[1, 1] + f2_up, interval[1, 1], "blue")
        fill_between(Q2_range, interval[0, 1], interval[0, 1] - f2_low, "blue")
        # Plot the statistical band for F2:
        fill_between(Q2_range, interval[1, 1], interval[0, 1], "#AAAAFF")
    # Plot the best-fit line for F2:
    plt.plot(Q2_range, F2, color="blue", lw=0.6, alpha=0.7)
    # Plot the same things for F1:
    if draw_confidence:
        fill_between(Q2_range, interval[1, 0] + f1_up, interval[1, 0], "red")
        fill_between(Q2_range, interval[0, 0], interval[0, 0] - f1_low, "red")
        fill_between(Q2_range, interval[1, 0], interval[0, 0], "#FFAAAA")
    plt.plot(Q2_range, F1, color="red", lw=0.6, alpha=0.7)
    # Axes and labels:
    plt.xlim(0, 1)
    plt.xlabel(r"$Q^2~\left(\mathrm{GeV}^2\right)$")
    plt.ylabel(r"$F_1, \, F_2$", labelpad=11)
    if order == 5:
        # Curve labels positioned by hand for the default N=5 model.
        plt.text(0.45, 0.46, r"$F_1$", color="#FF0000")
        plt.text(0.36, 0.31, r"$F_2$", color="#0000FF")
def plot_rhos(data, order, reg_param):
    """Plot the transverse densities rho1 and rho2.

    Curves are drawn against impact parameter b in fm (converted via
    hbar*c through fill_between's hbarc_scale) on a log y-scale, with
    statistical and systematic bands when the covariance is usable.

    :param data: Cross-section dataset used for the fit.
    :param order: Model order N.
    :param reg_param: Regularization parameter lambda.
    """
    rho_range = np.linspace(0, 10.1, 100)
    params, cov = calc_params(data, order, reg_param)
    rho1, rho2 = calc_rho(rho_range, params, order)
    if fit.covariance_bad(cov):
        print("Warning: Covariance ill-conditioned, will not plot confidence intervals")
        draw_confidence = False
    else:
        draw_confidence = True
    if draw_confidence:
        # Calculate statistical uncertainties:
        params = np.random.multivariate_normal(params, cov, size=N_SAMPLES)
        interval = calc_interval(calc_rho, rho_range, params, order)
        # Calculate systematic uncertainties:
        rho1_up, rho1_low, rho2_up, rho2_low = calc_sys_bands(calc_rho, rho_range, data, order, reg_param)
        # Plot the systematic band for rho1:
        fill_between(rho_range, interval[1, 0] + rho1_up, interval[1, 0], "red", hbarc_scale=True)
        fill_between(rho_range, interval[0, 0], interval[0, 0] - rho1_low, "red", hbarc_scale=True)
        # Plot the statistical band for rho1:
        fill_between(rho_range, interval[1, 0], interval[0, 0], "#FFAAAA", hbarc_scale=True)
    # Plot the best-fit line for rho1:
    plt.plot(hbarc * rho_range, rho1 / (hbarc * hbarc), color="red", alpha=0.7, lw=0.6)
    # Plot the same things for rho2:
    if draw_confidence:
        fill_between(rho_range, interval[1, 1] + rho2_up, interval[1, 1], "blue", hbarc_scale=True)
        fill_between(rho_range, interval[0, 1], interval[0, 1] - rho2_low, "blue", hbarc_scale=True)
        fill_between(rho_range, interval[1, 1], interval[0, 1], "#AAAAFF", hbarc_scale=True)
    plt.plot(hbarc * rho_range, rho2 / (hbarc * hbarc), color="blue", alpha=0.7, lw=0.6)
    # Axes and labels:
    plt.xlim(0, 2)
    plt.yscale("log")
    plt.xlabel(r"$b~(\mathrm{fm})$", labelpad=6)
    plt.ylabel(r"$\rho_1, \, \rho_2~\left(\mathrm{fm}^{-2}\right)$")
    if order == 5:
        # Curve labels positioned by hand for the default N=5 model.
        plt.text(0.94, 0.013, r"$\rho_1$", color="#FF0000")
        plt.text(1.1, 0.079, r"$\rho_2$", color="#0000FF")
def plot_ge_gm(cs_data, R_data, order, reg_param):
    """Plot the Sachs electric and magnetic form factors.

    Left panel: G_E / G_dip with Rosenbluth data points; right panel:
    G_M / (mu G_dip).  Both panels show the fit curve and, when the
    covariance is usable, statistical and systematic bands.

    :param cs_data: Cross-section dataset used for the fit.
    :param R_data: Rosenbluth-separated G_E/G_M data points.
    :param order: Model order N.
    :param reg_param: Regularization parameter lambda.
    """
    params, cov = calc_params(cs_data, order, reg_param)
    Q2_range = np.linspace(0, 1, 100)
    GE, GM = calc_ge_gm(Q2_range, params, order)
    if fit.covariance_bad(cov):
        print("Warning: Covariance ill-conditioned, will not plot confidence intervals")
        draw_confidence = False
    else:
        draw_confidence = True
    # Calculate statistical uncertainties:
    if draw_confidence:
        params = np.random.multivariate_normal(params, cov, size=N_SAMPLES)
        interval = calc_interval(calc_ge_gm, Q2_range, params, order)
        # Calculate systematic uncertainties:
        f1_up, f1_low, f2_up, f2_low = calc_sys_bands(calc_ge_gm, Q2_range, cs_data, order, reg_param)
    fig = plt.figure(figsize=(10, 3.5))
    plt.subplots_adjust(wspace=0.35)
    # Left panel (electric form factor):
    fig.add_subplot(1, 2, 1)
    # Normalize data and curves by the dipole form factor.
    GE_dip, GM_dip = dipole_ffs(R_data["Q2"])
    GE_R = R_data["GE"] / GE_dip
    delta_GE_R = R_data["delta_GE"] / GE_dip
    # Plot the experimental data points for G_E:
    plt.errorbar(R_data["Q2"], GE_R, yerr=delta_GE_R, fmt="ob", ms=1.5, lw=1, zorder=0)
    if draw_confidence:
        # Plot the systematic band for G_E:
        fill_between(Q2_range, interval[1, 0] + f1_up, interval[1, 0], "red")
        fill_between(Q2_range, interval[0, 0], interval[0, 0] - f1_low, "red")
        # Plot the statistical band for G_E:
        fill_between(Q2_range, interval[1, 0], interval[0, 0], "#FFAAAA")
    # Plot the best-fit line for G_E:
    plt.plot(Q2_range, GE, color="black", lw=1, alpha=0.7)
    # Axes and labels:
    plt.xlim(0, 1)
    if order == 5:
        plt.ylim(0.6, 1.02)
    plt.xlabel(r"$Q^2~\left(\mathrm{GeV}^2\right)$")
    plt.ylabel(r"$G_{E} / G_{\mathrm{dip}}$")
    # Right panel (magnetic form factor):
    fig.add_subplot(1, 2, 2)
    GM_R = R_data["GM"] / GM_dip
    delta_GM_R = R_data["delta_GM"] / GM_dip
    # Plot the experimental data points for G_M:
    plt.errorbar(R_data["Q2"], GM_R, yerr=delta_GM_R, fmt="ob", ms=1.5, lw=1, zorder=0)
    if draw_confidence:
        # Plot the systematic band for G_M:
        fill_between(Q2_range, interval[1, 1] + f2_up, interval[1, 1], "red")
        fill_between(Q2_range, interval[0, 1], interval[0, 1] - f2_low, "red")
        # Plot the statistical band for G_M:
        fill_between(Q2_range, interval[1, 1], interval[0, 1], "#FFAAAA")
    # Plot the best-fit line for G_M:
    plt.plot(Q2_range, GM, color="black", lw=1, alpha=0.7)
    # Axes and labels:
    plt.xlim(0, 1)
    if order == 5:
        plt.ylim(0.98, 1.09)
    plt.xlabel(r"$Q^2~\left(\mathrm{GeV}^2\right)$")
    plt.ylabel(r"$G_{M} / (\mu \, G_{\mathrm{dip}})$")
def plot_cs(data, order, reg_param):
    """Plot the measured cross sections with best fits.

    One panel per beam energy; spectrometers A/B/C are drawn as red
    squares, blue circles and green triangles respectively.

    NOTE(review): this mutates data["cs"] and data["delta_cs"] in place
    (renormalization) — the dataset should not be reused afterwards
    without accounting for that.
    """
    params, _, _, _, _ = fit.fit(data, data, order, reg_param)
    # Renormalize the cross sections:
    norm_params = np.concatenate([[1], params[: fit.N_NORM_PARAMS]])
    norm = np.prod(norm_params[data["norms"]], axis=1)
    data["cs"] = norm * data["cs"]
    data["delta_cs"] = norm * data["delta_cs"]
    fig_S1 = plt.figure(figsize=(10, 13))
    plt.subplots_adjust(wspace=0.25, hspace=0.3)
    for i, energy in enumerate(fit.BEAM_ENERGIES):
        ax = fig_S1.add_subplot(3, 2, i + 1)
        Q2max = np.amax(data["Q2"][data["E"] == energy])
        Q2val = np.linspace(0, Q2max, 100)
        # Beam energy is converted MeV -> GeV for the model evaluation.
        curve = calc_cs(0.001 * energy, Q2val, params[fit.N_NORM_PARAMS :], order)
        # Spectrometer A:
        Q2 = data["Q2"][(data["E"] == energy) & (data["spec"] == "A")]
        cs = data["cs"][(data["E"] == energy) & (data["spec"] == "A")]
        delta_cs = data["delta_cs"][(data["E"] == energy) & (data["spec"] == "A")]
        plt.errorbar(Q2, cs, delta_cs, fmt="sr", ms=3, lw=1)
        # Spectrometer B:
        Q2 = data["Q2"][(data["E"] == energy) & (data["spec"] == "B")]
        cs = data["cs"][(data["E"] == energy) & (data["spec"] == "B")]
        delta_cs = data["delta_cs"][(data["E"] == energy) & (data["spec"] == "B")]
        plt.errorbar(Q2, cs, delta_cs, fmt="ob", ms=3, lw=1)
        # Spectrometer C:
        Q2 = data["Q2"][(data["E"] == energy) & (data["spec"] == "C")]
        cs = data["cs"][(data["E"] == energy) & (data["spec"] == "C")]
        delta_cs = data["delta_cs"][(data["E"] == energy) & (data["spec"] == "C")]
        plt.errorbar(Q2, cs, delta_cs, fmt="^g", ms=3, lw=1)
        plt.plot(Q2val, curve, "k-", linewidth=2, alpha=0.7, zorder=3)
        plt.xlim(left=0)
        plt.xlabel(r"$Q^2~\left(\mathrm{GeV}^2\right)$")
        plt.ylabel(r"$\sigma_{\mathrm{red}} / \sigma_{\mathrm{dip}}$")
        plt.text(0.5, 0.92, str(energy) + " MeV", horizontalalignment="center", transform=ax.transAxes)
def save_fig(path):
    """Write the current matplotlib figure to *path* with a tight bounding box."""
    print(f"Saving to '{path}'")
    plt.savefig(path, bbox_inches="tight")
def main(order, reg_param):
    """Generate all publication figures for model order N and regularization lambda."""
    print("Model: N = {}, lambda = {}".format(order, reg_param))
    # Read the cross section and Rosenbluth data:
    cs_data = fit.read_cs_data()
    Rosenbluth_data = read_Rosenbluth_data()
    # Figure 1:
    print("Plotting F1, F2, and transverse charge densities...")
    fig_1 = plt.figure(figsize=(10, 3.5))
    plt.subplots_adjust(wspace=0.35)
    # Figure 1, left panel (Dirac and Pauli form factors):
    ax1 = fig_1.add_subplot(1, 2, 1)
    plot_f1_f2(cs_data, order, reg_param)
    plt.text(0.9, 0.91, "(a)", transform=ax1.transAxes, fontsize=14)
    # Figure 1, right panel (transverse charge densities):
    ax2 = fig_1.add_subplot(1, 2, 2)
    plot_rhos(cs_data, order, reg_param)
    plt.text(0.9, 0.91, "(b)", transform=ax2.transAxes, fontsize=14)
    save_fig("figures/fig_1.pdf")
    # Figure S1 (electric and magnetic form factors):
    print("Plotting GE and GM...")
    plot_ge_gm(cs_data, Rosenbluth_data, order, reg_param)
    save_fig("figures/fig_S1.pdf")
    # Figure S2 (fitted cross sections):
    print("Plotting fitted cross sections...")
    plot_cs(cs_data, order, reg_param)
    save_fig("figures/fig_S2.pdf")
# Script entry point: parse CLI arguments (model order N, regularization
# parameter lambda) and generate all figures.
# FIX: removed the dataset-separator artifact ("| plot.py") that was fused
# onto the final line and broke parsing.
if __name__ == "__main__":
    ARGS = fit.parse_args()
    main(ARGS.order, ARGS.reg_param)
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import fit
from models import calc_cs, calc_ffs, calc_ge_gm, calc_rho, dipole_ffs, get_b2, hbarc
# Render all figure text with LaTeX using the Latin Modern font.
matplotlib.rcParams["text.usetex"] = True
matplotlib.rcParams["font.size"] = 13
matplotlib.rcParams["font.family"] = "lmodern"
matplotlib.rcParams["text.latex.preamble"] = r"\usepackage{lmodern}"
matplotlib.rcParams["xtick.labelsize"] = 12
matplotlib.rcParams["ytick.labelsize"] = 12
# Number of samples to use when generating statistical uncertainty bands
N_SAMPLES = 1000
def read_Rosenbluth_data():
    """Read data for G_E and G_M from "Rosenbluth.dat"."""
    # Five header lines are skipped; columns are space-separated.
    columns = ["Q2", "GE", "delta_GE", "GM", "delta_GM"]
    return pd.read_csv("data/Rosenbluth.dat", sep=" ", skiprows=5, names=columns)
def calc_interval(calc_func, x_range, param_list, order):
    """Calculate 68% ("1 sigma") percentile interval from param sample."""
    # Evaluate the model once per parameter draw, then take the
    # 15.9th / 84.1st percentiles across draws (axis 0).
    samples = [calc_func(x_range, draw, order) for draw in param_list]
    return np.percentile(np.array(samples), (15.9, 84.1), 0)
def calc_params(data, order, reg_param):
    """Run fit and get model parameters and covariance."""
    params, _, _, _, cov = fit.fit(data, data, order, reg_param)
    # Drop the leading normalization parameters (and the matching
    # rows/columns of the covariance); keep only the model terms.
    params = params[fit.N_NORM_PARAMS :]
    cov = cov[fit.N_NORM_PARAMS :, fit.N_NORM_PARAMS :]
    return params, cov
def calc_sys_bands(calc_func, x_range, data, order, reg_param):
    """Calculate systematic error bands for given quantity.

    Refits the data under four systematic variations (Q^2 min/max cuts and
    up/down normalization shifts) and returns the quadrature-summed upward
    and downward deviations from the nominal fit for both outputs of
    calc_func: (f1_up, f1_low, f2_up, f2_low).
    """
    params, _ = calc_params(data, order, reg_param)
    f1, f2 = calc_func(x_range, params, order)
    # One refit per systematic variation of the cross-section data:
    mincut_params = fit.fit_systematic_variant("cs_mincut", data, order, reg_param)[0]
    maxcut_params = fit.fit_systematic_variant("cs_maxcut", data, order, reg_param)[0]
    sysup_params = fit.fit_systematic_variant("cs_sysup", data, order, reg_param)[0]
    syslow_params = fit.fit_systematic_variant("cs_syslow", data, order, reg_param)[0]
    mincut_f1, mincut_f2 = calc_func(x_range, mincut_params, order)
    maxcut_f1, maxcut_f2 = calc_func(x_range, maxcut_params, order)
    sysup_f1, sysup_f2 = calc_func(x_range, sysup_params, order)
    syslow_f1, syslow_f2 = calc_func(x_range, syslow_params, order)
    # Calculate upper and lower limits for each of the systematic variations:
    # np.max/np.min along axis 0 pick the larger/smaller deviation pointwise;
    # np.clip keeps only the positive (upper) or negative (lower) part.
    f1_cut_up = np.clip(np.max(np.stack([mincut_f1 - f1, maxcut_f1 - f1]), 0), 0, None)
    f1_cut_low = np.clip(np.min(np.stack([mincut_f1 - f1, maxcut_f1 - f1]), 0), None, 0)
    f1_sys_up = np.clip(np.max(np.stack([sysup_f1 - f1, syslow_f1 - f1]), 0), 0, None)
    f1_sys_low = np.clip(np.min(np.stack([sysup_f1 - f1, syslow_f1 - f1]), 0), None, 0)
    f2_cut_up = np.clip(np.max(np.stack([mincut_f2 - f2, maxcut_f2 - f2]), 0), 0, None)
    f2_cut_low = np.clip(np.min(np.stack([mincut_f2 - f2, maxcut_f2 - f2]), 0), None, 0)
    f2_sys_up = np.clip(np.max(np.stack([sysup_f2 - f2, syslow_f2 - f2]), 0), 0, None)
    f2_sys_low = np.clip(np.min(np.stack([sysup_f2 - f2, syslow_f2 - f2]), 0), None, 0)
    # Add two systematic "errors" in quadrature:
    # (the *_low results are positive magnitudes; callers subtract them)
    f1_up = np.sqrt(f1_cut_up ** 2 + f1_sys_up ** 2)
    f1_low = np.sqrt(f1_cut_low ** 2 + f1_sys_low ** 2)
    f2_up = np.sqrt(f2_cut_up ** 2 + f2_sys_up ** 2)
    f2_low = np.sqrt(f2_cut_low ** 2 + f2_sys_low ** 2)
    return f1_up, f1_low, f2_up, f2_low
def fill_between(x_range, y_up, y_low, color, hbarc_scale=False):
    """Plot confidence interval."""
    if hbarc_scale:
        # Rescale to femtometre units (x in fm, y in fm^-2).
        x_range = hbarc * x_range
        y_up = y_up / (hbarc * hbarc)
        y_low = y_low / (hbarc * hbarc)
    plt.fill_between(x_range, y_up, y_low, color=color, lw=0, alpha=0.7)
def plot_f1_f2(data, order, reg_param):
    """Plot the Dirac and Pauli form factors.

    Draws the best-fit F1 (red) and F2 (blue) with statistical and
    systematic bands, plus the small-Q^2 slope implied by the transverse
    charge radius.
    """
    params, cov = calc_params(data, order, reg_param)
    Q2_range = np.linspace(0, 1, 100)
    F1, F2 = calc_ffs(Q2_range, params, order)
    # Transverse charge radius and the slope of F1:
    b2, _ = get_b2(params, cov)
    slope_x = np.linspace(0, 0.15, 10)
    slope_y = 1 - slope_x * b2 / 4
    # Plot the form factor slope:
    plt.plot(slope_x, slope_y, ls="--", color="black", lw=1)
    if fit.covariance_bad(cov):
        print("Warning: Covariance ill-conditioned, will not plot confidence intervals")
        draw_confidence = False
    else:
        draw_confidence = True
    if draw_confidence:
        # Calculate statistical uncertainties:
        # (params is re-bound to N_SAMPLES multivariate-normal draws)
        params = np.random.multivariate_normal(params, cov, size=N_SAMPLES)
        interval = calc_interval(calc_ffs, Q2_range, params, order)
        # Calculate systematic uncertainties:
        f1_up, f1_low, f2_up, f2_low = calc_sys_bands(calc_ffs, Q2_range, data, order, reg_param)
        # Plot the systematic band for F2:
        fill_between(Q2_range, interval[1, 1] + f2_up, interval[1, 1], "blue")
        fill_between(Q2_range, interval[0, 1], interval[0, 1] - f2_low, "blue")
        # Plot the statistical band for F2:
        fill_between(Q2_range, interval[1, 1], interval[0, 1], "#AAAAFF")
    # Plot the best-fit line for F2:
    plt.plot(Q2_range, F2, color="blue", lw=0.6, alpha=0.7)
    # Plot the same things for F1:
    if draw_confidence:
        fill_between(Q2_range, interval[1, 0] + f1_up, interval[1, 0], "red")
        fill_between(Q2_range, interval[0, 0], interval[0, 0] - f1_low, "red")
        fill_between(Q2_range, interval[1, 0], interval[0, 0], "#FFAAAA")
    plt.plot(Q2_range, F1, color="red", lw=0.6, alpha=0.7)
    # Axes and labels:
    plt.xlim(0, 1)
    plt.xlabel(r"$Q^2~\left(\mathrm{GeV}^2\right)$")
    plt.ylabel(r"$F_1, \, F_2$", labelpad=11)
    if order == 5:
        # Hand-placed curve labels (positions tuned for the N=5 model):
        plt.text(0.45, 0.46, r"$F_1$", color="#FF0000")
        plt.text(0.36, 0.31, r"$F_2$", color="#0000FF")
def plot_rhos(data, order, reg_param):
    """Plot the transverse densities rho1 and rho2.

    Best-fit rho1 (red) and rho2 (blue) on a log y-scale, with statistical
    and systematic bands when the covariance allows them.
    """
    rho_range = np.linspace(0, 10.1, 100)
    params, cov = calc_params(data, order, reg_param)
    rho1, rho2 = calc_rho(rho_range, params, order)
    if fit.covariance_bad(cov):
        print("Warning: Covariance ill-conditioned, will not plot confidence intervals")
        draw_confidence = False
    else:
        draw_confidence = True
    if draw_confidence:
        # Calculate statistical uncertainties:
        # (params is re-bound to N_SAMPLES multivariate-normal draws)
        params = np.random.multivariate_normal(params, cov, size=N_SAMPLES)
        interval = calc_interval(calc_rho, rho_range, params, order)
        # Calculate systematic uncertainties:
        rho1_up, rho1_low, rho2_up, rho2_low = calc_sys_bands(calc_rho, rho_range, data, order, reg_param)
        # Plot the systematic band for rho1:
        fill_between(rho_range, interval[1, 0] + rho1_up, interval[1, 0], "red", hbarc_scale=True)
        fill_between(rho_range, interval[0, 0], interval[0, 0] - rho1_low, "red", hbarc_scale=True)
        # Plot the statistical band for rho1:
        fill_between(rho_range, interval[1, 0], interval[0, 0], "#FFAAAA", hbarc_scale=True)
    # Plot the best-fit line for rho1 (converted to fm / fm^-2 via hbarc):
    plt.plot(hbarc * rho_range, rho1 / (hbarc * hbarc), color="red", alpha=0.7, lw=0.6)
    # Plot the same things for rho2:
    if draw_confidence:
        fill_between(rho_range, interval[1, 1] + rho2_up, interval[1, 1], "blue", hbarc_scale=True)
        fill_between(rho_range, interval[0, 1], interval[0, 1] - rho2_low, "blue", hbarc_scale=True)
        fill_between(rho_range, interval[1, 1], interval[0, 1], "#AAAAFF", hbarc_scale=True)
    plt.plot(hbarc * rho_range, rho2 / (hbarc * hbarc), color="blue", alpha=0.7, lw=0.6)
    # Axes and labels:
    plt.xlim(0, 2)
    plt.yscale("log")
    plt.xlabel(r"$b~(\mathrm{fm})$", labelpad=6)
    plt.ylabel(r"$\rho_1, \, \rho_2~\left(\mathrm{fm}^{-2}\right)$")
    if order == 5:
        # Hand-placed curve labels (positions tuned for the N=5 model):
        plt.text(0.94, 0.013, r"$\rho_1$", color="#FF0000")
        plt.text(1.1, 0.079, r"$\rho_2$", color="#0000FF")
def plot_ge_gm(cs_data, R_data, order, reg_param):
    """Plot the Sachs electric and magnetic form factors.

    Two panels: G_E/G_dip (left) and G_M/(mu*G_dip) (right), each showing
    Rosenbluth data points, fit bands, and the best-fit curve.
    """
    params, cov = calc_params(cs_data, order, reg_param)
    Q2_range = np.linspace(0, 1, 100)
    GE, GM = calc_ge_gm(Q2_range, params, order)
    if fit.covariance_bad(cov):
        print("Warning: Covariance ill-conditioned, will not plot confidence intervals")
        draw_confidence = False
    else:
        draw_confidence = True
    # Calculate statistical uncertainties:
    if draw_confidence:
        # (params is re-bound to N_SAMPLES multivariate-normal draws)
        params = np.random.multivariate_normal(params, cov, size=N_SAMPLES)
        interval = calc_interval(calc_ge_gm, Q2_range, params, order)
        # Calculate systematic uncertainties:
        f1_up, f1_low, f2_up, f2_low = calc_sys_bands(calc_ge_gm, Q2_range, cs_data, order, reg_param)
    fig = plt.figure(figsize=(10, 3.5))
    plt.subplots_adjust(wspace=0.35)
    # Left panel (electric form factor):
    fig.add_subplot(1, 2, 1)
    # Normalize by the dipole form factor for display:
    GE_dip, GM_dip = dipole_ffs(R_data["Q2"])
    GE_R = R_data["GE"] / GE_dip
    delta_GE_R = R_data["delta_GE"] / GE_dip
    # Plot the experimental data points for G_E:
    plt.errorbar(R_data["Q2"], GE_R, yerr=delta_GE_R, fmt="ob", ms=1.5, lw=1, zorder=0)
    if draw_confidence:
        # Plot the systematic band for G_E:
        fill_between(Q2_range, interval[1, 0] + f1_up, interval[1, 0], "red")
        fill_between(Q2_range, interval[0, 0], interval[0, 0] - f1_low, "red")
        # Plot the statistical band for G_E:
        fill_between(Q2_range, interval[1, 0], interval[0, 0], "#FFAAAA")
    # Plot the best-fit line for G_E:
    plt.plot(Q2_range, GE, color="black", lw=1, alpha=0.7)
    # Axes and labels:
    plt.xlim(0, 1)
    if order == 5:
        plt.ylim(0.6, 1.02)
    plt.xlabel(r"$Q^2~\left(\mathrm{GeV}^2\right)$")
    plt.ylabel(r"$G_{E} / G_{\mathrm{dip}}$")
    # Right panel (magnetic form factor):
    fig.add_subplot(1, 2, 2)
    GM_R = R_data["GM"] / GM_dip
    delta_GM_R = R_data["delta_GM"] / GM_dip
    # Plot the experimental data points for G_M:
    plt.errorbar(R_data["Q2"], GM_R, yerr=delta_GM_R, fmt="ob", ms=1.5, lw=1, zorder=0)
    if draw_confidence:
        # Plot the systematic band for G_M:
        fill_between(Q2_range, interval[1, 1] + f2_up, interval[1, 1], "red")
        fill_between(Q2_range, interval[0, 1], interval[0, 1] - f2_low, "red")
        # Plot the statistical band for G_M:
        fill_between(Q2_range, interval[1, 1], interval[0, 1], "#FFAAAA")
    # Plot the best-fit line for G_M:
    plt.plot(Q2_range, GM, color="black", lw=1, alpha=0.7)
    # Axes and labels:
    plt.xlim(0, 1)
    if order == 5:
        plt.ylim(0.98, 1.09)
    plt.xlabel(r"$Q^2~\left(\mathrm{GeV}^2\right)$")
    plt.ylabel(r"$G_{M} / (\mu \, G_{\mathrm{dip}})$")
def plot_cs(data, order, reg_param):
    """Plot the measured cross sections with best fits.

    One subplot per beam energy; spectrometers A, B, C get distinct
    markers, overlaid with the best-fit curve.

    BUG FIX: the original wrote the renormalized cross sections back into
    `data`, mutating the caller's dataset (a second call would renormalize
    twice).  The renormalized values are now kept in local arrays.
    """
    params, _, _, _, _ = fit.fit(data, data, order, reg_param)
    # Renormalize the cross sections (locally, without touching `data`):
    norm_params = np.concatenate([[1], params[: fit.N_NORM_PARAMS]])
    norm = np.prod(norm_params[data["norms"]], axis=1)
    cs_all = norm * data["cs"]
    delta_cs_all = norm * data["delta_cs"]
    fig_S1 = plt.figure(figsize=(10, 13))
    plt.subplots_adjust(wspace=0.25, hspace=0.3)
    for i, energy in enumerate(fit.BEAM_ENERGIES):
        ax = fig_S1.add_subplot(3, 2, i + 1)
        Q2max = np.amax(data["Q2"][data["E"] == energy])
        Q2val = np.linspace(0, Q2max, 100)
        # Best-fit curve for this beam energy (0.001 converts MeV -> GeV):
        curve = calc_cs(0.001 * energy, Q2val, params[fit.N_NORM_PARAMS :], order)
        # One marker style per spectrometer:
        for spec, fmt in (("A", "sr"), ("B", "ob"), ("C", "^g")):
            mask = (data["E"] == energy) & (data["spec"] == spec)
            plt.errorbar(data["Q2"][mask], cs_all[mask], delta_cs_all[mask], fmt=fmt, ms=3, lw=1)
        plt.plot(Q2val, curve, "k-", linewidth=2, alpha=0.7, zorder=3)
        plt.xlim(left=0)
        plt.xlabel(r"$Q^2~\left(\mathrm{GeV}^2\right)$")
        plt.ylabel(r"$\sigma_{\mathrm{red}} / \sigma_{\mathrm{dip}}$")
        plt.text(0.5, 0.92, str(energy) + " MeV", horizontalalignment="center", transform=ax.transAxes)
def save_fig(path):
    """Save figures to path."""
    # Echo the destination so batch runs show progress.
    print("Saving to '{}'".format(path))
    plt.savefig(path, bbox_inches="tight")
def main(order, reg_param):
    """Produce every publication figure for the given model order and lambda."""
    print("Model: N = {}, lambda = {}".format(order, reg_param))
    # Load the two input datasets:
    cs_data = fit.read_cs_data()
    rosenbluth = read_Rosenbluth_data()
    # Figure 1: Dirac/Pauli form factors (a) and transverse densities (b).
    print("Plotting F1, F2, and transverse charge densities...")
    figure_one = plt.figure(figsize=(10, 3.5))
    plt.subplots_adjust(wspace=0.35)
    panel_a = figure_one.add_subplot(1, 2, 1)
    plot_f1_f2(cs_data, order, reg_param)
    plt.text(0.9, 0.91, "(a)", transform=panel_a.transAxes, fontsize=14)
    panel_b = figure_one.add_subplot(1, 2, 2)
    plot_rhos(cs_data, order, reg_param)
    plt.text(0.9, 0.91, "(b)", transform=panel_b.transAxes, fontsize=14)
    save_fig("figures/fig_1.pdf")
    # Figure S1: electric and magnetic form factors.
    print("Plotting GE and GM...")
    plot_ge_gm(cs_data, rosenbluth, order, reg_param)
    save_fig("figures/fig_S1.pdf")
    # Figure S2: fitted cross sections.
    print("Plotting fitted cross sections...")
    plot_cs(cs_data, order, reg_param)
    save_fig("figures/fig_S2.pdf")
# Script entry point: parse CLI arguments and generate all figures.
# FIX: removed the dataset-separator artifact ("| 0.73431 | 0.550668") that
# was fused onto the final line and broke parsing.
if __name__ == "__main__":
    ARGS = fit.parse_args()
    main(ARGS.order, ARGS.reg_param)
import os, sys
from bob.math import pavx, pavxWidth, pavxWidthHeight
import numpy
def pavx_check(y, ghat_ref, w_ref, h_ref):
    """Make a full test for a given sample"""
    # pavx returning a fresh result array:
    ghat = pavx(y)
    assert numpy.all(numpy.abs(ghat - ghat_ref) < 1e-4)
    # pavx writing into a preallocated output array must give the same result:
    pavx(y, ghat)
    assert numpy.all(numpy.abs(ghat - ghat_ref) < 1e-4)
    # width variant: returns the block widths and must also fill ghat:
    w = pavxWidth(y, ghat)
    assert numpy.all(numpy.abs(w - w_ref) < 1e-4)
    assert numpy.all(numpy.abs(ghat - ghat_ref) < 1e-4)
    # width+height variant: ret[0] is widths, ret[1] is heights:
    ret = pavxWidthHeight(y, ghat)
    assert numpy.all(numpy.abs(ghat - ghat_ref) < 1e-4)
    assert numpy.all(numpy.abs(ret[0] - w_ref) < 1e-4)
    assert numpy.all(numpy.abs(ret[1] - h_ref) < 1e-4)
def test_pavx_sample1():
    """pavx regression: almost-increasing input with one out-of-order pair."""
    # Reference obtained using bosaris toolkit 1.06
    y = numpy.array([ 58.4666, 67.1040, 73.1806, 77.0896, 85.8816,
                      89.6381, 101.6651, 102.5587, 109.7933, 117.5715,
                      118.1671, 138.3151, 141.9755, 145.7352, 159.1108,
                      156.8654, 168.6932, 175.2756])
    # Elements 14/15 violate monotonicity and get pooled to their mean:
    ghat_ref = numpy.array([ 58.4666, 67.1040, 73.1806, 77.0896, 85.8816,
                             89.6381, 101.6651, 102.5587, 109.7933, 117.5715,
                             118.1671, 138.3151, 141.9755, 145.7352, 157.9881,
                             157.9881, 168.6932, 175.2756])
    w_ref = numpy.array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1])
    h_ref = numpy.array([ 58.4666, 67.1040, 73.1806, 77.0896, 85.8816,
                          89.6381, 101.6651, 102.5587, 109.7933, 117.5715,
                          118.1671, 138.3151, 141.9755, 145.7352, 157.9881,
                          168.6932, 175.2756])
    pavx_check(y, ghat_ref, w_ref, h_ref)
def test_pavx_sample2():
    """pavx regression: non-monotone input producing several pooled blocks.

    FIX: removed the dataset-separator artifact ("| bob/math/test_pavx.py |")
    that was fused onto the final line and broke parsing.
    """
    # Reference obtained using bosaris toolkit 1.06
    y = numpy.array([ 46.1093, 64.3255, 76.5252, 89.0061, 100.4421,
                      92.8593, 84.0840, 98.5769, 102.3841, 143.5045,
                      120.8439, 141.4807, 139.0758, 156.8861, 147.3515,
                      147.9773, 154.7762, 180.8819])
    ghat_ref = numpy.array([ 46.1093, 64.3255, 76.5252, 89.0061, 92.4618,
                             92.4618, 92.4618, 98.5769, 102.3841, 132.1742,
                             132.1742, 140.2783, 140.2783, 150.7383, 150.7383,
                             150.7383, 154.7762, 180.8819])
    w_ref = numpy.array([1, 1, 1, 1, 3, 1, 1, 2, 2, 3, 1, 1])
    h_ref = numpy.array([ 46.1093, 64.3255, 76.5252, 89.0061, 92.4618,
                          98.5769, 102.3841, 132.1742, 140.2783, 150.7383,
                          154.7762, 180.8819])
    pavx_check(y, ghat_ref, w_ref, h_ref)

# Restored import belonging to the duplicated module copy that follows
# (it was fused into the separator line above).
import os, sys
from bob.math import pavx, pavxWidth, pavxWidthHeight
import numpy
def pavx_check(y, ghat_ref, w_ref, h_ref):
    """Make a full test for a given sample"""
    tol = 1e-4
    close = lambda a, b: numpy.all(numpy.abs(a - b) < tol)
    # Fresh-result form of pavx:
    ghat = pavx(y)
    assert close(ghat, ghat_ref)
    # Preallocated-output form must produce the same values:
    pavx(y, ghat)
    assert close(ghat, ghat_ref)
    # Width variant returns block widths and fills ghat:
    width = pavxWidth(y, ghat)
    assert close(width, w_ref)
    assert close(ghat, ghat_ref)
    # Width+height variant returns (widths, heights):
    result = pavxWidthHeight(y, ghat)
    assert close(ghat, ghat_ref)
    assert close(result[0], w_ref)
    assert close(result[1], h_ref)
def test_pavx_sample1():
    """pavx regression: almost-increasing input with one out-of-order pair."""
    # Reference obtained using bosaris toolkit 1.06
    y = numpy.array([ 58.4666, 67.1040, 73.1806, 77.0896, 85.8816,
                      89.6381, 101.6651, 102.5587, 109.7933, 117.5715,
                      118.1671, 138.3151, 141.9755, 145.7352, 159.1108,
                      156.8654, 168.6932, 175.2756])
    # Elements 14/15 violate monotonicity and get pooled to their mean:
    ghat_ref = numpy.array([ 58.4666, 67.1040, 73.1806, 77.0896, 85.8816,
                             89.6381, 101.6651, 102.5587, 109.7933, 117.5715,
                             118.1671, 138.3151, 141.9755, 145.7352, 157.9881,
                             157.9881, 168.6932, 175.2756])
    w_ref = numpy.array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1])
    h_ref = numpy.array([ 58.4666, 67.1040, 73.1806, 77.0896, 85.8816,
                          89.6381, 101.6651, 102.5587, 109.7933, 117.5715,
                          118.1671, 138.3151, 141.9755, 145.7352, 157.9881,
                          168.6932, 175.2756])
    pavx_check(y, ghat_ref, w_ref, h_ref)
def test_pavx_sample2():
    """pavx regression: non-monotone input producing several pooled blocks.

    FIX: removed the dataset-separator artifact ("| 0.348534 | 0.584805")
    that was fused onto the final line and broke parsing.
    """
    # Reference obtained using bosaris toolkit 1.06
    y = numpy.array([ 46.1093, 64.3255, 76.5252, 89.0061, 100.4421,
                      92.8593, 84.0840, 98.5769, 102.3841, 143.5045,
                      120.8439, 141.4807, 139.0758, 156.8861, 147.3515,
                      147.9773, 154.7762, 180.8819])
    ghat_ref = numpy.array([ 46.1093, 64.3255, 76.5252, 89.0061, 92.4618,
                             92.4618, 92.4618, 98.5769, 102.3841, 132.1742,
                             132.1742, 140.2783, 140.2783, 150.7383, 150.7383,
                             150.7383, 154.7762, 180.8819])
    w_ref = numpy.array([1, 1, 1, 1, 3, 1, 1, 2, 2, 3, 1, 1])
    h_ref = numpy.array([ 46.1093, 64.3255, 76.5252, 89.0061, 92.4618,
                          98.5769, 102.3841, 132.1742, 140.2783, 150.7383,
                          154.7762, 180.8819])
    pavx_check(y, ghat_ref, w_ref, h_ref)
from HDPython.ast.ast_classes.ast_base import v_ast_base, add_class,gIndent
import HDPython.hdl_converter as hdl
from HDPython.base import HDPython_base
from HDPython.v_enum import v_enum
from HDPython.v_symbol import v_symbol
class v_compare(v_ast_base):
    """AST node wrapping a single comparison expression (lhs <op> rhs)."""

    def __init__(self, lhs, ops, rhs, astParser):
        # ops is the class name of the ast operator node, e.g. "Eq", "Lt".
        self.lhs = lhs
        self.rhs = rhs
        self.ops = ops
        self.astParser = astParser

    def __str__(self):
        # HDPython objects render their own comparison HDL.
        if issubclass(type(self.lhs), HDPython_base):
            return hdl.impl_compare(self.lhs, self.ops, self.rhs, self.astParser)
        # Fallback: plain "lhs = rhs" text.
        return str(self.lhs) + " = " + str(self.rhs)

    def get_type(self):
        # A comparison always evaluates to a boolean.
        return "boolean"

    def impl_to_bool(self, astParser):
        # Resolve a bare name on the right-hand side to its declared variable.
        if type(self.rhs).__name__ == "v_name":
            rhs = astParser.get_variable(self.rhs.Value, None)
        else:
            rhs = self.rhs
        if type(self.lhs).__name__ == "v_name":
            obj = astParser.get_variable(self.lhs.Value, None)
            # NOTE(review): this call passes only (obj, rhs) while every other
            # branch passes (lhs, ops, rhs, astParser) -- confirm the intended
            # signature of hdl.impl_compare before relying on this path.
            return hdl.impl_compare(obj, rhs)
        # v_class, v_symbol and v_enum all delegate to hdl.impl_compare:
        if self.lhs._issubclass_("v_class"):
            return hdl.impl_compare(
                self.lhs,
                self.ops,
                self.rhs,
                astParser
            )
        if issubclass(type(self.lhs), v_symbol):
            return hdl.impl_compare(
                self.lhs,
                self.ops,
                self.rhs,
                astParser
            )
        if issubclass(type(self.lhs), v_enum):
            return hdl.impl_compare(
                self.lhs,
                self.ops,
                self.rhs,
                astParser
            )
        raise Exception("unknown type", type(self.lhs).__name__)
def body_unfold_Compare(astParser, Node):
    """Unfold an ast.Compare node into a v_compare wrapper (one operator only)."""
    # Chained comparisons (a < b < c) are not supported.
    if len(Node.ops) > 1:
        raise Exception("unexpected number of operators")
    left = astParser.Unfold_body(Node.left)
    op_name = type(Node.ops[0]).__name__
    right = astParser.Unfold_body(Node.comparators[0])
    return v_compare(left, op_name, right, astParser)
# Register the unfold handler for ast.Compare nodes.
# FIX: removed the dataset-separator artifact fused onto this line; the
# trailing import belongs to the duplicated module copy that follows.
add_class("Compare", body_unfold_Compare)

from HDPython.ast.ast_classes.ast_base import v_ast_base, add_class, gIndent
import HDPython.hdl_converter as hdl
from HDPython.base import HDPython_base
from HDPython.v_enum import v_enum
from HDPython.v_symbol import v_symbol
class v_compare(v_ast_base):
    """AST node wrapping a single comparison expression (lhs <op> rhs)."""

    def __init__(self, lhs, ops, rhs, astParser):
        # ops is the class name of the ast operator node, e.g. "Eq", "Lt".
        self.lhs = lhs
        self.rhs = rhs
        self.ops = ops
        self.astParser = astParser

    def __str__(self):
        # HDPython objects render their own comparison HDL.
        if issubclass(type(self.lhs), HDPython_base):
            return hdl.impl_compare(self.lhs, self.ops, self.rhs, self.astParser)
        # Fallback: plain "lhs = rhs" text.
        return str(self.lhs) + " = " + str(self.rhs)

    def get_type(self):
        # A comparison always evaluates to a boolean.
        return "boolean"

    def impl_to_bool(self, astParser):
        """Render this comparison as a boolean HDL expression."""
        # Resolve a bare name on the right-hand side to its declared variable.
        if type(self.rhs).__name__ == "v_name":
            rhs = astParser.get_variable(self.rhs.Value, None)
        else:
            rhs = self.rhs
        if type(self.lhs).__name__ == "v_name":
            obj = astParser.get_variable(self.lhs.Value, None)
            # BUG FIX: was hdl.impl_compare(obj, rhs) -- every other branch
            # passes (lhs, ops, rhs, astParser); pass the full argument list.
            return hdl.impl_compare(obj, self.ops, rhs, astParser)
        # v_class, v_symbol and v_enum all delegate identically:
        if (self.lhs._issubclass_("v_class")
                or issubclass(type(self.lhs), v_symbol)
                or issubclass(type(self.lhs), v_enum)):
            return hdl.impl_compare(self.lhs, self.ops, self.rhs, astParser)
        raise Exception("unknown type", type(self.lhs).__name__)
def body_unfold_Compare(astParser, Node):
    """Convert an ast.Compare node into a v_compare (single operator only)."""
    # Chained comparisons (a < b < c) are not supported.
    if len(Node.ops) > 1:
        raise Exception("unexpected number of operators")
    return v_compare(
        astParser.Unfold_body(Node.left),
        type(Node.ops[0]).__name__,
        astParser.Unfold_body(Node.comparators[0]),
        astParser
    )
# Register the unfold handler for ast.Compare nodes.
# FIX: removed the dataset-separator artifact ("| 0.563378 | 0.200303")
# fused onto this line.
add_class("Compare", body_unfold_Compare)
from math import *
from sympy import symbols,diff
from sympy.abc import x,y
def standardNR():
    """Newton-Raphson iteration for a single user-entered equation f(x) = 0.

    Reads f(x) and the starting point interactively, prints each iterate,
    and stops when two successive iterates differ by less than 1e-5.
    """
    print("standard nr")
    f = input("enter f(x) in pyth n syntax,ie. x^2 as x**2 , write math functions as func() , like exp(), tan().\n")
    # Symbolic derivative of f with respect to x (sympy):
    fx = diff(f, x)
    # SECURITY NOTE: eval() on raw user input executes arbitrary code;
    # acceptable only for a trusted interactive tool.
    lf = eval("lambda x:" + str(f))
    lfx = eval("lambda x:" + str(fx))
    x_0 = float(input("x0 :"))
    # One Newton step: x - f(x)/f'(x).
    h = lambda x: x - lf(x) / lfx(x)
    print("n \t x1 \t\t x2 ")
    counter = 1
    while True:
        # FIX: the original evaluated h(x_0) three times per iteration
        # (condition, print, update); evaluate it once per pass.
        hv = h(x_0)
        if abs(x_0 - hv) < 0.00001:
            break
        print(counter, "\t", '%.5f' % x_0, "\t", '%.5f' % hv, "\t")
        counter += 1
        x_0 = hv
    print("The value of root is : ", '%.5f' % x_0,)
def non_HomoNR():
    """Newton-Raphson iteration for a 2-equation system f(x,y) = 0, g(x,y) = 0.

    Reads f, g and the starting point interactively, prints each iterate,
    and stops when the combined correction magnitude drops below 2e-5.
    """
    # SECURITY NOTE: eval() on raw user input executes arbitrary code;
    # acceptable only for a trusted interactive tool.
    f = input("enter f(x,y) in pyth n syntax,ie. x^2 as x**2 , write math functions as func() , like exp(), tan(). \n")
    g = input("enter g(x,y) expression in python syntax,ie. x^2 as x**2 , write math functions as func() , like exp(), tan(). \n")
    x_0, y_0 = map(float, input("x0 y0 :").split())
    # Symbolic partial derivatives for the Jacobian (sympy):
    fx = diff(f, x)
    fy = diff(f, y)
    gx = diff(g, x)
    gy = diff(g, y)
    lf = eval("lambda x,y:" + str(f))
    lg = eval("lambda x,y:" + str(g))
    lfx = eval("lambda x,y:" + str(fx))
    lgx = eval("lambda x,y:" + str(gx))
    lfy = eval("lambda x,y:" + str(fy))
    lgy = eval("lambda x,y:" + str(gy))
    counter = 0
    # Cramer's-rule solution of J * [h, k]^T = -[f, g]^T:
    h = lambda x, y: ((lg(x, y) * lfy(x, y) - lf(x, y) * lgy(x, y)) /
                      (lfx(x, y) * lgy(x, y) - lgx(x, y) * lfy(x, y)))
    k = lambda x, y: ((lf(x, y) * lgx(x, y) - lg(x, y) * lfx(x, y)) /
                      (lfx(x, y) * lgy(x, y) - lgx(x, y) * lfy(x, y)))
    print("n \t x \t\t y \t\t h \t\t k ")
    while True:
        hv = h(x_0, y_0)
        kv = k(x_0, y_0)
        # BUG FIX: the original stop test used abs(h + k), which can cancel
        # when h and k have opposite signs; use the sum of magnitudes.
        if abs(hv) + abs(kv) < 0.00002:
            break
        print(counter, "\t", '%.5f' % x_0, "\t", '%.5f' % y_0, "\t", '%.5f' % hv, "\t", '%.5f' % kv)
        counter += 1
        # BUG FIX: the original updated x_0 first and then evaluated the
        # y-correction at the NEW x_0; both corrections must come from the
        # same (old) point, so reuse hv/kv computed above.
        x_0 = x_0 + hv
        y_0 = y_0 + kv
    print("The value of root is : ", '%.5f' % x_0, '%.5f' % y_0)
def main():
    """Dispatch to the single- or multi-variable Newton-Raphson driver."""
    print("Starting.........")
    choice = int(input("enter 1 for single variable and 2 for multi \t"))
    if choice == 1:
        standardNR()
    else:
        non_HomoNR()
# Script entry point.
# FIX: removed the dataset-separator artifact fused onto the final line;
# the trailing import belongs to the duplicated module copy that follows.
if __name__ == "__main__":
    main()

from math import *
from sympy import symbols,diff
from sympy.abc import x,y
def standardNR():
    """Newton-Raphson iteration for a single user-entered equation f(x) = 0."""
    print("standard nr")
    f = input("enter f(x) in pyth n syntax,ie. x^2 as x**2 , write math functions as func() , like exp(), tan().\n")
    # Symbolic derivative of f with respect to x (sympy):
    fx = diff(f, x)
    # NOTE(review): eval() on raw user input executes arbitrary code;
    # acceptable only for a trusted interactive tool.
    lf = eval("lambda x:" + str(f))
    lfx = eval("lambda x:" + str(fx))
    x_0 = float(input("x0 :"))
    # One Newton step: x - f(x)/f'(x).
    h = lambda x: x - lf(x) / lfx(x)
    print("n \t x1 \t\t x2 ")
    counter = 1
    # Iterate until two successive iterates differ by less than 1e-5.
    while (abs((x_0 - h(x_0))) >= 0.00001):
        hv = h(x_0)
        print(counter, "\t", '%.5f' % x_0, "\t", '%.5f' % hv, "\t")
        counter += 1
        x_0 = h(x_0)
    print("The value of root is : ", '%.5f' % x_0,)
def non_HomoNR():
    """Newton-Raphson iteration for a 2-equation system f(x,y) = 0, g(x,y) = 0.

    Reads f, g and the starting point interactively, prints each iterate,
    and stops when the combined correction magnitude drops below 2e-5.
    """
    # SECURITY NOTE: eval() on raw user input executes arbitrary code;
    # acceptable only for a trusted interactive tool.
    f = input("enter f(x,y) in pyth n syntax,ie. x^2 as x**2 , write math functions as func() , like exp(), tan(). \n")
    g = input("enter g(x,y) expression in python syntax,ie. x^2 as x**2 , write math functions as func() , like exp(), tan(). \n")
    x_0, y_0 = map(float, input("x0 y0 :").split())
    # Symbolic partial derivatives for the Jacobian (sympy):
    fx = diff(f, x)
    fy = diff(f, y)
    gx = diff(g, x)
    gy = diff(g, y)
    lf = eval("lambda x,y:" + str(f))
    lg = eval("lambda x,y:" + str(g))
    lfx = eval("lambda x,y:" + str(fx))
    lgx = eval("lambda x,y:" + str(gx))
    lfy = eval("lambda x,y:" + str(fy))
    lgy = eval("lambda x,y:" + str(gy))
    counter = 0
    # Cramer's-rule solution of J * [h, k]^T = -[f, g]^T:
    h = lambda x, y: ((lg(x, y) * lfy(x, y) - lf(x, y) * lgy(x, y)) /
                      (lfx(x, y) * lgy(x, y) - lgx(x, y) * lfy(x, y)))
    k = lambda x, y: ((lf(x, y) * lgx(x, y) - lg(x, y) * lfx(x, y)) /
                      (lfx(x, y) * lgy(x, y) - lgx(x, y) * lfy(x, y)))
    print("n \t x \t\t y \t\t h \t\t k ")
    while True:
        hv = h(x_0, y_0)
        kv = k(x_0, y_0)
        # BUG FIX: the original stop test used abs(h + k), which can cancel
        # when h and k have opposite signs; use the sum of magnitudes.
        if abs(hv) + abs(kv) < 0.00002:
            break
        print(counter, "\t", '%.5f' % x_0, "\t", '%.5f' % y_0, "\t", '%.5f' % hv, "\t", '%.5f' % kv)
        counter += 1
        # BUG FIX: the original updated x_0 first and then evaluated the
        # y-correction at the NEW x_0; both corrections must come from the
        # same (old) point, so reuse hv/kv computed above.
        x_0 = x_0 + hv
        y_0 = y_0 + kv
    print("The value of root is : ", '%.5f' % x_0, '%.5f' % y_0)
def main():
    """Entry point: choose single-variable or two-variable Newton-Raphson."""
    print("Starting.........")
    # a , b = map(int, input("enter a b :").split())
    case = int(input("enter 1 for single variable and 2 for multi \t"))
    if case == 1:
        standardNR()
    else:
        non_HomoNR()
# Script entry point.
# FIX: removed the dataset-separator artifact ("| 0.243552 | 0.418756")
# fused onto the final line.
if __name__ == "__main__":
    main()
import numpy as np
import scipy
#--------------------------------------------------
def diaganal_average(matrix, reverse=True, samesize=False, averaging=True):
    """Hankel (diagonal) averaging of a matrix.

    Parameters
    ------------
    * matrix: 2d ndarray,
        is the input matrix.
    * reverse: bool,
        if True, backward (anti-)diagonals are taken.
    * samesize: bool,
        if True, only the diagonals up to (and including) the main one
        are taken.
    * averaging: bool,
        if True, the mean of each diagonal is taken, else its sum.

    Returns
    -----------
    * vector: 1d ndarray,
        of size raws+columns-1, or (raws+columns-1)//2 + 1 if samesize.
        (The original docstring had the two sizes swapped.)

    Notes
    ------------
    * reverse=False walks diagonals from the bottom-left corner to the
      upper-right; reverse=True walks anti-diagonals and the output is
      written in reversed order (ending at element [0, 0]).
    """
    (raws, columns) = matrix.shape
    n_diags = raws + columns - 1
    if samesize:
        n_diags = n_diags // 2 + 1
    out = np.zeros(n_diags, dtype=matrix.dtype)
    for idx_from_bottom in np.arange(n_diags):
        # Convert the bottom-up counter to the signed index get_diaganal uses.
        idx = idx_from_bottom - raws + 1
        diag = get_diaganal(matrix, idx, reverse=reverse)
        value = np.mean(diag) if averaging else np.sum(diag)
        if not reverse:
            out[idx_from_bottom] = value
        else:
            # BUG FIX: with reverse=True and averaging=False the original
            # stored the sums at out[idx_from_bottom], inconsistent with the
            # averaged branch; both now use the same reversed output order.
            out[n_diags - idx_from_bottom - 1] = value
    return out
#---------------------------------
def get_diaganal(matrix, idx, reverse=False):
    """Get the idx-th (anti-)diagonal of a matrix, counted from the main one.

    Parameters
    ------------
    * matrix: 2d ndarray,
        is the input matrix.
    * idx: int,
        signed diagonal index relative to the main (or main backward)
        diagonal.
    * reverse: bool,
        if True, backward (anti-)diagonals are taken.

    Returns
    -----------
    * diag: 1d ndarray.

    Notes
    -------------
    * if reverse = False:
        idx = 0 - main diagonal,
        idx > 0 - right direction, idx < 0 - left direction.
    * if reverse = True:
        idx = 0 - main backward diagonal,
        idx > 0 - left direction, idx < 0 - right direction.

    Example
    -------------
    a = [1,2,3,4,5]
    b = signals.matrix.toeplitz(a)[:3,:]
    print(get_diaganal(b, 0))               # main diagonal
    print(get_diaganal(b, -2))              # 2 below the main diagonal
    print(get_diaganal(b, 3))               # 3 above the main diagonal
    print(get_diaganal(b, 0, reverse=True)) # main backward diagonal
    """
    (raws, columns) = matrix.shape
    n_diags = raws + columns - 1
    idx_from_bottom = idx + raws - 1
    if idx_from_bottom >= n_diags or idx_from_bottom < 0:
        raise ValueError('idx value out of matrix shape ')
    len_of_diag = _length_of_diag_(matrix, idx_from_bottom)
    out = np.zeros(len_of_diag, dtype=matrix.dtype)
    # CLEANUP: the original repeated `idx = np.abs(idx)` inside the loops
    # (a no-op after the first pass) and used if/if chains instead of else.
    if not reverse:
        if idx >= 0:
            for i in np.arange(len_of_diag):
                out[i] = matrix[i, i + idx]
        else:
            offset = np.abs(idx)
            for i in np.arange(len_of_diag):
                out[i] = matrix[i + offset, i]
    else:
        if idx >= 0:
            for i in np.arange(len_of_diag):
                out[i] = matrix[i, columns - 1 - i - idx]
        else:
            offset = np.abs(idx)
            for i in np.arange(len_of_diag):
                out[i] = matrix[i + offset, columns - 1 - i]
    return out
#--------------------------------------------------
def _length_of_diag_(matrix, idx):
'''
Get length of
idx diaganal of matrix.
Parameters
-----------
* matrix: 2d ndarray,
is the input matrix.
* idx int,
is the index of diganale
from main diganale (zero diag).
Returns
----------
* len: int,
length of diaganal.
Notes
----------
* idx is calculated from element 0,0,
Thus, for isntance 0 diaganale has length 1
the next one has length 2.
Examples
-----------
'''
matrix = np.asarray(matrix)
(raws, columns) = matrix.shape
n_diags = raws+columns-1
if(idx>=n_diags):
raise ValueError('index is out of diaganal number, ',n_diags)
length = 0
rank = min(raws, columns)
if (idx<n_diags//2):
length = min(idx+1,rank)
else:
length = min(n_diags-idx,rank)
return max(length,0) | dsatools/operators/_diagnalization.py | import numpy as np
import scipy
#--------------------------------------------------
def diaganal_average(matrix, reverse=True, samesize=False, averaging=True):
    """Hankel (diagonal) averaging of a matrix.

    Parameters
    ------------
    * matrix: 2d ndarray,
        is the input matrix.
    * reverse: bool,
        if True, backward (anti-)diagonals are taken.
    * samesize: bool,
        if True, only the diagonals up to (and including) the main one
        are taken.
    * averaging: bool,
        if True, the mean of each diagonal is taken, else its sum.

    Returns
    -----------
    * vector: 1d ndarray,
        of size raws+columns-1, or (raws+columns-1)//2 + 1 if samesize.
        (The original docstring had the two sizes swapped.)
    """
    (raws, columns) = matrix.shape
    n_diags = raws + columns - 1
    if samesize:
        n_diags = n_diags // 2 + 1
    out = np.zeros(n_diags, dtype=matrix.dtype)
    for idx_from_bottom in np.arange(n_diags):
        # Convert the bottom-up counter to the signed index get_diaganal uses.
        idx = idx_from_bottom - raws + 1
        diag = get_diaganal(matrix, idx, reverse=reverse)
        value = np.mean(diag) if averaging else np.sum(diag)
        if not reverse:
            out[idx_from_bottom] = value
        else:
            # BUG FIX: with reverse=True and averaging=False the original
            # stored the sums at out[idx_from_bottom], inconsistent with the
            # averaged branch; both now use the same reversed output order.
            out[n_diags - idx_from_bottom - 1] = value
    return out
#---------------------------------
def get_diaganal(matrix, idx, reverse=False):
    '''
    Get idx diaganale of matrix,
    counting from zero diag position.

    Parameters
    ------------
    * matrix: 2d ndarray,
        is the input matrix.
    * idx int,
        is the index of diganale
        from main diganale (zero diag).
    * reverse: bool,
        if True, backward diaganales
        will be taken.

    Returns
    -----------
    * diag: 1d ndarray.

    Notes:
    -------------
    * if reverce = False:
        idx = 0 - main diaganale
        idx > 0 - left direction
        idx < 0 - right direction
    * if reverce = True:
        idx = 0 - main backward diaganale
        idx > 0 - right direction
        idx < 0 - left direction
    '''
    (raws, columns) = matrix.shape
    n_diags = raws + columns - 1
    # Shift the signed index so 0 means the bottom-left corner diagonal.
    idx_from_bottom = idx + raws - 1
    if (idx_from_bottom >= n_diags or idx_from_bottom < 0):
        raise ValueError('idx value out of matrix shape ')
    len_of_diag = _length_of_diag_(matrix, idx_from_bottom)
    out = np.zeros(len_of_diag, dtype=matrix.dtype)
    if (not reverse):
        # Forward diagonals (top-left to bottom-right direction):
        if (idx >= 0):
            for i in np.arange(len_of_diag):
                out[i] = matrix[i, i + idx]
        if (idx < 0):
            idx = np.abs(idx)
            for i in np.arange(len_of_diag):
                out[i] = matrix[i + idx, i]
    else:
        # Backward (anti-)diagonals, walked from the last column inward:
        if (idx >= 0):
            for i in np.arange(len_of_diag):
                indexes = columns - 1 - i - idx
                out[i] = matrix[i, indexes]
        if (idx < 0):
            idx = np.abs(idx)
            for i in np.arange(len_of_diag):
                # (this reassignment is a no-op: idx is already positive here)
                idx = np.abs(idx)
                indexes = columns - 1 - i
                out[i] = matrix[i + idx, indexes]
    return out
#--------------------------------------------------
def _length_of_diag_(matrix, idx):
    '''
    Get length of the
    idx-th diagonal of a matrix.
    Parameters
    -----------
    * matrix: 2d ndarray,
        is the input matrix.
    * idx: int,
        is the index of the diagonal counted
        from the bottom-left corner element
        [raws-1, 0] (NOT from the main diagonal;
        callers such as get_diaganal pass
        idx_from_bottom = idx + raws - 1).
    Returns
    ----------
    * len: int,
        length of the diagonal
        (0 for a negative idx).
    Raises
    ----------
    * ValueError, if idx >= raws + columns - 1.
    Notes
    ----------
    * idx is calculated from element [raws-1, 0].
      Thus, for instance, the 0 diagonal has length 1,
      the next one has length 2, growing up to
      min(raws, columns) in the middle and shrinking
      back to 1 at the top-right corner.
    Examples
    -----------
    '''
    matrix = np.asarray(matrix)
    (raws, columns) = matrix.shape
    # total number of diagonals of a raws-by-columns matrix
    n_diags = raws+columns-1
    if(idx>=n_diags):
        raise ValueError('index is out of diaganal number, ',n_diags)
    length = 0
    # a diagonal can never be longer than the smaller matrix dimension
    rank = min(raws, columns)
    # lengths grow by one up to the middle diagonal, then shrink symmetrically
    if (idx<n_diags//2):
        length = min(idx+1,rank)
    else:
        length = min(n_diags-idx,rank)
    # clamp to zero so a negative idx yields an empty diagonal instead of a negative length
    return max(length,0) | 0.704567 | 0.535706
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from copy import deepcopy
from math import floor
from math import ceil
from compas.datastructures import meshes_join_and_weld
from compas.topology import adjacency_from_edges
from compas.topology import connected_components
from compas.geometry import discrete_coons_patch
from compas.geometry import vector_average
from compas.utilities import pairwise
from ..mesh import Mesh
from ..mesh_quad import QuadMesh
class CoarseQuadMesh(QuadMesh):
    """A quad mesh whose strips carry a density, used to generate a denser quad mesh.

    Data is stored in ``self.attributes``:

    * ``'strips_density'``: dict mapping a strip key to an int subdivision count.
    * ``'vertex_coarse_to_dense'``: dict mapping a coarse vertex key to the
      corresponding vertex key in the dense quad mesh (identity map in
      ``from_quad_mesh``).
    * ``'edge_coarse_to_dense'``: nested dict ``{u: {v: polyedge}}`` mapping a
      coarse halfedge (u, v) to the polyedge of dense vertices along it.
    * ``'quad_mesh'``: the dense QuadMesh (input mesh, or result of ``densification``).
    * ``'polygonal_mesh'``: a companion polygonal mesh.
    """
    def __init__(self):
        super(CoarseQuadMesh, self).__init__()
        # see the class docstring for the meaning of these attributes
        self.attributes['strips_density'] = {}
        self.attributes['vertex_coarse_to_dense'] = {}
        self.attributes['edge_coarse_to_dense'] = {}
        self.attributes['quad_mesh'] = None
        self.attributes['polygonal_mesh'] = None
    # --------------------------------------------------------------------------
    # constructors
    # --------------------------------------------------------------------------
    @classmethod
    def from_quad_mesh(cls, quad_mesh, collect_strips=True, collect_polyedges=True, attribute_density=True):
        """Build a coarse quad mesh from a quad mesh, with density and child-parent element data.

        Parameters
        ----------
        quad_mesh : QuadMesh
            A (dense) quad mesh.
        collect_strips : bool, optional
            Collect the strip data of the coarse mesh.
        collect_polyedges : bool, optional
            Collect the polyedge data of the coarse mesh.
        attribute_density : bool, optional
            Keep density data of the dense quad mesh and inherit it as attribute.

        Returns
        ----------
        coarse_quad_mesh : CoarseQuadMesh
            A coarse quad mesh with density data.
        """
        # polyedges between singularities become the edges of the coarse mesh
        polyedges = quad_mesh.singularity_polyedge_decomposition()
        # vertex data: polyedge endpoints become the coarse vertices
        vertices = {vkey: quad_mesh.vertex_coordinates(vkey) for vkey in quad_mesh.vertices()}
        coarse_vertices_children = {vkey: vkey for polyedge in polyedges for vkey in [polyedge[0], polyedge[-1]]}
        coarse_vertices = {vkey: quad_mesh.vertex_coordinates(vkey) for vkey in coarse_vertices_children}
        # edge data: each coarse edge maps to the dense polyedge joining its endpoints
        coarse_edges_children = {(polyedge[0], polyedge[-1]): polyedge for polyedge in polyedges}
        singularity_edges = [(x, y) for polyedge in polyedges for u, v in pairwise(polyedge) for x, y in [(u, v), (v, u)]]
        # face data: dense faces connected without crossing a singularity polyedge merge into one coarse face
        faces = {fkey: quad_mesh.face_vertices(fkey) for fkey in quad_mesh.faces()}
        adj_edges = {(f1, f2) for f1 in quad_mesh.faces() for f2 in quad_mesh.face_neighbors(f1) if f1 < f2 and quad_mesh.face_adjacency_halfedge(f1, f2) not in singularity_edges}
        coarse_faces_children = {}
        for i, connected_faces in enumerate(connected_components(adjacency_from_edges(adj_edges))):
            mesh = Mesh.from_vertices_and_faces(vertices, [faces[face] for face in connected_faces])
            # keep only the corner vertices (degree 2) of each merged face boundary
            coarse_faces_children[i] = [vkey for vkey in reversed(mesh.boundaries()[0]) if mesh.vertex_degree(vkey) == 2]
        coarse_quad_mesh = cls.from_vertices_and_faces(coarse_vertices, coarse_faces_children)
        # attribute relation child-parent element between coarse and dense quad meshes
        coarse_quad_mesh.attributes['vertex_coarse_to_dense'] = coarse_vertices_children
        coarse_quad_mesh.attributes['edge_coarse_to_dense'] = {u: {} for u in coarse_quad_mesh.vertices()}
        for (u, v), polyedge in coarse_edges_children.items():
            coarse_quad_mesh.attributes['edge_coarse_to_dense'][u][v] = polyedge
            coarse_quad_mesh.attributes['edge_coarse_to_dense'][v][u] = list(reversed(polyedge))
        # collect strip and polyedge attributes
        if collect_strips:
            coarse_quad_mesh.collect_strips()
        if collect_polyedges:
            coarse_quad_mesh.collect_polyedges()
        # store density attribute from input dense quad mesh
        if attribute_density:
            coarse_quad_mesh.set_strips_density(1)
            for skey in coarse_quad_mesh.strips():
                u, v = coarse_quad_mesh.strip_edges(skey)[0]
                # NOTE(review): len(polyedge) counts vertices (edges + 1) — confirm intended density
                d = len(coarse_edges_children.get((u, v), coarse_edges_children.get((v, u), [])))
                coarse_quad_mesh.set_strip_density(skey, d)
        # store quad mesh and use as polygonal mesh
        coarse_quad_mesh.set_quad_mesh(quad_mesh)
        coarse_quad_mesh.set_polygonal_mesh(deepcopy(quad_mesh))
        return coarse_quad_mesh
    # --------------------------------------------------------------------------
    # meshes getters and setters
    # --------------------------------------------------------------------------
    def get_quad_mesh(self):
        """Return the stored dense quad mesh (None if not set)."""
        return self.attributes['quad_mesh']
    def set_quad_mesh(self, quad_mesh):
        """Store a dense quad mesh."""
        self.attributes['quad_mesh'] = quad_mesh
    def get_polygonal_mesh(self):
        """Return the stored polygonal mesh (None if not set)."""
        return self.attributes['polygonal_mesh']
    def set_polygonal_mesh(self, polygonal_mesh):
        """Store a polygonal mesh."""
        self.attributes['polygonal_mesh'] = polygonal_mesh
    # --------------------------------------------------------------------------
    # element child-parent relation getters
    # --------------------------------------------------------------------------
    def coarse_edge_dense_edges(self, u, v):
        """Return the child edges, or polyedge, in the dense quad mesh from a parent edge (u, v) in the coarse quad mesh."""
        return self.attributes['edge_coarse_to_dense'][u][v]
    # --------------------------------------------------------------------------
    # density getters and setters
    # --------------------------------------------------------------------------
    def get_strip_density(self, skey):
        """Get the density of a strip.

        Parameters
        ----------
        skey : hashable
            A strip key.

        Returns
        ----------
        int
            The strip density.
        """
        return self.attributes['strips_density'][skey]
    def get_strip_densities(self):
        """Get the densities of all strips.

        Returns
        ----------
        dict
            The dictionary of the strip densities.
        """
        return self.attributes['strips_density']
    # --------------------------------------------------------------------------
    # density setters
    # --------------------------------------------------------------------------
    def set_strip_density(self, skey, d):
        """Set the density of one strip.

        Parameters
        ----------
        skey : hashable
            A strip key.
        d : int
            A density parameter.
        """
        self.attributes['strips_density'][skey] = d
    def set_strips_density(self, d, skeys=None):
        """Set the same density to multiple strips.

        Parameters
        ----------
        d : int
            A density parameter.
        skeys : list, None
            The keys of strips to set density. If None, all strips are considered.
        """
        if skeys is None:
            skeys = self.strips()
        for skey in skeys:
            self.set_strip_density(skey, d)
    def set_strip_density_target(self, skey, t):
        """Set the density of one strip based on a target length and the average length of the strip edges.

        Parameters
        ----------
        skey : hashable
            A strip key.
        t : float
            A target length.
        """
        # density = ceil(average strip edge length / target length); degenerate edges (u == v) are ignored
        self.set_strip_density(skey, int(ceil(vector_average([self.edge_length(u, v) for u, v in self.strip_edges(skey) if u != v]) / t)))
    def set_strip_density_func(self, skey, func, func_args):
        """Set the density of one strip based on a function.

        Parameters
        ----------
        skey : hashable
            A strip key.
        func : callable
            A function ``func(skey, func_args)`` returning a number; the result is cast to int.
        func_args : object
            The extra arguments passed to ``func``.
        """
        self.set_strip_density(skey, int(func(skey, func_args)))
    def set_strips_density_target(self, t, skeys=None):
        """Set the strip densities based on a target length and the average length of the strip edges.

        Parameters
        ----------
        t : float
            A target length.
        skeys : list, None
            The keys of strips to set density. If None, all strips are considered.
        """
        if skeys is None:
            skeys = self.strips()
        for skey in skeys:
            self.set_strip_density_target(skey, t)
    def set_strips_density_func(self, func, func_args, skeys=None):
        """Set the strip densities based on a function.

        Parameters
        ----------
        func : callable
            A function ``func(skey, func_args)`` returning a number per strip.
        func_args : object
            The extra arguments passed to ``func``.
        skeys : list, None
            The keys of strips to set density. If None, all strips are considered.
        """
        if skeys is None:
            skeys = self.strips()
        for skey in skeys:
            self.set_strip_density_func(skey, func, func_args)
    def set_mesh_density_face_target(self, nb_faces):
        """Set equal strip densities based on a target number of faces.

        Parameters
        ----------
        nb_faces : int
            The target number of faces.
        """
        # with uniform density n each coarse face densifies into roughly n * n faces,
        # so take the square root of the face ratio and round to the nearest int
        n = (nb_faces / self.number_of_faces()) ** .5
        if ceil(n) - n > n - floor(n):
            n = int(floor(n))
        else:
            n = int(ceil(n))
        self.set_strips_density(n)
    # --------------------------------------------------------------------------
    # densification
    # --------------------------------------------------------------------------
    def densification(self):
        """Generate a denser quad mesh from the coarse quad mesh and its strip densities.

        The result is stored via ``set_quad_mesh`` and can be retrieved with
        ``get_quad_mesh``; the coarse mesh itself is left unchanged.
        """
        # map every (directed) edge to the strip it belongs to
        edge_strip = {}
        for skey, edges in self.strips(data=True):
            for edge in edges:
                edge_strip[edge] = skey
                edge_strip[tuple(reversed(edge))] = skey
        face_meshes = {}
        for fkey in self.faces():
            # subdivide each boundary halfedge of the face according to its strip density ...
            ab, bc, cd, da = [[self.edge_point(u, v, float(i) / float(self.get_strip_density(edge_strip[(u, v)])))
                               for i in range(0, self.get_strip_density(edge_strip[(u, v)]) + 1)] for u, v in self.face_halfedges(fkey)]
            # ... then fill the face with a transfinite (Coons) patch and weld all patches together
            vertices, faces = discrete_coons_patch(ab, bc, list(reversed(cd)), list(reversed(da)))
            face_meshes[fkey] = QuadMesh.from_vertices_and_faces(vertices, faces)
        self.set_quad_mesh(meshes_join_and_weld(list(face_meshes.values()))) | src/compas_rv2/singular/datastructures/mesh_quad_coarse/mesh_quad_coarse.py | from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from copy import deepcopy
from math import floor
from math import ceil
from compas.datastructures import meshes_join_and_weld
from compas.topology import adjacency_from_edges
from compas.topology import connected_components
from compas.geometry import discrete_coons_patch
from compas.geometry import vector_average
from compas.utilities import pairwise
from ..mesh import Mesh
from ..mesh_quad import QuadMesh
class CoarseQuadMesh(QuadMesh):
    """A quad mesh whose strips carry a density, used to generate a denser quad mesh.

    Data is stored in ``self.attributes``:

    * ``'strips_density'``: dict mapping a strip key to an int subdivision count.
    * ``'vertex_coarse_to_dense'``: dict mapping a coarse vertex key to the
      corresponding vertex key in the dense quad mesh (identity map in
      ``from_quad_mesh``).
    * ``'edge_coarse_to_dense'``: nested dict ``{u: {v: polyedge}}`` mapping a
      coarse halfedge (u, v) to the polyedge of dense vertices along it.
    * ``'quad_mesh'``: the dense QuadMesh (input mesh, or result of ``densification``).
    * ``'polygonal_mesh'``: a companion polygonal mesh.
    """
    def __init__(self):
        super(CoarseQuadMesh, self).__init__()
        # see the class docstring for the meaning of these attributes
        self.attributes['strips_density'] = {}
        self.attributes['vertex_coarse_to_dense'] = {}
        self.attributes['edge_coarse_to_dense'] = {}
        self.attributes['quad_mesh'] = None
        self.attributes['polygonal_mesh'] = None
    # --------------------------------------------------------------------------
    # constructors
    # --------------------------------------------------------------------------
    @classmethod
    def from_quad_mesh(cls, quad_mesh, collect_strips=True, collect_polyedges=True, attribute_density=True):
        """Build a coarse quad mesh from a quad mesh, with density and child-parent element data.

        Parameters
        ----------
        quad_mesh : QuadMesh
            A (dense) quad mesh.
        collect_strips : bool, optional
            Collect the strip data of the coarse mesh.
        collect_polyedges : bool, optional
            Collect the polyedge data of the coarse mesh.
        attribute_density : bool, optional
            Keep density data of the dense quad mesh and inherit it as attribute.

        Returns
        ----------
        coarse_quad_mesh : CoarseQuadMesh
            A coarse quad mesh with density data.
        """
        # polyedges between singularities become the edges of the coarse mesh
        polyedges = quad_mesh.singularity_polyedge_decomposition()
        # vertex data: polyedge endpoints become the coarse vertices
        vertices = {vkey: quad_mesh.vertex_coordinates(vkey) for vkey in quad_mesh.vertices()}
        coarse_vertices_children = {vkey: vkey for polyedge in polyedges for vkey in [polyedge[0], polyedge[-1]]}
        coarse_vertices = {vkey: quad_mesh.vertex_coordinates(vkey) for vkey in coarse_vertices_children}
        # edge data: each coarse edge maps to the dense polyedge joining its endpoints
        coarse_edges_children = {(polyedge[0], polyedge[-1]): polyedge for polyedge in polyedges}
        singularity_edges = [(x, y) for polyedge in polyedges for u, v in pairwise(polyedge) for x, y in [(u, v), (v, u)]]
        # face data: dense faces connected without crossing a singularity polyedge merge into one coarse face
        faces = {fkey: quad_mesh.face_vertices(fkey) for fkey in quad_mesh.faces()}
        adj_edges = {(f1, f2) for f1 in quad_mesh.faces() for f2 in quad_mesh.face_neighbors(f1) if f1 < f2 and quad_mesh.face_adjacency_halfedge(f1, f2) not in singularity_edges}
        coarse_faces_children = {}
        for i, connected_faces in enumerate(connected_components(adjacency_from_edges(adj_edges))):
            mesh = Mesh.from_vertices_and_faces(vertices, [faces[face] for face in connected_faces])
            # keep only the corner vertices (degree 2) of each merged face boundary
            coarse_faces_children[i] = [vkey for vkey in reversed(mesh.boundaries()[0]) if mesh.vertex_degree(vkey) == 2]
        coarse_quad_mesh = cls.from_vertices_and_faces(coarse_vertices, coarse_faces_children)
        # attribute relation child-parent element between coarse and dense quad meshes
        coarse_quad_mesh.attributes['vertex_coarse_to_dense'] = coarse_vertices_children
        coarse_quad_mesh.attributes['edge_coarse_to_dense'] = {u: {} for u in coarse_quad_mesh.vertices()}
        for (u, v), polyedge in coarse_edges_children.items():
            coarse_quad_mesh.attributes['edge_coarse_to_dense'][u][v] = polyedge
            coarse_quad_mesh.attributes['edge_coarse_to_dense'][v][u] = list(reversed(polyedge))
        # collect strip and polyedge attributes
        if collect_strips:
            coarse_quad_mesh.collect_strips()
        if collect_polyedges:
            coarse_quad_mesh.collect_polyedges()
        # store density attribute from input dense quad mesh
        if attribute_density:
            coarse_quad_mesh.set_strips_density(1)
            for skey in coarse_quad_mesh.strips():
                u, v = coarse_quad_mesh.strip_edges(skey)[0]
                # NOTE(review): len(polyedge) counts vertices (edges + 1) — confirm intended density
                d = len(coarse_edges_children.get((u, v), coarse_edges_children.get((v, u), [])))
                coarse_quad_mesh.set_strip_density(skey, d)
        # store quad mesh and use as polygonal mesh
        coarse_quad_mesh.set_quad_mesh(quad_mesh)
        coarse_quad_mesh.set_polygonal_mesh(deepcopy(quad_mesh))
        return coarse_quad_mesh
    # --------------------------------------------------------------------------
    # meshes getters and setters
    # --------------------------------------------------------------------------
    def get_quad_mesh(self):
        """Return the stored dense quad mesh (None if not set)."""
        return self.attributes['quad_mesh']
    def set_quad_mesh(self, quad_mesh):
        """Store a dense quad mesh."""
        self.attributes['quad_mesh'] = quad_mesh
    def get_polygonal_mesh(self):
        """Return the stored polygonal mesh (None if not set)."""
        return self.attributes['polygonal_mesh']
    def set_polygonal_mesh(self, polygonal_mesh):
        """Store a polygonal mesh."""
        self.attributes['polygonal_mesh'] = polygonal_mesh
    # --------------------------------------------------------------------------
    # element child-parent relation getters
    # --------------------------------------------------------------------------
    def coarse_edge_dense_edges(self, u, v):
        """Return the child edges, or polyedge, in the dense quad mesh from a parent edge (u, v) in the coarse quad mesh."""
        return self.attributes['edge_coarse_to_dense'][u][v]
    # --------------------------------------------------------------------------
    # density getters and setters
    # --------------------------------------------------------------------------
    def get_strip_density(self, skey):
        """Get the density of a strip.

        Parameters
        ----------
        skey : hashable
            A strip key.

        Returns
        ----------
        int
            The strip density.
        """
        return self.attributes['strips_density'][skey]
    def get_strip_densities(self):
        """Get the densities of all strips.

        Returns
        ----------
        dict
            The dictionary of the strip densities.
        """
        return self.attributes['strips_density']
    # --------------------------------------------------------------------------
    # density setters
    # --------------------------------------------------------------------------
    def set_strip_density(self, skey, d):
        """Set the density of one strip.

        Parameters
        ----------
        skey : hashable
            A strip key.
        d : int
            A density parameter.
        """
        self.attributes['strips_density'][skey] = d
    def set_strips_density(self, d, skeys=None):
        """Set the same density to multiple strips.

        Parameters
        ----------
        d : int
            A density parameter.
        skeys : list, None
            The keys of strips to set density. If None, all strips are considered.
        """
        if skeys is None:
            skeys = self.strips()
        for skey in skeys:
            self.set_strip_density(skey, d)
    def set_strip_density_target(self, skey, t):
        """Set the density of one strip based on a target length and the average length of the strip edges.

        Parameters
        ----------
        skey : hashable
            A strip key.
        t : float
            A target length.
        """
        # density = ceil(average strip edge length / target length); degenerate edges (u == v) are ignored
        self.set_strip_density(skey, int(ceil(vector_average([self.edge_length(u, v) for u, v in self.strip_edges(skey) if u != v]) / t)))
    def set_strip_density_func(self, skey, func, func_args):
        """Set the density of one strip based on a function.

        Parameters
        ----------
        skey : hashable
            A strip key.
        func : callable
            A function ``func(skey, func_args)`` returning a number; the result is cast to int.
        func_args : object
            The extra arguments passed to ``func``.
        """
        self.set_strip_density(skey, int(func(skey, func_args)))
    def set_strips_density_target(self, t, skeys=None):
        """Set the strip densities based on a target length and the average length of the strip edges.

        Parameters
        ----------
        t : float
            A target length.
        skeys : list, None
            The keys of strips to set density. If None, all strips are considered.
        """
        if skeys is None:
            skeys = self.strips()
        for skey in skeys:
            self.set_strip_density_target(skey, t)
    def set_strips_density_func(self, func, func_args, skeys=None):
        """Set the strip densities based on a function.

        Parameters
        ----------
        func : callable
            A function ``func(skey, func_args)`` returning a number per strip.
        func_args : object
            The extra arguments passed to ``func``.
        skeys : list, None
            The keys of strips to set density. If None, all strips are considered.
        """
        if skeys is None:
            skeys = self.strips()
        for skey in skeys:
            self.set_strip_density_func(skey, func, func_args)
    def set_mesh_density_face_target(self, nb_faces):
        """Set equal strip densities based on a target number of faces.

        Parameters
        ----------
        nb_faces : int
            The target number of faces.
        """
        # with uniform density n each coarse face densifies into roughly n * n faces,
        # so take the square root of the face ratio and round to the nearest int
        n = (nb_faces / self.number_of_faces()) ** .5
        if ceil(n) - n > n - floor(n):
            n = int(floor(n))
        else:
            n = int(ceil(n))
        self.set_strips_density(n)
    # --------------------------------------------------------------------------
    # densification
    # --------------------------------------------------------------------------
    def densification(self):
        """Generate a denser quad mesh from the coarse quad mesh and its strip densities.

        The result is stored via ``set_quad_mesh`` and can be retrieved with
        ``get_quad_mesh``; the coarse mesh itself is left unchanged.
        """
        # map every (directed) edge to the strip it belongs to
        edge_strip = {}
        for skey, edges in self.strips(data=True):
            for edge in edges:
                edge_strip[edge] = skey
                edge_strip[tuple(reversed(edge))] = skey
        face_meshes = {}
        for fkey in self.faces():
            # subdivide each boundary halfedge of the face according to its strip density ...
            ab, bc, cd, da = [[self.edge_point(u, v, float(i) / float(self.get_strip_density(edge_strip[(u, v)])))
                               for i in range(0, self.get_strip_density(edge_strip[(u, v)]) + 1)] for u, v in self.face_halfedges(fkey)]
            # ... then fill the face with a transfinite (Coons) patch and weld all patches together
            vertices, faces = discrete_coons_patch(ab, bc, list(reversed(cd)), list(reversed(da)))
            face_meshes[fkey] = QuadMesh.from_vertices_and_faces(vertices, faces)
        self.set_quad_mesh(meshes_join_and_weld(list(face_meshes.values()))) | 0.89903 | 0.395835