code stringlengths 2k 1.04M | repo_path stringlengths 5 517 | parsed_code stringlengths 0 1.04M | quality_prob float64 0.02 0.95 | learning_prob float64 0.02 0.93 |
|---|---|---|---|---|
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
import numpy as np
def trace(paths, destination):
"""
Traces path from destination to origin.
Input:
paths; dictionary
Return:
List containing tuples of x-, y-, z-coordinates.
"""
coordinates = destination
path = [coordinates]
# iterate over keys in dictionary until the path is traced
while coordinates in paths:
coordinates = paths[coordinates]
path.append(coordinates)
path.reverse()
return path
def matlib_convert(path):
"""
Convert tuples of x-, y-, z-coordinates to x-, y-, z-coordinate lists for visualisation via matplotlib
Input:
path; list containing tuples of x-, y-, z-coordinates.
Return:
Tuple of x-, y-, z-coordinate lists.
"""
x_list = []
y_list = []
z_list = []
for coordinate in path:
x_list.append(coordinate[0])
y_list.append(coordinate[1])
z_list.append(coordinate[2])
return (x_list, y_list, z_list)
def plot(x_gates, y_gates, z_gates, boundaries, paths, count_wires):
"""
Plot gates and connections in a 3D grid.
Input:
x_gates; list of x-coordinates of all gates.
y_gates; list of y-coordinates of all gates.
z_gates; list of z-coordinates of all gates.
boundaries; tuple of x-, y-, z-coordinates.
paths; dictionary containing all paths between gates.
count_wires; integer.
Return:
None
"""
# create figure with correct axes
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
plt.xticks(np.arange(0, boundaries[1][0] + 1, 1))
plt.yticks(np.arange(0, boundaries[1][1] + 1, 1))
plt.title(f"Total wire length: {count_wires}")
ax.set_xlim3d(0, boundaries[1][0], 1)
ax.set_ylim3d(0, boundaries[1][1], 1)
ax.set_zlim3d(0, 7)
# plot all gates
for m, zlow, zhigh in [('s', 0, 7)]:
x = x_gates
y = y_gates
z = z_gates
ax.scatter(x, y, z, marker=m)
# plot all connections
for connection in paths:
ax.plot(paths[connection][0], paths[connection][1], paths[connection][2], '-')
# axis names
ax.set_xlabel('X axis')
ax.set_ylabel('Y axis')
ax.set_zlabel('Z axis')
plt.show() | code/helpers/helpers.py | from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
import numpy as np
def trace(paths, destination):
"""
Traces path from destination to origin.
Input:
paths; dictionary
Return:
List containing tuples of x-, y-, z-coordinates.
"""
coordinates = destination
path = [coordinates]
# iterate over keys in dictionary until the path is traced
while coordinates in paths:
coordinates = paths[coordinates]
path.append(coordinates)
path.reverse()
return path
def matlib_convert(path):
"""
Convert tuples of x-, y-, z-coordinates to x-, y-, z-coordinate lists for visualisation via matplotlib
Input:
path; list containing tuples of x-, y-, z-coordinates.
Return:
Tuple of x-, y-, z-coordinate lists.
"""
x_list = []
y_list = []
z_list = []
for coordinate in path:
x_list.append(coordinate[0])
y_list.append(coordinate[1])
z_list.append(coordinate[2])
return (x_list, y_list, z_list)
def plot(x_gates, y_gates, z_gates, boundaries, paths, count_wires):
"""
Plot gates and connections in a 3D grid.
Input:
x_gates; list of x-coordinates of all gates.
y_gates; list of y-coordinates of all gates.
z_gates; list of z-coordinates of all gates.
boundaries; tuple of x-, y-, z-coordinates.
paths; dictionary containing all paths between gates.
count_wires; integer.
Return:
None
"""
# create figure with correct axes
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
plt.xticks(np.arange(0, boundaries[1][0] + 1, 1))
plt.yticks(np.arange(0, boundaries[1][1] + 1, 1))
plt.title(f"Total wire length: {count_wires}")
ax.set_xlim3d(0, boundaries[1][0], 1)
ax.set_ylim3d(0, boundaries[1][1], 1)
ax.set_zlim3d(0, 7)
# plot all gates
for m, zlow, zhigh in [('s', 0, 7)]:
x = x_gates
y = y_gates
z = z_gates
ax.scatter(x, y, z, marker=m)
# plot all connections
for connection in paths:
ax.plot(paths[connection][0], paths[connection][1], paths[connection][2], '-')
# axis names
ax.set_xlabel('X axis')
ax.set_ylabel('Y axis')
ax.set_zlabel('Z axis')
plt.show() | 0.859531 | 0.77223 |
import torch
import os
import sys
import torch.nn as nn
from MWCNN import WCNN,IWCNN
import torch.nn.functional as F
def init_weights(m):
if type(m) == nn.Conv2d:
nn.init.xavier_uniform_(m.weight)
m.bias.data.fill_(0.01)
class SimpleCNN(torch.nn.Module):
#Our batch shape for input x is (3, 32, 32)
def __init__(self,in_ch=1,out_ch=3):
super(SimpleCNN, self).__init__()
#Input channels = 3, output channels = 18
self.conv1 = torch.nn.Conv2d(in_ch, out_ch, kernel_size=3, stride=1, padding=1)
self.conv2 =torch.nn.Conv2d(out_ch, out_ch, kernel_size=3, stride=1, padding=1)
self.conv3 =torch.nn.Conv2d(out_ch, out_ch, kernel_size=3, stride=1, padding=1)
#self.pool = torch.nn.MaxPool2d(kernel_size=2, stride=2, padding=0)
#4608 input features, 64 output features (see sizing flow below)
#64 input features, 10 output features for our 10 defined classes
def forward(self, x):
#Computes the activation of the first convolution
#Size changes from (3, 32, 32) to (18, 32, 32)
x = F.relu(self.conv1(x))
#Size changes from (18, 32, 32) to (18, 16, 16)
#Reshape data to input to the input layer of the neural net
#Size changes from (18, 16, 16) to (1, 4608)
#Recall that the -1 infers this dimension from the other given dimension
#Computes the activation of the first fully connected layer
#Size changes from (1, 4608) to (1, 64)
x = F.relu(self.conv2(x))
#Computes the second fully connected layer (activation applied later)
#Size changes from (1, 64) to (1, 10)
x = self.conv3(x)
return(x)
class MW_Unet(nn.Module):
"""
Baseline architecture for Multi-level Wavelet-CNN paper
Incorporates Unet style concatenation of dims
input:N,C,H,W
output: N,C,H,W
"""
def __init__(self,num_conv=2,in_ch=1,out_ch=3,channel_1=32,channel_2=64,channel_3 = 96,channel_4 = 128):
'''
:param: num_conv per contraction and expansion layer, how many extra conv-batch-relu layers wanted
:param in_ch: number of input channels expected
:return:
'''
super(MW_Unet,self).__init__()
print("channel_1: {}, channel_2: {} num_conv: {}".format(channel_1,channel_2,num_conv))
self.num_conv = num_conv
self.in_ch = in_ch
self.out_ch = out_ch
self.cnn_1 = WCNN(in_ch=in_ch,out_ch=channel_1,num_conv=num_conv) #output N,160,H/2,W/2
self.cnn_2 = WCNN(in_ch=channel_1,out_ch=channel_2,num_conv=num_conv)
self.cnn_3 = WCNN(in_ch=channel_2,out_ch=channel_3,num_conv=num_conv)
self.cnn_4 = WCNN(in_ch=channel_3,out_ch=channel_4,num_conv=num_conv)
self.cnn_5 = WCNN(in_ch = channel_4,out_ch=channel_4,num_conv=num_conv)
self.icnn_5 = IWCNN(in_ch=channel_4,internal_ch=4*channel_4,num_conv=num_conv)
self.icnn_4 =IWCNN(in_ch= 2*channel_4,internal_ch=4*channel_3,num_conv=num_conv)
self.icnn_3 = IWCNN(in_ch=2*channel_3,internal_ch=4*channel_2,num_conv=num_conv)
self.icnn_2 = IWCNN(in_ch=2*channel_2,internal_ch=4*channel_1,num_conv=num_conv) #expecting 2*256 because of skip connection
self.icnn_1 = IWCNN(in_ch=2*channel_1,internal_ch=self.in_ch*4,num_conv=num_conv) # output N,in_ch,H,W
self.final_conv = nn.Conv2d(in_channels=self.in_ch,out_channels=self.out_ch,kernel_size=3,padding=1)
def forward(self,x):
x1 = self.cnn_1(x)
x2 = self.cnn_2(x1)
x3 = self.cnn_3(x2)
x4 = self.cnn_4(x3)
x5 = self.cnn_5(x4)
y_0 = self.icnn_5(x5)
y0 = self.icnn_4(torch.cat((y_0,x4),dim=1))
y1 = self.icnn_3(torch.cat((y0,x3),dim=1))
y2 = self.icnn_2(torch.cat((y1,x2),dim=1))
y3 = self.icnn_1(torch.cat((y2,x1),dim=1))
output = self.final_conv(y3)
return output
if __name__ == "__main__":
print("testing MW_Unet")
X = torch.randn(9, 5, 128, 128)
print(X.dtype)
N, C, H, W = X.shape
Unet = MW_Unet(in_ch=C)
Unet.apply(init_weights)
Y = Unet(X)
print("shape of X: ", X.shape)
print("shape of Y: ", Y.shape)
#print(torch.mean(X - Y)) | project/models/MWU_CNN.py | import torch
import os
import sys
import torch.nn as nn
from MWCNN import WCNN,IWCNN
import torch.nn.functional as F
def init_weights(m):
if type(m) == nn.Conv2d:
nn.init.xavier_uniform_(m.weight)
m.bias.data.fill_(0.01)
class SimpleCNN(torch.nn.Module):
#Our batch shape for input x is (3, 32, 32)
def __init__(self,in_ch=1,out_ch=3):
super(SimpleCNN, self).__init__()
#Input channels = 3, output channels = 18
self.conv1 = torch.nn.Conv2d(in_ch, out_ch, kernel_size=3, stride=1, padding=1)
self.conv2 =torch.nn.Conv2d(out_ch, out_ch, kernel_size=3, stride=1, padding=1)
self.conv3 =torch.nn.Conv2d(out_ch, out_ch, kernel_size=3, stride=1, padding=1)
#self.pool = torch.nn.MaxPool2d(kernel_size=2, stride=2, padding=0)
#4608 input features, 64 output features (see sizing flow below)
#64 input features, 10 output features for our 10 defined classes
def forward(self, x):
#Computes the activation of the first convolution
#Size changes from (3, 32, 32) to (18, 32, 32)
x = F.relu(self.conv1(x))
#Size changes from (18, 32, 32) to (18, 16, 16)
#Reshape data to input to the input layer of the neural net
#Size changes from (18, 16, 16) to (1, 4608)
#Recall that the -1 infers this dimension from the other given dimension
#Computes the activation of the first fully connected layer
#Size changes from (1, 4608) to (1, 64)
x = F.relu(self.conv2(x))
#Computes the second fully connected layer (activation applied later)
#Size changes from (1, 64) to (1, 10)
x = self.conv3(x)
return(x)
class MW_Unet(nn.Module):
"""
Baseline architecture for Multi-level Wavelet-CNN paper
Incorporates Unet style concatenation of dims
input:N,C,H,W
output: N,C,H,W
"""
def __init__(self,num_conv=2,in_ch=1,out_ch=3,channel_1=32,channel_2=64,channel_3 = 96,channel_4 = 128):
'''
:param: num_conv per contraction and expansion layer, how many extra conv-batch-relu layers wanted
:param in_ch: number of input channels expected
:return:
'''
super(MW_Unet,self).__init__()
print("channel_1: {}, channel_2: {} num_conv: {}".format(channel_1,channel_2,num_conv))
self.num_conv = num_conv
self.in_ch = in_ch
self.out_ch = out_ch
self.cnn_1 = WCNN(in_ch=in_ch,out_ch=channel_1,num_conv=num_conv) #output N,160,H/2,W/2
self.cnn_2 = WCNN(in_ch=channel_1,out_ch=channel_2,num_conv=num_conv)
self.cnn_3 = WCNN(in_ch=channel_2,out_ch=channel_3,num_conv=num_conv)
self.cnn_4 = WCNN(in_ch=channel_3,out_ch=channel_4,num_conv=num_conv)
self.cnn_5 = WCNN(in_ch = channel_4,out_ch=channel_4,num_conv=num_conv)
self.icnn_5 = IWCNN(in_ch=channel_4,internal_ch=4*channel_4,num_conv=num_conv)
self.icnn_4 =IWCNN(in_ch= 2*channel_4,internal_ch=4*channel_3,num_conv=num_conv)
self.icnn_3 = IWCNN(in_ch=2*channel_3,internal_ch=4*channel_2,num_conv=num_conv)
self.icnn_2 = IWCNN(in_ch=2*channel_2,internal_ch=4*channel_1,num_conv=num_conv) #expecting 2*256 because of skip connection
self.icnn_1 = IWCNN(in_ch=2*channel_1,internal_ch=self.in_ch*4,num_conv=num_conv) # output N,in_ch,H,W
self.final_conv = nn.Conv2d(in_channels=self.in_ch,out_channels=self.out_ch,kernel_size=3,padding=1)
def forward(self,x):
x1 = self.cnn_1(x)
x2 = self.cnn_2(x1)
x3 = self.cnn_3(x2)
x4 = self.cnn_4(x3)
x5 = self.cnn_5(x4)
y_0 = self.icnn_5(x5)
y0 = self.icnn_4(torch.cat((y_0,x4),dim=1))
y1 = self.icnn_3(torch.cat((y0,x3),dim=1))
y2 = self.icnn_2(torch.cat((y1,x2),dim=1))
y3 = self.icnn_1(torch.cat((y2,x1),dim=1))
output = self.final_conv(y3)
return output
if __name__ == "__main__":
print("testing MW_Unet")
X = torch.randn(9, 5, 128, 128)
print(X.dtype)
N, C, H, W = X.shape
Unet = MW_Unet(in_ch=C)
Unet.apply(init_weights)
Y = Unet(X)
print("shape of X: ", X.shape)
print("shape of Y: ", Y.shape)
#print(torch.mean(X - Y)) | 0.803521 | 0.499756 |
"""Setup dot py."""
from __future__ import absolute_import, print_function
import zipfile
from glob import glob
from pathlib import Path
from os.path import basename, dirname, join, splitext
from setuptools import find_packages, setup
def read(*names, **kwargs):
"""Read description files."""
path = join(dirname(__file__), *names)
with open(path, encoding=kwargs.get('encoding', 'utf8')) as fh:
return fh.read()
long_description = '{}\n{}'.format(
read('README.rst'),
read(join('docs', 'CHANGELOG.rst')),
)
_path2simple = Path(
Path(__file__).resolve().parent,
'src',
'mcsce',
'core',
'data',
'SimpleOpt1-5.zip')
with zipfile.ZipFile(_path2simple, 'r') as dbzip:
dbzip.extractall(Path(_path2simple.parent, 'SimpleOpt1-5'))
setup(
name='mcsce',
version='0.1.0',
description=(
'Monte Carlo Side Chain Entropy package for generating side '
'chain packing for fixed protein backbone.'),
long_description=long_description,
long_description_content_type='text/x-rst',
license='MIT License',
author='<NAME> and <NAME>',
author_email='<EMAIL>',
url='https://github.com/THGLab/MCSCE',
packages=find_packages('src'),
package_dir={'': 'src'},
py_modules=[splitext(basename(i))[0] for i in glob("src/*.py")],
include_package_data=True,
zip_safe=False,
classifiers=[
# complete classifier list:
# http://pypi.python.org/pypi?%3Aaction=list_classifiers
'Development Status :: 3 - Alpha',
'License :: OSI Approved :: MIT License',
'Intended Audience :: Science/Research',
'Natural Language :: English',
'Operating System :: POSIX',
'Operating System :: MacOS',
'Operating System :: Microsoft',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Topic :: Scientific/Engineering :: Bio-Informatics',
],
project_urls={
'webpage': 'https://github.com/THGLab/MCSCE',
'Documentation': 'https://MCSCE.readthedocs.io/en/latest/',
'Changelog': 'https://github.com/THGLab/MCSCE/blob/master/docs/CHANGELOG.rst',
'Issue Tracker': 'https://github.com/THGLab/MCSCE/issues',
'Discussion Forum': 'https://github.com/THGLab/MCSCE/discussions',
},
keywords=[
'Structural Biology', 'Proteins',
],
python_requires='>=3.7, <4',
install_requires=[
],
extras_require={
},
setup_requires=[
],
entry_points={
'console_scripts': [
'mcsce=mcsce.cli:maincli',
]
},
) | setup.py | """Setup dot py."""
from __future__ import absolute_import, print_function
import zipfile
from glob import glob
from pathlib import Path
from os.path import basename, dirname, join, splitext
from setuptools import find_packages, setup
def read(*names, **kwargs):
"""Read description files."""
path = join(dirname(__file__), *names)
with open(path, encoding=kwargs.get('encoding', 'utf8')) as fh:
return fh.read()
long_description = '{}\n{}'.format(
read('README.rst'),
read(join('docs', 'CHANGELOG.rst')),
)
_path2simple = Path(
Path(__file__).resolve().parent,
'src',
'mcsce',
'core',
'data',
'SimpleOpt1-5.zip')
with zipfile.ZipFile(_path2simple, 'r') as dbzip:
dbzip.extractall(Path(_path2simple.parent, 'SimpleOpt1-5'))
setup(
name='mcsce',
version='0.1.0',
description=(
'Monte Carlo Side Chain Entropy package for generating side '
'chain packing for fixed protein backbone.'),
long_description=long_description,
long_description_content_type='text/x-rst',
license='MIT License',
author='<NAME> and <NAME>',
author_email='<EMAIL>',
url='https://github.com/THGLab/MCSCE',
packages=find_packages('src'),
package_dir={'': 'src'},
py_modules=[splitext(basename(i))[0] for i in glob("src/*.py")],
include_package_data=True,
zip_safe=False,
classifiers=[
# complete classifier list:
# http://pypi.python.org/pypi?%3Aaction=list_classifiers
'Development Status :: 3 - Alpha',
'License :: OSI Approved :: MIT License',
'Intended Audience :: Science/Research',
'Natural Language :: English',
'Operating System :: POSIX',
'Operating System :: MacOS',
'Operating System :: Microsoft',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Topic :: Scientific/Engineering :: Bio-Informatics',
],
project_urls={
'webpage': 'https://github.com/THGLab/MCSCE',
'Documentation': 'https://MCSCE.readthedocs.io/en/latest/',
'Changelog': 'https://github.com/THGLab/MCSCE/blob/master/docs/CHANGELOG.rst',
'Issue Tracker': 'https://github.com/THGLab/MCSCE/issues',
'Discussion Forum': 'https://github.com/THGLab/MCSCE/discussions',
},
keywords=[
'Structural Biology', 'Proteins',
],
python_requires='>=3.7, <4',
install_requires=[
],
extras_require={
},
setup_requires=[
],
entry_points={
'console_scripts': [
'mcsce=mcsce.cli:maincli',
]
},
) | 0.745954 | 0.183301 |
import torch
import warnings
import time
from dataset.fake_human.melanoma_fake_human_dataset import MelanomaFakeHumanDataset
from support import cprint, Color
from neural_network.convolutional_melanoma_neural_network import ConvolutionalMelanomaNeuralNetwork
warnings.filterwarnings("ignore", category=UserWarning)
# setting device
device = "cuda" if torch.cuda.is_available() else "cpu"
cprint("Running on {}...".format(device), Color.BLUE)
# defining fixed parameters
training_set_size = 10000 #TODO 10000
test_set_size = 900 #TODO 900
training_epochs = 100 #TODO 100
# defining base parameters (neural network)
batch_size = 4 #TODO 32
input_size = 512 * 512 * 3
output_size = 1
# defining Neural Network
neural_network = ConvolutionalMelanomaNeuralNetwork(device)
neural_network.criterion = torch.nn.MSELoss()
neural_network.optimizer = torch.optim.Adam(neural_network.parameters(), lr=1e-4, weight_decay=5e-4)
# defining dataset
dataset_fake_human = MelanomaFakeHumanDataset(training_set_size, 0, test_set_size, batch_size)
# printing statistics start neural network
loss, accuracy = neural_network.get_statistics(dataset_fake_human.get_metrics_dataloader())
cprint("Start loss: {}".format(loss), Color.RED, loggable = True)
cprint("Start accuracy: {}".format(accuracy), Color.RED, loggable = True)
# making training
cprint("Training neural network...", Color.GREEN)
start_time = time.time()
neural_network.fit(dataset_fake_human.get_dataloader(), training_epochs)
loss, accuracy = neural_network.get_statistics(dataset_fake_human.get_metrics_dataloader())
end_time = time.time()
# printing and saving results
cprint("Neural network training time: {}".format(end_time - start_time), Color.RED, loggable = True)
cprint("End loss: {}".format(loss), Color.RED, loggable = True)
cprint("End accuracy: {}".format(accuracy), Color.RED, loggable = True)
cprint("Saving neural network...", Color.GREEN)
neural_network.save("./data/networks/cnn_melanoma_simple_train.net")
cprint("Completed!", Color.PINK) | src/train.py | import torch
import warnings
import time
from dataset.fake_human.melanoma_fake_human_dataset import MelanomaFakeHumanDataset
from support import cprint, Color
from neural_network.convolutional_melanoma_neural_network import ConvolutionalMelanomaNeuralNetwork
warnings.filterwarnings("ignore", category=UserWarning)
# setting device
device = "cuda" if torch.cuda.is_available() else "cpu"
cprint("Running on {}...".format(device), Color.BLUE)
# defining fixed parameters
training_set_size = 10000 #TODO 10000
test_set_size = 900 #TODO 900
training_epochs = 100 #TODO 100
# defining base parameters (neural network)
batch_size = 4 #TODO 32
input_size = 512 * 512 * 3
output_size = 1
# defining Neural Network
neural_network = ConvolutionalMelanomaNeuralNetwork(device)
neural_network.criterion = torch.nn.MSELoss()
neural_network.optimizer = torch.optim.Adam(neural_network.parameters(), lr=1e-4, weight_decay=5e-4)
# defining dataset
dataset_fake_human = MelanomaFakeHumanDataset(training_set_size, 0, test_set_size, batch_size)
# printing statistics start neural network
loss, accuracy = neural_network.get_statistics(dataset_fake_human.get_metrics_dataloader())
cprint("Start loss: {}".format(loss), Color.RED, loggable = True)
cprint("Start accuracy: {}".format(accuracy), Color.RED, loggable = True)
# making training
cprint("Training neural network...", Color.GREEN)
start_time = time.time()
neural_network.fit(dataset_fake_human.get_dataloader(), training_epochs)
loss, accuracy = neural_network.get_statistics(dataset_fake_human.get_metrics_dataloader())
end_time = time.time()
# printing and saving results
cprint("Neural network training time: {}".format(end_time - start_time), Color.RED, loggable = True)
cprint("End loss: {}".format(loss), Color.RED, loggable = True)
cprint("End accuracy: {}".format(accuracy), Color.RED, loggable = True)
cprint("Saving neural network...", Color.GREEN)
neural_network.save("./data/networks/cnn_melanoma_simple_train.net")
cprint("Completed!", Color.PINK) | 0.331769 | 0.244138 |
from telegram.ext import Updater, CommandHandler, MessageHandler, Filters
import logging
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
import os
import dbcrud as db
# Logging
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
level=logging.INFO)
#DRIVER PATH IS CHROMEDRIVER PATH
driver_path = "C:/Program Files/BraveSoftware/Brave-Browser/Application/chromedriver.exe"
#BRAVE PATH IS LOCATION OF ANY BROWSER'S .EXE FILE
brave_path = "C:/Program Files/BraveSoftware/Brave-Browser/Application/brave.exe"
option = webdriver.ChromeOptions()
option.binary_location = brave_path
option.add_argument('--headless')
option.add_argument('--disable-gpu')
# Command Handlers. Usually take two arguments: bot and update.
def start(update, context):
context.bot.send_message(chat_id=update.message.chat_id,text="Welcome to Attendance bot!!\nSend \n'/username username'\n'/password password'\nto store Id and password for future use\n/attendance to get attendance")
def username(update, context):
val = [update.message.chat_id, str(context.args[0])]
db.update_username(val[0],val[1])
context.bot.send_message(
chat_id=update.message.chat_id, text="Username saved")
def password(update, context):
val = [update.message.chat_id,str(context.args[0])]
db.update_password(val[0], val[1])
context.bot.send_message(
chat_id=update.message.chat_id, text="Password saved")
def attendance(update, context):
chat_id = update.message.chat_id
myresult =db.get_details(chat_id)
print(myresult)
if myresult[0] == None and myresult[1] == None:
context.bot.send_message(
chat_id=update.message.chat_id, text="Username and Password not found\nSend \n'/username + username'\n'/password + password' to store Id and password for future use")
elif myresult[0] == None:
context.bot.send_message(
chat_id=update.message.chat_id, text="Username not found\nSend \n'/username + username' to store Id and password for future use")
elif myresult[1] == None:
context.bot.send_message(
chat_id=update.message.chat_id, text="Password not found\nSend'\n'/password + password' to store Id and password for future use")
else:
uname = myresult[0]
pword = myresult[1]
context.bot.send_message(
chat_id=update.message.chat_id, text="Please wait while I check your attendance")
# Create new Instance of Chrome
try:
browser = webdriver.Chrome(executable_path=driver_path, options=option)
browser.get("https://erp.ncuindia.edu/Welcome_iie.aspx")
username = browser.find_element_by_id("tbUserName")
username.send_keys(uname)
pas = browser.find_element_by_id("tbPassword")
pas.send_keys(<PASSWORD>)
pas.send_keys(Keys.RETURN)
try:
element = WebDriverWait(browser, 10).until(
EC.presence_of_element_located((By.ID, "aAttandance"))
)
aten = browser.find_element_by_id(
"aAttandance").get_attribute("href")
browser.get(aten)
check=True
try:
element = WebDriverWait(browser, 10).until(
EC.presence_of_element_located(
(By.XPATH, "//*[@id='aspnetForm']/div[3]/div/div/div[2]/div/div/section/div/div[2]/table/tbody/tr"))
)
tr = len(browser.find_elements_by_xpath(
"//*[@id='aspnetForm']/div[3]/div/div/div[2]/div/div/section/div/div[2]/table/tbody/tr"))
before_xpath = "//*[@id='aspnetForm']/div[3]/div/div/div[2]/div/div/section/div/div[2]/table/tbody/tr["
aftertd_xpath = "]/td"
data = ""
for t_tr in range(1, tr):
finalxpath = before_xpath + str(t_tr) + aftertd_xpath
cell_text = browser.find_elements_by_xpath(finalxpath)
data = data+cell_text[1].text+"--"+cell_text[6].text+"\n\n"
check=True
except:
check=False
except:
check=False
browser.quit()
except:
check=False
if check==False:
if db.get_attendance(chatid=chat_id) == None:
data= "Process failed"
else:
data = "Process failed\nSending the last saved attendance\n\n"+db.get_attendance(chatid=chat_id)
else:
db.update_attendance(chat_id, data)
# send the link back
context.bot.send_message(
chat_id=update.message.chat_id, text=data)
def main():
# Create updater and pass in Bot's auth key.
updater = Updater(
token='<KEY>', use_context=True)
# Get dispatcher to register handlers
dispatcher = updater.dispatcher
# answer commands
dispatcher.add_handler(CommandHandler('start', start))
dispatcher.add_handler(CommandHandler('username', username))
dispatcher.add_handler(CommandHandler('password', password))
dispatcher.add_handler(CommandHandler('attendance', attendance))
# start the bot
updater.start_polling()
# Stop
updater.idle()
if __name__ == '__main__':
main() | bot.py | from telegram.ext import Updater, CommandHandler, MessageHandler, Filters
import logging
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
import os
import dbcrud as db
# Logging
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
level=logging.INFO)
#DRIVER PATH IS CHROMEDRIVER PATH
driver_path = "C:/Program Files/BraveSoftware/Brave-Browser/Application/chromedriver.exe"
#BRAVE PATH IS LOCATION OF ANY BROWSER'S .EXE FILE
brave_path = "C:/Program Files/BraveSoftware/Brave-Browser/Application/brave.exe"
option = webdriver.ChromeOptions()
option.binary_location = brave_path
option.add_argument('--headless')
option.add_argument('--disable-gpu')
# Command Handlers. Usually take two arguments: bot and update.
def start(update, context):
context.bot.send_message(chat_id=update.message.chat_id,text="Welcome to Attendance bot!!\nSend \n'/username username'\n'/password password'\nto store Id and password for future use\n/attendance to get attendance")
def username(update, context):
val = [update.message.chat_id, str(context.args[0])]
db.update_username(val[0],val[1])
context.bot.send_message(
chat_id=update.message.chat_id, text="Username saved")
def password(update, context):
val = [update.message.chat_id,str(context.args[0])]
db.update_password(val[0], val[1])
context.bot.send_message(
chat_id=update.message.chat_id, text="Password saved")
def attendance(update, context):
chat_id = update.message.chat_id
myresult =db.get_details(chat_id)
print(myresult)
if myresult[0] == None and myresult[1] == None:
context.bot.send_message(
chat_id=update.message.chat_id, text="Username and Password not found\nSend \n'/username + username'\n'/password + password' to store Id and password for future use")
elif myresult[0] == None:
context.bot.send_message(
chat_id=update.message.chat_id, text="Username not found\nSend \n'/username + username' to store Id and password for future use")
elif myresult[1] == None:
context.bot.send_message(
chat_id=update.message.chat_id, text="Password not found\nSend'\n'/password + password' to store Id and password for future use")
else:
uname = myresult[0]
pword = myresult[1]
context.bot.send_message(
chat_id=update.message.chat_id, text="Please wait while I check your attendance")
# Create new Instance of Chrome
try:
browser = webdriver.Chrome(executable_path=driver_path, options=option)
browser.get("https://erp.ncuindia.edu/Welcome_iie.aspx")
username = browser.find_element_by_id("tbUserName")
username.send_keys(uname)
pas = browser.find_element_by_id("tbPassword")
pas.send_keys(<PASSWORD>)
pas.send_keys(Keys.RETURN)
try:
element = WebDriverWait(browser, 10).until(
EC.presence_of_element_located((By.ID, "aAttandance"))
)
aten = browser.find_element_by_id(
"aAttandance").get_attribute("href")
browser.get(aten)
check=True
try:
element = WebDriverWait(browser, 10).until(
EC.presence_of_element_located(
(By.XPATH, "//*[@id='aspnetForm']/div[3]/div/div/div[2]/div/div/section/div/div[2]/table/tbody/tr"))
)
tr = len(browser.find_elements_by_xpath(
"//*[@id='aspnetForm']/div[3]/div/div/div[2]/div/div/section/div/div[2]/table/tbody/tr"))
before_xpath = "//*[@id='aspnetForm']/div[3]/div/div/div[2]/div/div/section/div/div[2]/table/tbody/tr["
aftertd_xpath = "]/td"
data = ""
for t_tr in range(1, tr):
finalxpath = before_xpath + str(t_tr) + aftertd_xpath
cell_text = browser.find_elements_by_xpath(finalxpath)
data = data+cell_text[1].text+"--"+cell_text[6].text+"\n\n"
check=True
except:
check=False
except:
check=False
browser.quit()
except:
check=False
if check==False:
if db.get_attendance(chatid=chat_id) == None:
data= "Process failed"
else:
data = "Process failed\nSending the last saved attendance\n\n"+db.get_attendance(chatid=chat_id)
else:
db.update_attendance(chat_id, data)
# send the link back
context.bot.send_message(
chat_id=update.message.chat_id, text=data)
def main():
# Create updater and pass in Bot's auth key.
updater = Updater(
token='<KEY>', use_context=True)
# Get dispatcher to register handlers
dispatcher = updater.dispatcher
# answer commands
dispatcher.add_handler(CommandHandler('start', start))
dispatcher.add_handler(CommandHandler('username', username))
dispatcher.add_handler(CommandHandler('password', password))
dispatcher.add_handler(CommandHandler('attendance', attendance))
# start the bot
updater.start_polling()
# Stop
updater.idle()
if __name__ == '__main__':
main() | 0.17989 | 0.050098 |
import pandas as pd
import config
def generate_quality_df():
"""
generate dataframe for training and evaluating image quality model : only deepdr dataset
(use original train for train-valid spilit, use original valid as test --> so that can evaluate with test)
save : ./output/q_traindf.csv and ./output/q_testdf.csv
"""
# read csv containing labels corresponding to the images
train_csv= f'{config.PATH_DISK}/data/deepdr/regular_fundus_images/regular-fundus-training/regular-fundus-training.csv'
test_csv = f'{config.PATH_DISK}/data/deepdr/regular_fundus_images/regular-fundus-validation/regular-fundus-validation.csv'
print(config.PATH_DISK)
print(config.PATH_VM)
print(train_csv)
train = pd.read_csv(train_csv)
test = pd.read_csv(test_csv)
# generate dataframe with image path and overall quality lable
traindf = pd.DataFrame()
testdf = pd.DataFrame()
traindf['im_path'] = train['image_path'].apply(lambda x : x.replace('\\', '/')).apply(lambda x : f'{config.PATH_DISK}/data/deepdr/regular_fundus_images/regular-fundus-training/Images'+x[24:]) # mac
testdf['im_path'] = test['image_path'].apply(lambda x : x.replace('\\', '/')).apply(lambda x : f'{config.PATH_DISK}/data/deepdr/regular_fundus_images/regular-fundus-validation/Images'+x[26:]) # mac
traindf['im_quality'] = train['Overall quality'].astype('str')
testdf['im_quality'] = test['Overall quality'].astype('str')
# save output
traindf.to_csv(f'{config.PATH_VM}/data/output/q_traindf.csv')
testdf.to_csv(f'{config.PATH_VM}/data/output/q_testdf.csv')
#print(f'quality : total {traindf.shape[0] + testdf.shape[0]}, train {traindf.shape[0]}, test {testdf.shape[0]}')
print('quality : total {}, train {}, test {}'.format(traindf.shape[0] + testdf.shape[0], traindf.shape[0], testdf.shape[0]))
def generate_diagnosis_df_deepdr():
"""
prepare dataframe for training diagnosis model : using deepdr data
Note : this dataframe from deepdr dataset will be merged with the one
from kaggle dataset, in kaggle dataset train and valid images were not
separated, therefore here also merge train and valid, after mering with
kaggle dataset train and valid will be splitted in model training part.
"""
# read csv containing labels corresponding to the images
train_csv= f'{config.PATH_DISK}/data/deepdr/regular_fundus_images/regular-fundus-training/regular-fundus-training.csv'
valid_csv = f'{config.PATH_DISK}/data/deepdr/regular_fundus_images/regular-fundus-validation/regular-fundus-validation.csv'
train = pd.read_csv(train_csv)
valid = pd.read_csv(valid_csv)
# generate dataframe with image path and overall quality lable
traindf = pd.DataFrame()
validdf = pd.DataFrame()
traindf['im_path'] = train['image_path'].apply(lambda x : x.replace('\\', '/')).apply(lambda x : f'{config.PATH_DISK}/data/deepdr/regular_fundus_images/regular-fundus-training/Images'+x[24:]) # mac
validdf['im_path'] = valid['image_path'].apply(lambda x : x.replace('\\', '/')).apply(lambda x : f'{config.PATH_DISK}/data/deepdr/regular_fundus_images/regular-fundus-validation/Images'+x[26:]) # mac
traindf['diagnosis'] = train['patient_DR_Level'].astype('str')
validdf['diagnosis'] = valid['patient_DR_Level'].astype('str')
return pd.concat([traindf, validdf])
def generate_diagnosis_df_kaggle():
    """
    Build the kaggle part of the diagnosis dataframe.

    Side effect: the label-less kaggle test frame is written to
    {config.PATH_VM}/data/output/d_testdf_kaggle.csv for later inference.

    Returns:
        pd.DataFrame with columns 'im_path' and 'diagnosis' for the
        labelled kaggle training images.
    """
    # read the label CSVs; the kaggle test CSV carries only image ids
    train = pd.read_csv(f'{config.PATH_DISK}/data/kaggle/train.csv')
    test = pd.read_csv(f'{config.PATH_DISK}/data/kaggle/test.csv')
    traindf = pd.DataFrame()
    testdf = pd.DataFrame()
    # map each image id to its local png path
    traindf['im_path'] = train['id_code'].apply(
        lambda code: f'{config.PATH_DISK}/data/kaggle/train_images/' + code + '.png')
    testdf['im_path'] = test['id_code'].apply(
        lambda code: f'{config.PATH_DISK}/data/kaggle/test_images/' + code + '.png')
    traindf['diagnosis'] = train['diagnosis'].astype('str')
    testdf['diagnosis'] = ''  # kaggle test images have no labels
    # persist the kaggle diagnosis test frame
    testdf.to_csv(f'{config.PATH_VM}/data/output/d_testdf_kaggle.csv')
    return traindf
def generate_diagnosis_df(random_state=None):
    """
    Combine the diagnosis dataframes from deepdr and kaggle.

    Steps:
    1) concat the deepdr and kaggle frames
    2) shuffle and split 75/25 into train/test

    Side effects: writes d_traindf.csv and d_testdf.csv under
    {config.PATH_VM}/data/output/ and prints the split sizes.

    :param random_state: optional seed forwarded to DataFrame.sample so the
        shuffle (and therefore the train/test split) is reproducible.
        Default None preserves the previous nondeterministic behaviour.
    """
    deepdrdf = generate_diagnosis_df_deepdr()
    kaggledf = generate_diagnosis_df_kaggle()
    # frac=1 returns every row in a random order, i.e. a full shuffle
    mergedf = pd.concat([deepdrdf, kaggledf]).sample(frac=1, random_state=random_state).reset_index(drop=True)
    n = round(mergedf.shape[0] * 0.75)  # 75% train / 25% test
    traindf = mergedf.iloc[:n]
    testdf = mergedf.iloc[n:]
    print('diagnosis : total {}, train {}, test {}'.format(mergedf.shape[0], traindf.shape[0], testdf.shape[0]))
    traindf.to_csv(f'{config.PATH_VM}/data/output/d_traindf.csv')
    testdf.to_csv(f'{config.PATH_VM}/data/output/d_testdf.csv')
if __name__ == "__main__":
    generate_quality_df() # generate and save dataframes for quality check
    generate_diagnosis_df() # merge and save dataframes for diagnosis from deepdr and kaggle dataset
import config
# NOTE(review): everything from here down duplicates the definitions earlier in
# this file (an artifact of the dataset dump this file was assembled from);
# the code is kept byte-identical and only comments are touched.
def generate_quality_df():
    """
    generate dataframe for training and evaluating image quality model : only deepdr dataset
    (use original train for train-valid split, use original valid as test --> so that can evaluate with test)
    save : ./output/q_traindf.csv and ./output/q_testdf.csv
    """
    # read csv containing labels corresponding to the images
    train_csv= f'{config.PATH_DISK}/data/deepdr/regular_fundus_images/regular-fundus-training/regular-fundus-training.csv'
    test_csv = f'{config.PATH_DISK}/data/deepdr/regular_fundus_images/regular-fundus-validation/regular-fundus-validation.csv'
    # debug output of the configured paths
    print(config.PATH_DISK)
    print(config.PATH_VM)
    print(train_csv)
    train = pd.read_csv(train_csv)
    test = pd.read_csv(test_csv)
    # generate dataframe with image path and overall quality label
    traindf = pd.DataFrame()
    testdf = pd.DataFrame()
    # NOTE(review): the [24:]/[26:] slices drop a hard-coded prefix of
    # image_path -- verify against the CSV content if paths ever change
    traindf['im_path'] = train['image_path'].apply(lambda x : x.replace('\\', '/')).apply(lambda x : f'{config.PATH_DISK}/data/deepdr/regular_fundus_images/regular-fundus-training/Images'+x[24:]) # mac
    testdf['im_path'] = test['image_path'].apply(lambda x : x.replace('\\', '/')).apply(lambda x : f'{config.PATH_DISK}/data/deepdr/regular_fundus_images/regular-fundus-validation/Images'+x[26:]) # mac
    traindf['im_quality'] = train['Overall quality'].astype('str')
    testdf['im_quality'] = test['Overall quality'].astype('str')
    # save output
    traindf.to_csv(f'{config.PATH_VM}/data/output/q_traindf.csv')
    testdf.to_csv(f'{config.PATH_VM}/data/output/q_testdf.csv')
    print('quality : total {}, train {}, test {}'.format(traindf.shape[0] + testdf.shape[0], traindf.shape[0], testdf.shape[0]))
def generate_diagnosis_df_deepdr():
    """
    prepare dataframe for training diagnosis model : using deepdr data
    Note : this dataframe from deepdr dataset will be merged with the one
    from kaggle dataset, in kaggle dataset train and valid images were not
    separated, therefore here also merge train and valid, after merging with
    kaggle dataset train and valid will be split in model training part.
    """
    # read csv containing labels corresponding to the images
    train_csv= f'{config.PATH_DISK}/data/deepdr/regular_fundus_images/regular-fundus-training/regular-fundus-training.csv'
    valid_csv = f'{config.PATH_DISK}/data/deepdr/regular_fundus_images/regular-fundus-validation/regular-fundus-validation.csv'
    train = pd.read_csv(train_csv)
    valid = pd.read_csv(valid_csv)
    # generate dataframe with image path and overall quality label
    traindf = pd.DataFrame()
    validdf = pd.DataFrame()
    traindf['im_path'] = train['image_path'].apply(lambda x : x.replace('\\', '/')).apply(lambda x : f'{config.PATH_DISK}/data/deepdr/regular_fundus_images/regular-fundus-training/Images'+x[24:]) # mac
    validdf['im_path'] = valid['image_path'].apply(lambda x : x.replace('\\', '/')).apply(lambda x : f'{config.PATH_DISK}/data/deepdr/regular_fundus_images/regular-fundus-validation/Images'+x[26:]) # mac
    traindf['diagnosis'] = train['patient_DR_Level'].astype('str')
    validdf['diagnosis'] = valid['patient_DR_Level'].astype('str')
    return pd.concat([traindf, validdf])
def generate_diagnosis_df_kaggle():
    """ prepare dataframe for training diagnosis model : using kaggle data"""
    # read csv containing labels corresponding to the images
    train_csv= f'{config.PATH_DISK}/data/kaggle/train.csv'
    test_csv = f'{config.PATH_DISK}/data/kaggle/test.csv'
    train = pd.read_csv(train_csv)
    test = pd.read_csv(test_csv) # only id, no label
    # generate dataframe with image path and overall quality label
    traindf = pd.DataFrame()
    testdf = pd.DataFrame()
    traindf['im_path'] = train['id_code'].apply(lambda x : f'{config.PATH_DISK}/data/kaggle/train_images/'+x+'.png')
    testdf['im_path'] = test['id_code'].apply(lambda x : f'{config.PATH_DISK}/data/kaggle/test_images/'+x+'.png')
    traindf['diagnosis'] = train['diagnosis'].astype('str')
    testdf['diagnosis'] = ''
    # save kaggle diagnosis testdf (no labels; used later for inference)
    testdf.to_csv(f'{config.PATH_VM}/data/output/d_testdf_kaggle.csv')
    return traindf
def generate_diagnosis_df():
    """ combine diagnosis df from deepdr and kaggle
    Note :
    1) concat deepdr, kaggle df
    2) train test split (shuffle)
    """
    deepdrdf = generate_diagnosis_df_deepdr()
    kaggledf = generate_diagnosis_df_kaggle()
    mergedf = pd.concat([deepdrdf, kaggledf]).sample(frac=1).reset_index(drop=True) # shuffle
    # 75/25 train/test split of the shuffled frame
    n = round(mergedf.shape[0] * 0.75)
    traindf = mergedf.iloc[:n]
    testdf = mergedf.iloc[n:]
    print('diagnosis : total {}, train {}, test {}'.format(mergedf.shape[0], traindf.shape[0], testdf.shape[0]))
    traindf.to_csv(f'{config.PATH_VM}/data/output/d_traindf.csv')
    testdf.to_csv(f'{config.PATH_VM}/data/output/d_testdf.csv')
if __name__ == "__main__":
    generate_quality_df() # generate and save dataframes for quality check
    generate_diagnosis_df() # merge and save dataframes for diagnosis from deepdr and kaggle dataset
from collections import namedtuple
from typing import List
from django.views.generic import TemplateView, View
from django.http import HttpResponse
from braces.views import LoginRequiredMixin, GroupRequiredMixin
from poetry.apps.corpus.models import Poem, MarkupVersion
from rupo.main.markup import Markup
def get_accents(markup: Markup):
    """Flatten a markup into a per-syllable list of stress flags.

    True means the syllable carries a stress (stress != -1).
    Order follows lines -> words -> syllables.
    """
    return [syllable.stress != -1
            for line in markup.lines
            for word in line.words
            for syllable in word.syllables]
def get_accuracy(standard_accents: List[bool], test_accents: List[bool]):
    """Fraction of positions where the two accent sequences agree.

    Only the overlapping prefix is compared (zip truncates to the shorter
    sequence) and the hit count is divided by the shorter length.
    Returns 0.0 when either sequence is empty instead of raising
    ZeroDivisionError.
    """
    shortest = min(len(standard_accents), len(test_accents))
    if shortest == 0:
        return 0.0
    hits = sum(1 for standard_accent, test_accent in zip(standard_accents, test_accents)
               if standard_accent == test_accent)
    return float(hits) / shortest
def get_precision(standard_accents: List[bool], test_accents: List[bool]):
    """Precision of the test accents against the standard accents.

    tp counts positions where both sequences mark a stress; the denominator
    is the number of stresses the test sequence predicts.  Returns 0.0 when
    the test sequence predicts no stress at all instead of raising
    ZeroDivisionError.
    """
    tp = sum(1 for standard_accent, test_accent in zip(standard_accents, test_accents)
             if standard_accent == test_accent == 1)
    tp_fp = sum(1 for accent in test_accents if accent == 1)
    if tp_fp == 0:
        return 0.0
    return float(tp) / tp_fp
def get_recall(standard_accents: List[bool], test_accents: List[bool]):
    """Recall of the test accents against the standard accents.

    tp counts positions where both sequences mark a stress; the denominator
    is the number of stresses in the standard sequence.  Returns 0.0 when
    the standard sequence contains no stress instead of raising
    ZeroDivisionError.
    """
    tp = sum(1 for standard_accent, test_accent in zip(standard_accents, test_accents)
             if standard_accent == test_accent == 1)
    tp_fn = sum(1 for accent in standard_accents if accent == 1)
    if tp_fn == 0:
        return 0.0
    return float(tp) / tp_fn
def get_comparison(poem, standard_pk, test_pk):
    """Compare two markups of one poem and compute agreement metrics.

    Picks the poem's markups belonging to the standard and test markup
    versions, checks they annotate identical text, and scores the test
    markup against the standard one.

    Returns a Comparison namedtuple:
    (poem, test, standard, accuracy, precision, recall, f1).
    """
    test_markup = None
    standard_markup = None
    for markup in poem.markups.all():
        if markup.markup_version.pk == standard_pk:
            standard_markup = markup
        if markup.markup_version.pk == test_pk:
            test_markup = markup
    # NOTE(review): if either markup version is missing for this poem the
    # lines below fail with AttributeError on None -- callers must ensure
    # both versions exist.
    assert test_markup.get_markup().text == standard_markup.get_markup().text
    standard_accents = get_accents(standard_markup.get_markup())
    test_accents = get_accents(test_markup.get_markup())
    accuracy = get_accuracy(standard_accents, test_accents)
    precision = get_precision(standard_accents, test_accents)
    recall = get_recall(standard_accents, test_accents)
    # guard the harmonic mean against 0/0 when both precision and recall are zero
    f1 = 2 * precision * recall / (precision + recall) if precision + recall else 0.0
    Comparison = namedtuple("Comparison", "poem test standard accuracy precision recall f1")
    return Comparison(poem=poem, test=test_markup, standard=standard_markup, accuracy=accuracy,
                      precision=precision, recall=recall, f1=f1)
def get_all_comparisons(standard_pk, test_pk):
    """Run get_comparison for every poem that has markups in both versions."""
    standard_markup_version = MarkupVersion.objects.get(pk=standard_pk)
    candidate_markups = standard_markup_version.markups.filter(
        poem__markups__markup_version=test_pk)
    # de-duplicate: a poem may appear once per matching markup
    unique_poems = list({markup.poem for markup in candidate_markups})
    return [get_comparison(poem, standard_pk, test_pk) for poem in unique_poems]
class ComparisonView(LoginRequiredMixin, GroupRequiredMixin, TemplateView):
    """HTML view showing agreement metrics between two markup versions."""
    template_name = 'comparison.html'
    group_required = "Approved"
    def get_context_data(self, **kwargs):
        """Fill the template context with comparisons and their averages."""
        context = super(ComparisonView, self).get_context_data(**kwargs)
        test_pk = int(self.request.GET["test"])
        standard_pk = int(self.request.GET["standard"])
        # optional query parameter restricting the comparison to one poem
        document_pk = self.request.GET.get("document", None)
        if document_pk is not None:
            comparisons = [get_comparison(Poem.objects.get(pk=document_pk), standard_pk, test_pk)]
        else:
            comparisons = get_all_comparisons(standard_pk, test_pk)
        total = len(comparisons)
        context["comparisons"] = comparisons
        context["avg_accuracy"] = sum(item.accuracy for item in comparisons) / total
        context["avg_f1"] = sum(item.f1 for item in comparisons) / total
        return context
class ComparisonCSVView(LoginRequiredMixin, GroupRequiredMixin, View):
    """CSV download of the markup-version comparison table."""
    group_required = "Approved"
    def get(self, request, *args, **kwargs):
        # pks of the two markup versions to compare come from the query string
        standard_pk = int(request.GET["standard"])
        test_pk = int(request.GET["test"])
        response = HttpResponse()
        comparisons = get_all_comparisons(standard_pk, test_pk)
        # CSV header; commas inside names/authors are stripped below, not quoted
        content = "poem,test,standard,accuracy,precision,recall,f1\n"
        for comparison in comparisons:
            content += ",".join([comparison.poem.name.replace(",", ""),
                                 comparison.test.author.replace(",", ""),
                                 comparison.standard.author.replace(",", ""),
                                 "{:.3f}".format(comparison.accuracy),
                                 "{:.3f}".format(comparison.precision),
                                 "{:.3f}".format(comparison.recall),
                                 "{:.3f}".format(comparison.f1)]) + "\n"
        response.content = content
        # serve as a downloadable attachment named after the compared versions
        response["Content-Disposition"] = "attachment; filename={0}".format(
            "comparison" + str(standard_pk) + "-" + str(test_pk) + ".csv")
return response | poetry/apps/corpus/views/comparison_view.py | from collections import namedtuple
from typing import List
from django.views.generic import TemplateView, View
from django.http import HttpResponse
from braces.views import LoginRequiredMixin, GroupRequiredMixin
from poetry.apps.corpus.models import Poem, MarkupVersion
from rupo.main.markup import Markup
# NOTE(review): everything from here down duplicates the definitions above
# (dataset-dump artifact); code is kept byte-identical, only comments added.
def get_accents(markup: Markup):
    """Flatten a markup into a per-syllable list of stress flags (True = stressed)."""
    accents = []
    for line in markup.lines:
        for word in line.words:
            for syllable in word.syllables:
                accents.append(syllable.stress != -1)
    return accents
def get_accuracy(standard_accents: List[bool], test_accents: List[bool]):
    """Share of agreeing positions over the shorter of the two sequences."""
    l = min(len(standard_accents), len(test_accents))
    hits = sum([1 for standard_accent, test_accent in zip(standard_accents, test_accents)
                if standard_accent == test_accent])
    # NOTE(review): raises ZeroDivisionError when either sequence is empty
    return float(hits) / l
def get_precision(standard_accents: List[bool], test_accents: List[bool]):
    """tp / (tp + fp): shared stresses over all stresses predicted by test."""
    tp = sum([1 for standard_accent, test_accent in zip(standard_accents, test_accents)
              if standard_accent == test_accent == 1])
    tp_fp = sum([1 for accent in test_accents if accent == 1])
    # NOTE(review): raises ZeroDivisionError when test predicts no stress
    return float(tp) / tp_fp
def get_recall(standard_accents: List[bool], test_accents: List[bool]):
    """tp / (tp + fn): shared stresses over all stresses in the standard."""
    tp = sum([1 for standard_accent, test_accent in zip(standard_accents, test_accents)
              if standard_accent == test_accent == 1])
    tp_fn = sum([1 for accent in standard_accents if accent == 1])
    return float(tp) / tp_fn
def get_comparison(poem, standard_pk, test_pk):
    """Build a Comparison namedtuple scoring the test markup against the standard one."""
    test_markup = None
    standard_markup = None
    for markup in poem.markups.all():
        if markup.markup_version.pk == standard_pk:
            standard_markup = markup
        if markup.markup_version.pk == test_pk:
            test_markup = markup
    # both markups must annotate exactly the same text
    assert test_markup.get_markup().text == standard_markup.get_markup().text
    standard_accents = get_accents(standard_markup.get_markup())
    test_accents = get_accents(test_markup.get_markup())
    accuracy = get_accuracy(standard_accents, test_accents)
    precision = get_precision(standard_accents, test_accents)
    recall = get_recall(standard_accents, test_accents)
    f1 = 2*precision*recall/(precision+recall)
    Comparison = namedtuple("Comparison", "poem test standard accuracy precision recall f1")
    return Comparison(poem=poem, test=test_markup, standard=standard_markup, accuracy=accuracy,
                      precision=precision, recall=recall, f1=f1)
def get_all_comparisons(standard_pk, test_pk):
    """Run get_comparison for every poem that has markups in both versions."""
    standard_markup_version = MarkupVersion.objects.get(pk=standard_pk)
    poems = list(set([markup.poem for markup in standard_markup_version.markups.filter(
        poem__markups__markup_version=test_pk)]))
    return [get_comparison(poem, standard_pk, test_pk) for poem in poems]
class ComparisonView(LoginRequiredMixin, GroupRequiredMixin, TemplateView):
    """HTML view showing comparison metrics for two markup versions."""
    template_name = 'comparison.html'
    group_required = "Approved"
    def get_context_data(self, **kwargs):
        context = super(ComparisonView, self).get_context_data(**kwargs)
        test_pk = int(self.request.GET["test"])
        standard_pk = int(self.request.GET["standard"])
        # optional: restrict the comparison to a single poem
        document_pk = self.request.GET.get("document", None)
        if document_pk is None:
            comparisons = get_all_comparisons(standard_pk, test_pk)
        else:
            comparisons = [get_comparison(Poem.objects.get(pk=document_pk), standard_pk, test_pk)]
        context["comparisons"] = comparisons
        context["avg_accuracy"] = sum([comparison.accuracy for comparison in comparisons])/len(comparisons)
        context["avg_f1"] = sum([comparison.f1 for comparison in comparisons]) / len(comparisons)
        return context
class ComparisonCSVView(LoginRequiredMixin, GroupRequiredMixin, View):
    """CSV download of the comparison table."""
    group_required = "Approved"
    def get(self, request, *args, **kwargs):
        standard_pk = int(request.GET["standard"])
        test_pk = int(request.GET["test"])
        response = HttpResponse()
        comparisons = get_all_comparisons(standard_pk, test_pk)
        content = "poem,test,standard,accuracy,precision,recall,f1\n"
        for comparison in comparisons:
            # commas inside names/authors are stripped rather than quoted
            content += ",".join([comparison.poem.name.replace(",", ""),
                                 comparison.test.author.replace(",", ""),
                                 comparison.standard.author.replace(",", ""),
                                 "{:.3f}".format(comparison.accuracy),
                                 "{:.3f}".format(comparison.precision),
                                 "{:.3f}".format(comparison.recall),
                                 "{:.3f}".format(comparison.f1)]) + "\n"
        response.content = content
        response["Content-Disposition"] = "attachment; filename={0}".format(
            "comparison" + str(standard_pk) + "-" + str(test_pk) + ".csv")
return response | 0.755997 | 0.430447 |
import logging
import math
from typing import Tuple, Dict
import numpy as np
from jgtextrank.utility import MultiprocPool
_logger = logging.getLogger("jgtextrank.metrics")
__author__ = '<NAME> <<EMAIL>>'
__all__ = ["_get_max_score", "_get_average_score", "_get_sum_score", "_term_size_normalize",
"_log_normalise", "_probability_density", "_gaussian_normalise", "_get_plus_score",
"TermGraphValue", "GCValue"]
def _get_max_score(all_syntactic_units, all_vertices):
"""
get max term unit score (normalised by term unit frequency in MWTs)
:param all_syntactic_units:
:param all_vertices:
:return:
"""
# print("all_vertices: ", all_vertices)
# print("collapsed_term: ", collapsed_term)
# max_score = max([all_vertices[term_unit] / float(all_syntactic_units.count(term_unit)) for term_unit in collapsed_term.split(' ')])
max_score = max(
[all_vertices[term_unit] / float(all_syntactic_units.count(term_unit)) for term_unit in all_syntactic_units])
return max_score
def _get_average_score(all_syntactic_units, all_vertices, unit_size):
    """
    Mean of the frequency-normalised vertex scores of a candidate term's tokens.

    :param all_syntactic_units: list of tokens of one candidate term
    :param all_vertices: dict mapping token -> vertex weight
    :param unit_size: number of tokens in the candidate term
    :return: average normalised token score
    """
    total = 0
    for token in all_syntactic_units:
        total += all_vertices[token] / float(all_syntactic_units.count(token))
    return total / float(unit_size)
def _get_sum_score(all_syntactic_units, all_vertices):
return sum(
[all_vertices[term_unit] / float(all_syntactic_units.count(term_unit)) for term_unit in all_syntactic_units])
def _term_size_normalize(base_score, unit_size):
return base_score / float(unit_size)
def _log_normalise(base_score, mu, unit_size):
if unit_size > 1:
# print("_log_normalise with mu=", mu, " , unit_size:", unit_size)
base_score = base_score / math.log(unit_size, mu)
return base_score
def _probability_density(x_value, mu, sigma):
"""
probability density of the normal distribution
see also https://en.wikipedia.org/wiki/Normal_distribution
:param x_value:
:param mu:
:param sigma:
:return:
"""
pd = (1 / (sigma * np.sqrt(2 * math.pi))) * math.exp(- math.pow((x_value - mu), 2) / (2 * math.pow(sigma, 2)))
return pd
def _gaussian_normalise(base_score, mu, sigma, unit_size):
    """
    Gaussian normalisation of a 'base' weight: scale it by one minus the
    normal density at the term's length, so candidates near the central
    point mu rank higher.

    :param base_score: float, base weight of the candidate term
    :param mu: int, mean/centre of the term-length distribution (default 5 upstream)
    :param sigma: float, standard deviation of term length in MWTs
    :param unit_size: int, number of tokens in the candidate term
    :return: float
    """
    density = (1 / (sigma * np.sqrt(2 * math.pi))) * math.exp(- math.pow((unit_size - mu), 2) / (2 * math.pow(sigma, 2)))
    return base_score * float(1 - density)
def _get_plus_score(all_syntactic_units, boosted_term_size_range, boosted_word_length_range, combined_weight,
                    unit_size):
    """
    Experimental weighting method to provide extra small fraction weight to the final score
    More weight can be given to longer term
    :type all_syntactic_units: list (of str)
    :param all_syntactic_units: all the tokens of a candidate term(SWT or MWT)
    :type boosted_term_size_range: (int, int) | None
    :param boosted_term_size_range: range of token size of a candidate term that will be boosted with a small weight fraction
    :type boosted_word_length_range: (int, int) | None
    :param boosted_word_length_range: range of word length (number of character) that will be boosted with a small weight fraction
    :type combined_weight: float
    :param combined_weight: combined the weight (i.e., 'avg' or 'max') of current candidate term
        This weight is important and used as base value for final boosted weight
    :type unit_size: int
    :param unit_size: token size of current candidate term
    :return: a small weight fraction that can be added to the final weight
    """
    all_syntactic_units_lengths = [len(term_unit) for term_unit in all_syntactic_units]
    min_word_length = min(all_syntactic_units_lengths)
    max_word_length = max(all_syntactic_units_lengths)
    avg_word_length = sum(all_syntactic_units_lengths) / unit_size
    plus_weight = combined_weight
    # NOTE(review): the 'in' tests below are membership tests; if callers pass
    # (lo, hi) tuples as the "(int, int)" doc above suggests, only the two
    # endpoint values match, not the values between them.  Pass range objects
    # for true interval semantics -- confirm with callers before changing.
    if boosted_word_length_range is not None and boosted_term_size_range is not None \
            and unit_size in boosted_term_size_range and min_word_length in boosted_word_length_range \
            and max_word_length in boosted_word_length_range:
        # add a small fraction to the final weight when all the syntactic unit length in in a normal range
        plus_weight = combined_weight * math.log(avg_word_length, 2)
    elif boosted_word_length_range is None and boosted_term_size_range is not None and unit_size in boosted_term_size_range:
        plus_weight = combined_weight * math.log(avg_word_length, 2)
    elif boosted_word_length_range is not None and boosted_term_size_range is None and \
            min_word_length in boosted_word_length_range and max_word_length in boosted_word_length_range:
        plus_weight = combined_weight * math.log(avg_word_length, 2)
    return plus_weight
class TermGraphValue(object):
    """
    Metrics to weigh Multi-Word Terms(MWTs)
    """
    def __init__(self, weight_comb="norm_max", mu=5, parallel_workers=1):
        # weight_comb selects the scoring formula used by g_value();
        # mu is the centre point for the log/gaussian normalisations;
        # parallel_workers is the process-pool size used by weighing()
        self._logger = logging.getLogger("jgtextrank.metrics")
        self._logger.info(self.__class__.__name__)
        self.parallel_workers = parallel_workers
        self.weight_comb = weight_comb
        self.mu = mu
    @staticmethod
    def g_value(collapsed_term, all_vertices, weight_comb="norm_sum", mu=5, **kwargs):
        """Score one tokenised candidate term by the chosen weight combination.

        Combines the base statistic named in weight_comb ('avg'/'sum'/'max')
        with an optional normalisation ('norm_', 'log_norm_', 'gaussian_norm_',
        'len_log_norm_').  kwargs may carry 'sigma' for the gaussian variants.
        Raises ValueError for an unsupported weight_comb.

        NOTE(review): the default here is "norm_sum" while __init__ defaults
        to "norm_max" -- instance calls always pass self.weight_comb, so the
        mismatch only matters for direct static calls.
        """
        final_score = float(0)
        log2a = 0
        avg_score = 0
        sum_score = 0
        max_score = 0
        sigma = 0
        if "sigma" in kwargs:
            sigma = kwargs["sigma"]
        # compute term length (i.e.,number of words/tokens)
        # all_syntactic_units = collapsed_term.split(' ')
        all_syntactic_units = collapsed_term
        unit_size = len(all_syntactic_units)
        if "len_log" in weight_comb:
            # log(a + 0.1) to smooth unigrams
            log2a = math.log2(unit_size + 0.1)
        if "avg" in weight_comb:
            avg_score = _get_average_score(all_syntactic_units, all_vertices, unit_size)
        if "sum" in weight_comb:
            sum_score = _get_sum_score(all_syntactic_units, all_vertices)
        if "max" in weight_comb:
            max_score = _get_max_score(all_syntactic_units, all_vertices)
        if weight_comb == "avg":
            final_score = avg_score
        elif weight_comb == "norm_avg":
            final_score = _term_size_normalize(avg_score, unit_size)
        elif weight_comb == "log_norm_avg":
            final_score = _log_normalise(avg_score, mu, unit_size)
        elif weight_comb == "gaussian_norm_avg":
            final_score = _gaussian_normalise(avg_score, mu, sigma, unit_size)
        elif weight_comb == "len_log_norm_avg":
            final_score = log2a * avg_score
        elif weight_comb == "sum":
            final_score = sum_score
        elif weight_comb == "norm_sum":
            final_score = _term_size_normalize(sum_score, unit_size)
        elif weight_comb == "log_norm_sum":
            final_score = _log_normalise(sum_score, mu, unit_size)
        elif weight_comb == "gaussian_norm_sum":
            final_score = _gaussian_normalise(sum_score, mu, sigma, unit_size)
        elif weight_comb == "len_log_norm_sum":
            final_score = log2a * sum_score
        elif weight_comb == "max":
            final_score = max_score
        elif weight_comb == "norm_max":
            final_score = _term_size_normalize(max_score, unit_size)
        elif weight_comb == "log_norm_max":
            final_score = _log_normalise(max_score, mu, unit_size)
        elif weight_comb == "gaussian_norm_max":
            final_score = _gaussian_normalise(max_score, mu, sigma, unit_size)
        elif weight_comb == "len_log_norm_max":
            final_score = log2a * max_score
        else:
            raise ValueError("Unsupported weight combination option: '%s'", weight_comb)
        return round(final_score, 5)
    def _is_top_t_vertices_connection(self, collapsed_term, top_t_vertices):
        """
        :type collapsed_term: list [of list [of string]]
        :param collapsed_term: list of tokenised terms collapsed from original context that will form Single-word term or Multi-word Term
        :param top_t_vertices: top T weighted vertices
        :return: True if the input contains any of top T vertex
        """
        return any(top_t_vertex[0] in collapsed_term for top_t_vertex in top_t_vertices)
    def _concatenate_terms(self, weighted_candidates) -> Dict[str, float]:
        # join token lists back into surface strings keyed to their scores
        return dict((" ".join(tokenised_term), score) for tokenised_term, score in weighted_candidates)
    def _get_sigma_from_all_candidates(self, collapsed_terms):
        """
        compute standard deviation of term length in MWTs
        :param collapsed_terms: list, list of tokenised terms
        :rtype: ndarray
        :return: standard_deviation
        """
        all_terms_size = [len(collapsed_term) for collapsed_term in collapsed_terms]
        return np.std(all_terms_size)
    def weighing(self, all_candidates, all_vertices, top_t_vertices) -> Dict[str, float]:
        """Score (in parallel) every candidate connected to a top-T vertex.

        Returns {surface term: score}; empty dict when there are no candidates.
        """
        if all_candidates is None or len(all_candidates) == 0:
            self._logger.info("No candidate found. Skip weighing.")
            return {}
        self._logger.info(" Total [%s] candidates to weigh...", len(all_candidates))
        sigma = 0
        # NOTE(review): sigma is computed for every "*norm*" weight_comb even
        # though g_value only consumes it for the gaussian variants
        if "norm" in self.weight_comb:
            sigma = self._get_sigma_from_all_candidates(all_candidates)
        with MultiprocPool(processes=int(self.parallel_workers)) as pool:
            optional_params = dict()
            optional_params["weight_comb"] = self.weight_comb
            optional_params["mu"] = self.mu
            if sigma != 0:
                optional_params["sigma"] = sigma
            weighted_all_candidates = pool.starmap(TermGraphValue.calculate,
                                                   [(candidate, all_candidates, all_vertices, optional_params) for
                                                    candidate
                                                    in all_candidates if
                                                    self._is_top_t_vertices_connection(candidate, top_t_vertices)])
        return self._concatenate_terms(weighted_all_candidates)
    @staticmethod
    def calculate(candidate_term, all_candidates, all_vertices, optional_params=None) -> Tuple[str, float]:
        """Worker entry point: unpack optional_params and score one candidate."""
        if optional_params is None:
            optional_params = dict()
        weight_comb = "norm_max"
        if "weight_comb" in optional_params:
            weight_comb = optional_params["weight_comb"]
        mu = 5
        if "mu" in optional_params:
            mu = optional_params["mu"]
        sigma = 0
        if "sigma" in optional_params:
            sigma = optional_params["sigma"]
        final_score = TermGraphValue.g_value(candidate_term, all_vertices,
                                             weight_comb, mu, sigma=sigma)
        return (candidate_term, final_score)
class GCValue(TermGraphValue):
    """
    Experimental metrics to weight MWTs
    """
    def __init__(self, weight_comb="len_log_norm_avg", mu=5, parallel_workers=1):
        super().__init__(weight_comb, mu, parallel_workers)
    @staticmethod
    def _get_longer_terms(term, all_candidates):
        """
        the number of candidate terms that contain current term
        Simply term normalisation is applied. Could be extended with "solr_term_normaliser"
        params:
            term, current term tokens
            all candidates: all candidates
        return longer term list
        """
        try:
            return [longer_term for longer_term in all_candidates
                    if term != longer_term and set(term).issubset(set(longer_term))]
        except AttributeError:
            import traceback
            _logger.error(traceback.format_exc())
            _logger.error("AttributeError when processing candidate term [%s]", term)
            return []
    def weighing(self, all_candidates, all_vertices, top_t_vertices) -> Dict[str, float]:
        """Score every connected candidate with the gc-value in parallel."""
        if all_candidates is None or len(all_candidates) == 0:
            self._logger.info("No candidate found. Skip weighing.")
            return {}
        self._logger.info(" Total [%s] candidates to weigh...", len(all_candidates))
        with MultiprocPool(processes=int(self.parallel_workers)) as pool:
            weighted_all_candidates = pool.starmap(GCValue.calculate,
                                                   [(candidate, all_candidates, all_vertices) for candidate
                                                    in all_candidates if
                                                    self._is_top_t_vertices_connection(candidate, top_t_vertices)])
        self._logger.info(" all candidates gc-value computation is completed.")
        return super()._concatenate_terms(weighted_all_candidates)
    @staticmethod
    def _sum_ga_candidates(candidate_list, all_vertices):
        # sum of g-values of the longer (containing) candidate terms
        return sum([TermGraphValue.g_value(candidate, all_vertices, weight_comb="len_log_norm_avg") for candidate in
                    candidate_list])
    @staticmethod
    def calculate(candidate_term, all_candidates, all_vertices, optional_params=None) -> Tuple[str, float]:
        """Compute the gc-value of one candidate term (C-value-style nesting penalty).

        NOTE(review): optional_params is accepted for interface parity with
        TermGraphValue.calculate but never read; the g-value is always taken
        with weight_comb="len_log_norm_avg" regardless of the instance setting.
        """
        if optional_params is None:
            optional_params = dict()
        longer_terms = GCValue._get_longer_terms(candidate_term, all_candidates)
        a = len(candidate_term)
        # log(a + 0.1) for unigrams smoothing
        log2a = math.log(a + 0.1, 2)
        g_a = TermGraphValue.g_value(candidate_term, all_vertices, weight_comb="len_log_norm_avg")
        if longer_terms:
            # penalise terms nested inside longer candidates by the mean
            # g-value of those containing terms
            p_ta = len(longer_terms)
            sum_gb = GCValue._sum_ga_candidates(longer_terms, all_vertices)
            term_gcvalue = log2a * (g_a - (1 / p_ta) * sum_gb)
        else:
            term_gcvalue = log2a * g_a
return (candidate_term, round(term_gcvalue, 5)) | jgtextrank/metrics.py |
import logging
import math
from typing import Tuple, Dict
import numpy as np
from jgtextrank.utility import MultiprocPool
_logger = logging.getLogger("jgtextrank.metrics")
__author__ = '<NAME> <<EMAIL>>'
__all__ = ["_get_max_score", "_get_average_score", "_get_sum_score", "_term_size_normalize",
"_log_normalise", "_probability_density", "_gaussian_normalise", "_get_plus_score",
"TermGraphValue", "GCValue"]
# NOTE(review): everything from here down duplicates the metric helper
# definitions above (dataset-dump artifact); code is kept byte-identical
# and only comments are touched.
def _get_max_score(all_syntactic_units, all_vertices):
    """
    get max term unit score (normalised by term unit frequency in MWTs)
    :param all_syntactic_units: list of tokens of one candidate term
    :param all_vertices: dict mapping token -> vertex weight
    :return: the maximum normalised token score
    """
    max_score = max(
        [all_vertices[term_unit] / float(all_syntactic_units.count(term_unit)) for term_unit in all_syntactic_units])
    return max_score
def _get_average_score(all_syntactic_units, all_vertices, unit_size):
    """
    get average score from single candidate term
    :param all_syntactic_units: tokens of single candidate term
    :param all_vertices: all the vertices used for computing combined weight
    :param unit_size: size of multi-word candidate term
    :return: mean normalised token score
    """
    avg_score = _get_sum_score(all_syntactic_units, all_vertices) / float(unit_size)
    return avg_score
def _get_sum_score(all_syntactic_units, all_vertices):
    # sum of frequency-normalised vertex scores over the term's tokens
    return sum(
        [all_vertices[term_unit] / float(all_syntactic_units.count(term_unit)) for term_unit in all_syntactic_units])
def _term_size_normalize(base_score, unit_size):
    # average the combined score over the term length (number of tokens)
    return base_score / float(unit_size)
def _log_normalise(base_score, mu, unit_size):
    # dampen multi-word scores by log_mu(term length); unigrams pass through
    if unit_size > 1:
        base_score = base_score / math.log(unit_size, mu)
    return base_score
def _probability_density(x_value, mu, sigma):
    """
    probability density of the normal distribution
    see also https://en.wikipedia.org/wiki/Normal_distribution
    :param x_value: point at which to evaluate the density
    :param mu: mean of the distribution
    :param sigma: standard deviation of the distribution
    :return: density value
    """
    pd = (1 / (sigma * np.sqrt(2 * math.pi))) * math.exp(- math.pow((x_value - mu), 2) / (2 * math.pow(sigma, 2)))
    return pd
def _gaussian_normalise(base_score, mu, sigma, unit_size):
    """
    gaussian normalisation of 'base' weight
    :param base_score: float, base weight of candidate terms
    :param mu: int, mean value to set a center point (default to 5) in order to rank the candidates higher that are near the central point
        This param is only required for normalisation based MWT weighting method
    :param sigma: float64, standard deviation of term length in MWTs
    :param unit_size: int, size of MWTs
    :return:float
    """
    norm_value = 1 - _probability_density(unit_size, mu, sigma)
    return base_score * float(norm_value)
def _get_plus_score(all_syntactic_units, boosted_term_size_range, boosted_word_length_range, combined_weight,
                    unit_size):
    """
    Experimental weighting method to provide extra small fraction weight to the final score
    More weight can be given to longer term
    :type all_syntactic_units: list (of str)
    :param all_syntactic_units: all the tokens of a candidate term(SWT or MWT)
    :type boosted_term_size_range: (int, int) | None
    :param boosted_term_size_range: range of token size of a candidate term that will be boosted with a small weight fraction
    :type boosted_word_length_range: (int, int) | None
    :param boosted_word_length_range: range of word length (number of character) that will be boosted with a small weight fraction
    :type combined_weight: float
    :param combined_weight: combined the weight (i.e., 'avg' or 'max') of current candidate term
        This weight is important and used as base value for final boosted weight
    :type unit_size: int
    :param unit_size: token size of current candidate term
    :return: a small weight fraction that can be added to the final weight
    """
    all_syntactic_units_lengths = [len(term_unit) for term_unit in all_syntactic_units]
    min_word_length = min(all_syntactic_units_lengths)
    max_word_length = max(all_syntactic_units_lengths)
    avg_word_length = sum(all_syntactic_units_lengths) / unit_size
    plus_weight = combined_weight
    # NOTE(review): 'in' below is a membership test; (lo, hi) tuples only
    # match their endpoints -- pass range objects for interval semantics
    if boosted_word_length_range is not None and boosted_term_size_range is not None \
            and unit_size in boosted_term_size_range and min_word_length in boosted_word_length_range \
            and max_word_length in boosted_word_length_range:
        # add a small fraction to the final weight when all the syntactic unit length in in a normal range
        plus_weight = combined_weight * math.log(avg_word_length, 2)
    elif boosted_word_length_range is None and boosted_term_size_range is not None and unit_size in boosted_term_size_range:
        plus_weight = combined_weight * math.log(avg_word_length, 2)
    elif boosted_word_length_range is not None and boosted_term_size_range is None and \
            min_word_length in boosted_word_length_range and max_word_length in boosted_word_length_range:
        plus_weight = combined_weight * math.log(avg_word_length, 2)
    return plus_weight
class TermGraphValue(object):
    """
    Metrics to weigh Multi-Word Terms (MWTs).

    A candidate term is scored by combining the graph weights of its
    individual tokens (vertices) according to a named strategy
    (``weight_comb``), optionally normalised by term length.
    """

    def __init__(self, weight_comb="norm_max", mu=5, parallel_workers=1):
        """
        :param weight_comb: name of the weight-combination strategy (see g_value)
        :param mu: expected mean term length, used by log/gaussian normalisation
        :param parallel_workers: number of worker processes used by weighing()
        """
        self._logger = logging.getLogger("jgtextrank.metrics")
        self._logger.info(self.__class__.__name__)
        self.parallel_workers = parallel_workers
        self.weight_comb = weight_comb
        self.mu = mu

    @staticmethod
    def g_value(collapsed_term, all_vertices, weight_comb="norm_sum", mu=5, **kwargs):
        """
        Compute the combined weight of one tokenised candidate term.

        :type collapsed_term: list (of str)
        :param collapsed_term: tokens of the candidate term (already tokenised)
        :param all_vertices: mapping of vertex (token) to its graph weight
        :param weight_comb: combination strategy; one of 'avg', 'sum', 'max',
            optionally prefixed with 'norm_', 'log_norm_', 'gaussian_norm_'
            or 'len_log_norm_'
        :param mu: expected mean term length for log/gaussian normalisation
        :param kwargs: may carry 'sigma', the standard deviation of candidate
            term lengths, used by the gaussian normalisation options
        :rtype: float
        :return: final combined score rounded to 5 decimal places
        :raises ValueError: if ``weight_comb`` is not a supported option
        """
        final_score = float(0)
        log2a = 0
        avg_score = 0
        sum_score = 0
        max_score = 0
        sigma = 0
        if "sigma" in kwargs:
            sigma = kwargs["sigma"]
        # term length (i.e. number of words/tokens); the term is already tokenised
        # all_syntactic_units = collapsed_term.split(' ')
        all_syntactic_units = collapsed_term
        unit_size = len(all_syntactic_units)
        if "len_log" in weight_comb:
            # log2(a + 0.1) so unigrams (a == 1) still get a small non-zero factor
            log2a = math.log2(unit_size + 0.1)
        # compute only the base statistics the chosen strategy actually needs
        if "avg" in weight_comb:
            avg_score = _get_average_score(all_syntactic_units, all_vertices, unit_size)
        if "sum" in weight_comb:
            sum_score = _get_sum_score(all_syntactic_units, all_vertices)
        if "max" in weight_comb:
            max_score = _get_max_score(all_syntactic_units, all_vertices)
        if weight_comb == "avg":
            final_score = avg_score
        elif weight_comb == "norm_avg":
            final_score = _term_size_normalize(avg_score, unit_size)
        elif weight_comb == "log_norm_avg":
            final_score = _log_normalise(avg_score, mu, unit_size)
        elif weight_comb == "gaussian_norm_avg":
            final_score = _gaussian_normalise(avg_score, mu, sigma, unit_size)
        elif weight_comb == "len_log_norm_avg":
            final_score = log2a * avg_score
        elif weight_comb == "sum":
            final_score = sum_score
        elif weight_comb == "norm_sum":
            final_score = _term_size_normalize(sum_score, unit_size)
        elif weight_comb == "log_norm_sum":
            final_score = _log_normalise(sum_score, mu, unit_size)
        elif weight_comb == "gaussian_norm_sum":
            final_score = _gaussian_normalise(sum_score, mu, sigma, unit_size)
        elif weight_comb == "len_log_norm_sum":
            final_score = log2a * sum_score
        elif weight_comb == "max":
            final_score = max_score
        elif weight_comb == "norm_max":
            final_score = _term_size_normalize(max_score, unit_size)
        elif weight_comb == "log_norm_max":
            final_score = _log_normalise(max_score, mu, unit_size)
        elif weight_comb == "gaussian_norm_max":
            final_score = _gaussian_normalise(max_score, mu, sigma, unit_size)
        elif weight_comb == "len_log_norm_max":
            final_score = log2a * max_score
        else:
            # bug fix: ValueError does not support logging-style lazy '%s'
            # arguments, so the option name was never interpolated into the
            # message; format it explicitly instead
            raise ValueError("Unsupported weight combination option: '%s'" % weight_comb)
        return round(final_score, 5)

    def _is_top_t_vertices_connection(self, collapsed_term, top_t_vertices):
        """
        :type collapsed_term: list [of str]
        :param collapsed_term: tokens of a candidate term (SWT or MWT)
        :param top_t_vertices: top T weighted vertices as (vertex, weight) tuples
        :rtype: bool
        :return: True if the term contains any of the top T vertices
        """
        return any(top_t_vertex[0] in collapsed_term for top_t_vertex in top_t_vertices)

    def _concatenate_terms(self, weighted_candidates) -> Dict[str, float]:
        """Join each tokenised term back into a surface string keyed to its score."""
        return dict((" ".join(tokenised_term), score) for tokenised_term, score in weighted_candidates)

    def _get_sigma_from_all_candidates(self, collapsed_terms):
        """
        Compute the standard deviation of term length over all candidates.

        :param collapsed_terms: list of tokenised terms
        :rtype: numpy floating scalar
        :return: standard deviation of the candidate term sizes
        """
        all_terms_size = [len(collapsed_term) for collapsed_term in collapsed_terms]
        return np.std(all_terms_size)

    def weighing(self, all_candidates, all_vertices, top_t_vertices) -> Dict[str, float]:
        """
        Score, in parallel, every candidate connected to a top-T vertex.

        :param all_candidates: list of tokenised candidate terms
        :param all_vertices: mapping of vertex (token) to its graph weight
        :param top_t_vertices: top T weighted vertices as (vertex, weight) tuples
        :return: dict mapping each concatenated term string to its final score
        """
        if all_candidates is None or len(all_candidates) == 0:
            self._logger.info("No candidate found. Skip weighing.")
            return {}
        self._logger.info(" Total [%s] candidates to weigh...", len(all_candidates))
        sigma = 0
        # sigma is only required by the normalised strategies
        if "norm" in self.weight_comb:
            sigma = self._get_sigma_from_all_candidates(all_candidates)
        with MultiprocPool(processes=int(self.parallel_workers)) as pool:
            optional_params = dict()
            optional_params["weight_comb"] = self.weight_comb
            optional_params["mu"] = self.mu
            if sigma != 0:
                optional_params["sigma"] = sigma
            weighted_all_candidates = pool.starmap(TermGraphValue.calculate,
                                                   [(candidate, all_candidates, all_vertices, optional_params) for
                                                    candidate
                                                    in all_candidates if
                                                    self._is_top_t_vertices_connection(candidate, top_t_vertices)])
        return self._concatenate_terms(weighted_all_candidates)

    @staticmethod
    def calculate(candidate_term, all_candidates, all_vertices, optional_params=None) -> Tuple[str, float]:
        """
        Worker entry point: score one candidate term.

        :param candidate_term: tokenised candidate term
        :param all_candidates: all candidate terms (unused here; kept for
            signature compatibility with subclasses such as GCValue)
        :param all_vertices: mapping of vertex (token) to its graph weight
        :param optional_params: optional dict with 'weight_comb', 'mu', 'sigma'
        :return: (candidate_term, final_score) tuple
        """
        if optional_params is None:
            optional_params = dict()
        weight_comb = "norm_max"
        if "weight_comb" in optional_params:
            weight_comb = optional_params["weight_comb"]
        mu = 5
        if "mu" in optional_params:
            mu = optional_params["mu"]
        sigma = 0
        if "sigma" in optional_params:
            sigma = optional_params["sigma"]
        final_score = TermGraphValue.g_value(candidate_term, all_vertices,
                                             weight_comb, mu, sigma=sigma)
        return (candidate_term, final_score)
class GCValue(TermGraphValue):
    """
    Experimental C-value-style metric ("gc-value") for weighting MWTs.

    A term's g-value is discounted by the average g-value of the longer
    candidate terms that contain it.
    """

    def __init__(self, weight_comb="len_log_norm_avg", mu=5, parallel_workers=1):
        super().__init__(weight_comb, mu, parallel_workers)

    @staticmethod
    def _get_longer_terms(term, all_candidates):
        """
        Collect every candidate term that contains the current term.

        Simple term normalisation is applied; could be extended with a
        "solr_term_normaliser".

        :param term: tokens of the current term
        :param all_candidates: all tokenised candidate terms
        :return: list of longer (containing) terms; empty list on error
        """
        try:
            token_set = set(term)
            return [other for other in all_candidates
                    if other != term and token_set.issubset(set(other))]
        except AttributeError:
            import traceback
            _logger.error(traceback.format_exc())
            _logger.error("AttributeError when processing candidate term [%s]", term)
            return []

    def weighing(self, all_candidates, all_vertices, top_t_vertices) -> Dict[str, float]:
        """Score, in parallel, every candidate connected to a top-T vertex."""
        if all_candidates is None or len(all_candidates) == 0:
            self._logger.info("No candidate found. Skip weighing.")
            return {}
        self._logger.info(" Total [%s] candidates to weigh...", len(all_candidates))
        task_args = [(candidate, all_candidates, all_vertices)
                     for candidate in all_candidates
                     if self._is_top_t_vertices_connection(candidate, top_t_vertices)]
        with MultiprocPool(processes=int(self.parallel_workers)) as pool:
            scored_candidates = pool.starmap(GCValue.calculate, task_args)
            self._logger.info(" all candidates gc-value computation is completed.")
        return super()._concatenate_terms(scored_candidates)

    @staticmethod
    def _sum_ga_candidates(candidate_list, all_vertices):
        # total g-value of all the containing (longer) terms
        return sum(TermGraphValue.g_value(candidate, all_vertices, weight_comb="len_log_norm_avg")
                   for candidate in candidate_list)

    @staticmethod
    def calculate(candidate_term, all_candidates, all_vertices, optional_params=None) -> Tuple[str, float]:
        """Worker entry point: compute the gc-value of one candidate term."""
        if optional_params is None:
            optional_params = dict()
        longer_terms = GCValue._get_longer_terms(candidate_term, all_candidates)
        # log2(a + 0.1) smooths unigrams so their factor is small but non-zero
        log2a = math.log(len(candidate_term) + 0.1, 2)
        g_a = TermGraphValue.g_value(candidate_term, all_vertices, weight_comb="len_log_norm_avg")
        if longer_terms:
            p_ta = len(longer_terms)
            sum_gb = GCValue._sum_ga_candidates(longer_terms, all_vertices)
            term_gcvalue = log2a * (g_a - (1 / p_ta) * sum_gb)
        else:
            term_gcvalue = log2a * g_a
        return (candidate_term, round(term_gcvalue, 5))
from unittest import TestCase
from ghia.github import GitHub
from betamax import Betamax
from betamax.cassette import cassette
import os
import pathlib
from ghia.common import GHIA, get_rules
import importlib
import pkg_resources
import click
from ghia.Helpers import Parser
# Placeholder strings written into recorded cassettes instead of real secrets.
TOKEN_PLACEHOLDER = '<AUTH_TOKEN>'
# Real credentials come from the environment; the placeholders are used as
# fallbacks so previously recorded cassettes can still be replayed offline.
TOKEN = os.getenv('GITHUB_TOKEN', default=TOKEN_PLACEHOLDER)
USER_PLACEHOLDER = 'USER_PLACEHOLDER'
USER = os.getenv('GITHUB_USER', default=USER_PLACEHOLDER)
# Value of the HTTP Authorization header as sent by the GitHub client.
TOKEN_HEADER = 'token ' + TOKEN
# Name of the GitHub repository used by the recorded fixtures.
REPO = 'ghia_test_env'
def sanitize_token(interaction, current_cassette):
    """Before-record hook: scrub the auth token and user from the cassette."""
    auth_values = interaction.data['request']['headers'].get("Authorization")
    if auth_values is None:
        return
    # header value is stored as a sequence; the first entry holds the real token
    placeholders = current_cassette.placeholders
    placeholders.append(
        cassette.Placeholder(placeholder=TOKEN_PLACEHOLDER, replace=auth_values[0]))
    placeholders.append(
        cassette.Placeholder(placeholder=USER_PLACEHOLDER, replace=USER))
def sanitize_before_playback(interaction, current_cassette):
    # Re-insert the real GitHub user before replaying a recorded interaction,
    # so assertions against USER still match.
    interaction.replace(USER_PLACEHOLDER, USER)
# Global Betamax configuration: cassette location plus the record/playback
# sanitizer hooks defined above.
with Betamax.configure() as config:
    config.cassette_library_dir = pathlib.Path(__file__).parent / 'cassettes'
    config.define_cassette_placeholder(TOKEN_PLACEHOLDER, TOKEN_HEADER)
    config.before_record(callback=sanitize_token)
    config.before_playback(callback=sanitize_before_playback)
def configPath(name):
    """Return the path of a rules file stored next to this test module."""
    return pathlib.Path(__file__).parent.joinpath('rules', name)
class TestGit(TestCase):
    """Integration tests for the GitHub API wrapper, replayed from cassettes.

    The exact request order matters: it must match the recorded cassette.
    """

    def test_list(self):
        # Listing the repository's issues should return the number of issues
        # present when the 'issues' cassette was recorded.
        github = GitHub(TOKEN)
        with Betamax(github.session).use_cassette('issues'):
            issues = github.issues(USER, REPO)
            assert len(issues) == 116

    def test_assign(self):
        # Assigning USER to issue 124 and re-querying by assignee should show
        # USER as that issue's first assignee.
        github = GitHub(TOKEN)
        with Betamax(github.session).use_cassette('assignees'):
            github.set_issue_assignees(USER, REPO, 124, [USER])
            issues = github.issues(USER, REPO, assignee=USER)
            for issue in issues:
                if issue['number'] == 124:
                    assert issue['assignees'][0]['login'] == USER
                    break

    def test_apply_rules(self):
        # Running GHIA with the label-matching rules should leave exactly 14
        # issues assigned to USER (per the recorded cassette).
        with open(configPath('rules.match_label.cfg'), 'r') as conf:
            rules, fallback = get_rules(None, None, conf)
        ghia = GHIA(TOKEN, rules, fallback, False, GHIA.DEFAULT_STRATEGY)
        with Betamax(ghia.github.session).use_cassette('test_apply_rules'):
            ghia.run(USER, REPO)
            issues = ghia.github.issues(USER, REPO, assignee=USER)
            assert len(issues) == 14
class TestCli(TestCase):
    """Unit tests for the reposlug command-line argument parser."""

    def test_multiple(self):
        """A comma-separated list yields one (owner, repo) pair per slug."""
        ret = Parser.parse_reposlug(None, None, 'somone/repo1, somenone/repo2')
        assert len(ret) == 2
        assert ret[0][0] == 'somone'
        assert ret[0][1] == 'repo1'
        assert ret[1][0] == 'somenone'
        assert ret[1][1] == 'repo2'

    def test_invalid(self):
        """A trailing comma (empty slug) is rejected."""
        with self.assertRaises(click.BadParameter):
            Parser.parse_reposlug(None, None, 'somone/repo1,')

    def test_invalid_2(self):
        """A trailing comma after multiple slugs is rejected."""
        with self.assertRaises(click.BadParameter):
            Parser.parse_reposlug(None, None, 'somone/repo1,somone/repo1,')

    def test_invalid_3(self):
        """A trailing comma is rejected for longer slug lists too."""
        with self.assertRaises(click.BadParameter):
            Parser.parse_reposlug(
                None,
                None,
                'somone/repo1,'
                'somone/repo1,'
                'somone/repo1,'
            )

    def test_invalid_4(self):
        """A slug containing an embedded comma is rejected.

        Bug fix: this method was previously also named ``test_invalid_3``,
        which shadowed the test above so only one of the two ever ran.
        """
        with self.assertRaises(click.BadParameter):
            Parser.parse_reposlug(
                None,
                None,
                'somone/r,epo1,somone/repo1,somone/repo1,'
            )
from ghia.github import GitHub
from betamax import Betamax
from betamax.cassette import cassette
import os
import pathlib
from ghia.common import GHIA, get_rules
import importlib
import pkg_resources
import click
from ghia.Helpers import Parser
# Placeholder strings written into recorded cassettes instead of real secrets.
TOKEN_PLACEHOLDER = '<AUTH_TOKEN>'
# Real credentials come from the environment; the placeholders are used as
# fallbacks so previously recorded cassettes can still be replayed offline.
TOKEN = os.getenv('GITHUB_TOKEN', default=TOKEN_PLACEHOLDER)
USER_PLACEHOLDER = 'USER_PLACEHOLDER'
USER = os.getenv('GITHUB_USER', default=USER_PLACEHOLDER)
# Value of the HTTP Authorization header as sent by the GitHub client.
TOKEN_HEADER = 'token ' + TOKEN
# Name of the GitHub repository used by the recorded fixtures.
REPO = 'ghia_test_env'


def sanitize_token(interaction, current_cassette):
    """Before-record hook: scrub the auth token and user from the cassette."""
    headers = interaction.data['request']['headers']
    token = headers.get("Authorization")
    if token is None:
        return
    # header value is stored as a sequence; the first entry holds the real token
    current_cassette.placeholders.append(
        cassette.Placeholder(placeholder=TOKEN_PLACEHOLDER, replace=token[0])
    )
    current_cassette.placeholders.append(
        cassette.Placeholder(placeholder=USER_PLACEHOLDER, replace=USER)
    )


def sanitize_before_playback(interaction, current_cassette):
    # Re-insert the real GitHub user before replaying a recorded interaction.
    interaction.replace(USER_PLACEHOLDER, USER)


# Global Betamax configuration: cassette location plus the record/playback
# sanitizer hooks defined above.
with Betamax.configure() as config:
    config.cassette_library_dir = pathlib.Path(__file__).parent / 'cassettes'
    config.define_cassette_placeholder(TOKEN_PLACEHOLDER, TOKEN_HEADER)
    config.before_record(callback=sanitize_token)
    config.before_playback(callback=sanitize_before_playback)


def configPath(name):
    """Return the path of a rules file stored next to this test module."""
    return pathlib.Path(__file__).parent / 'rules' / name
class TestGit(TestCase):
    """Integration tests for the GitHub API wrapper, replayed from cassettes.

    The exact request order matters: it must match the recorded cassette.
    """

    def test_list(self):
        # Listing the repository's issues should return the number of issues
        # present when the 'issues' cassette was recorded.
        github = GitHub(TOKEN)
        with Betamax(github.session).use_cassette('issues'):
            issues = github.issues(USER, REPO)
            assert len(issues) == 116

    def test_assign(self):
        # Assigning USER to issue 124 and re-querying by assignee should show
        # USER as that issue's first assignee.
        github = GitHub(TOKEN)
        with Betamax(github.session).use_cassette('assignees'):
            github.set_issue_assignees(USER, REPO, 124, [USER])
            issues = github.issues(USER, REPO, assignee=USER)
            for issue in issues:
                if issue['number'] == 124:
                    assert issue['assignees'][0]['login'] == USER
                    break

    def test_apply_rules(self):
        # Running GHIA with the label-matching rules should leave exactly 14
        # issues assigned to USER (per the recorded cassette).
        with open(configPath('rules.match_label.cfg'), 'r') as conf:
            rules, fallback = get_rules(None, None, conf)
        ghia = GHIA(TOKEN, rules, fallback, False, GHIA.DEFAULT_STRATEGY)
        with Betamax(ghia.github.session).use_cassette('test_apply_rules'):
            ghia.run(USER, REPO)
            issues = ghia.github.issues(USER, REPO, assignee=USER)
            assert len(issues) == 14
class TestCli(TestCase):
    """Unit tests for the reposlug command-line argument parser."""

    def test_multiple(self):
        """A comma-separated list yields one (owner, repo) pair per slug."""
        ret = Parser.parse_reposlug(None, None, 'somone/repo1, somenone/repo2')
        assert len(ret) == 2
        assert ret[0][0] == 'somone'
        assert ret[0][1] == 'repo1'
        assert ret[1][0] == 'somenone'
        assert ret[1][1] == 'repo2'

    def test_invalid(self):
        """A trailing comma (empty slug) is rejected."""
        with self.assertRaises(click.BadParameter):
            Parser.parse_reposlug(None, None, 'somone/repo1,')

    def test_invalid_2(self):
        """A trailing comma after multiple slugs is rejected."""
        with self.assertRaises(click.BadParameter):
            Parser.parse_reposlug(None, None, 'somone/repo1,somone/repo1,')

    def test_invalid_3(self):
        """A trailing comma is rejected for longer slug lists too."""
        with self.assertRaises(click.BadParameter):
            Parser.parse_reposlug(
                None,
                None,
                'somone/repo1,'
                'somone/repo1,'
                'somone/repo1,'
            )

    def test_invalid_4(self):
        """A slug containing an embedded comma is rejected.

        Bug fix: this method was previously also named ``test_invalid_3``,
        which shadowed the test above so only one of the two ever ran.
        """
        with self.assertRaises(click.BadParameter):
            Parser.parse_reposlug(
                None,
                None,
                'somone/r,epo1,somone/repo1,somone/repo1,'
            )
import logging
import pprint
import werkzeug
import json
from odoo import http
from odoo.http import request
from odoo.http import Response
from odoo.addons.payment.models.payment_acquirer import ValidationError
# Module-level logger for the Wompi Colombia payment controllers.
_logger = logging.getLogger(__name__)
class WompiColController(http.Controller):
    """Odoo HTTP endpoints used by the Wompi Colombia payment acquirer."""

    @http.route(['/payment/wompicol/response',
                 '/payment/wompicol_test/response'],
                type='json', auth='public', csrf=False)
    def wompicol_response(self):
        """Server-to-server webhook: Wompi posts transaction status events here.

        The '/payment/wompicol_test/response' variant marks the payload as
        coming from the test environment before it is processed.
        """
        # Wompi servers will post the event information
        # {
        #     "event": "transaction.updated",
        #     "data": {
        #         "transaction": {
        #             "id": "01-1532941443-49201",
        #             "amount_in_cents": 4490000,
        #             "reference": "MZQ3X2DE2SMX",
        #             "customer_email": "<EMAIL>",
        #             "currency": "COP",
        #             "payment_method_type": "NEQUI",
        #             "redirect_url": "https://mitienda.com.co/pagos/redireccion",
        #             "status": "APPROVED",
        #             "shipping_address": null,
        #             "payment_link_id": null,
        #             "payment_source_id": null
        #         }
        #     },
        #     "sent_at": "2018-07-20T16:45:05.000Z"
        # }
        post = json.loads(request.httprequest.data)
        if post:
            # If entered on the test endpoint, let's add it to the data
            if 'wompicol_test' in request.httprequest.path:
                post["test"] = 1
            # Log the event data
            _logger.info(
                'Wompicol: entering form_feedback with post response data %s',
                pprint.pformat(post))
            # Only the part of the reference before '_' is kept.
            # NOTE(review): presumably the suffix is a retry/disambiguation
            # marker added when building the reference - confirm against the
            # acquirer model that generates it.
            ref = post.get('data', {}).get('transaction', {}).get('reference', {})
            if '_' in ref:
                post['data']['transaction']['reference'] = ref.split('_')[0]
            if post.get('noconfirm'):
                raise ValidationError('Wompicol: should not receive "noconfirm" on the controller')
            # Process the data
            request.env['payment.transaction'].sudo().form_feedback(
                post,
                'wompicol')
        else:
            _logger.info(
                'Wompicol: for feedback entered with incomplete data %s',
                pprint.pformat(post))
        return werkzeug.utils.redirect('/')

    @http.route('/payment/wompicol/client_return', type='http',
                auth='public', csrf=False)
    def wompicol_client_return(self, **post):
        """Browser redirect target: the customer returns here after paying.

        Triggers a manual fetch of the transaction data from the acquirer
        (see ``_wompicol_get_data_manually``) before sending the customer to
        the generic payment processing page.
        """
        # The client browser will comeback with the following data
        # {
        #     'env': 'test',
        #     'id': '16056-1597266116-33603'
        # }
        _logger.info('Wompicol: client browser returning. %s',
                     pprint.pformat(post))
        if post:
            id = post.get('id')
            env = post.get('env')
            # anything other than the explicit 'test' flag is treated as prod
            env = env if env == 'test' else 'prod'
            # Process the data
            request.env[
                'payment.transaction'
            ].sudo()._wompicol_get_data_manually(id, env)
        return werkzeug.utils.redirect('/payment/process')
import pprint
import werkzeug
import json
from odoo import http
from odoo.http import request
from odoo.http import Response
from odoo.addons.payment.models.payment_acquirer import ValidationError
# Module-level logger for the Wompi Colombia payment controllers.
_logger = logging.getLogger(__name__)


class WompiColController(http.Controller):
    """Odoo HTTP endpoints used by the Wompi Colombia payment acquirer."""

    @http.route(['/payment/wompicol/response',
                 '/payment/wompicol_test/response'],
                type='json', auth='public', csrf=False)
    def wompicol_response(self):
        """Server-to-server webhook: Wompi posts transaction status events here.

        The '/payment/wompicol_test/response' variant marks the payload as
        coming from the test environment before it is processed.
        """
        # Wompi servers will post the event information
        # {
        #     "event": "transaction.updated",
        #     "data": {
        #         "transaction": {
        #             "id": "01-1532941443-49201",
        #             "amount_in_cents": 4490000,
        #             "reference": "MZQ3X2DE2SMX",
        #             "customer_email": "<EMAIL>",
        #             "currency": "COP",
        #             "payment_method_type": "NEQUI",
        #             "redirect_url": "https://mitienda.com.co/pagos/redireccion",
        #             "status": "APPROVED",
        #             "shipping_address": null,
        #             "payment_link_id": null,
        #             "payment_source_id": null
        #         }
        #     },
        #     "sent_at": "2018-07-20T16:45:05.000Z"
        # }
        post = json.loads(request.httprequest.data)
        if post:
            # If entered on the test endpoint, let's add it to the data
            if 'wompicol_test' in request.httprequest.path:
                post["test"] = 1
            # Log the event data
            _logger.info(
                'Wompicol: entering form_feedback with post response data %s',
                pprint.pformat(post))
            # Only the part of the reference before '_' is kept.
            # NOTE(review): presumably the suffix is a retry/disambiguation
            # marker added when building the reference - confirm against the
            # acquirer model that generates it.
            ref = post.get('data', {}).get('transaction', {}).get('reference', {})
            if '_' in ref:
                post['data']['transaction']['reference'] = ref.split('_')[0]
            if post.get('noconfirm'):
                raise ValidationError('Wompicol: should not receive "noconfirm" on the controller')
            # Process the data
            request.env['payment.transaction'].sudo().form_feedback(
                post,
                'wompicol')
        else:
            _logger.info(
                'Wompicol: for feedback entered with incomplete data %s',
                pprint.pformat(post))
        return werkzeug.utils.redirect('/')

    @http.route('/payment/wompicol/client_return', type='http',
                auth='public', csrf=False)
    def wompicol_client_return(self, **post):
        """Browser redirect target: the customer returns here after paying.

        Triggers a manual fetch of the transaction data from the acquirer
        (see ``_wompicol_get_data_manually``) before sending the customer to
        the generic payment processing page.
        """
        # The client browser will comeback with the following data
        # {
        #     'env': 'test',
        #     'id': '16056-1597266116-33603'
        # }
        _logger.info('Wompicol: client browser returning. %s',
                     pprint.pformat(post))
        if post:
            id = post.get('id')
            env = post.get('env')
            # anything other than the explicit 'test' flag is treated as prod
            env = env if env == 'test' else 'prod'
            # Process the data
            request.env[
                'payment.transaction'
            ].sudo()._wompicol_get_data_manually(id, env)
        return werkzeug.utils.redirect('/payment/process')
from . import db
from flask_login import UserMixin
from werkzeug.security import generate_password_hash, check_password_hash
from datetime import datetime
from . import login_manager
@login_manager.user_loader
def load_user(user_id):
    """Flask-Login callback: load a User from the id stored in the session.

    Flask-Login keeps the id as a string, hence the int() cast.
    """
    # NOTE(review): a second @login_manager.user_loader near the bottom of
    # this module re-registers the loader WITHOUT the int() cast - confirm
    # which registration is intended to win.
    return User.query.get(int(user_id))
# 1. USER CLASS
class User(UserMixin, db.Model):
    """
    A registered user of the application (Flask-Login enabled).
    """
    __tablename__ = 'users'
    # creation of the user columns
    id = db.Column(db.Integer, primary_key=True)
    username = db.Column(db.String(255), index=True)
    email = db.Column(db.String(255), unique=True, index=True)
    # NOTE(review): password_hash appears unused - the password setter below
    # stores the hash in pass_secure; confirm before removing the column.
    password_hash = db.Column(db.String(255))
    pass_secure = db.Column(db.String(255))
    bio = db.Column(db.String(255))
    profile_pic_path = db.Column(db.String())
    # one-to-many relationships to content created by this user
    pitches = db.relationship("Pitch", backref="user", lazy="dynamic")
    comment = db.relationship("Comments", backref="user", lazy="dynamic")
    vote = db.relationship("Votes", backref="user", lazy="dynamic")

    # function for securing the users passwords
    @property
    def password(self):
        # write-only attribute: the plain password can never be read back
        raise AttributeError('You can not read the password Attribute')

    @password.setter
    def password(self, password):
        # store only a salted hash, never the plain text
        self.pass_secure = generate_password_hash(password)

    def verify_password(self, password):
        """Return True if the given plain password matches the stored hash."""
        return check_password_hash(self.pass_secure, password)

    def __repr__(self):
        return f'User {self.username}'
# 2. class pitches
class Pitch(db.Model):
    """
    A pitch posted by a user, grouped by category.
    """
    __tablename__ = 'pitches'
    # id = db.Column(db.Integer,primary_key = True)
    # content = db.Column(db.String())
    # category_id = db.Column(db.Integer, db.ForeignKey("categories.id"))
    # user_id = db.Column(db.Integer,db.ForeignKey("users.id"))
    id = db.Column(db.Integer, primary_key=True)
    pitch_id = db.Column(db.Integer)
    pitch_title = db.Column(db.String)
    pitch_category = db.Column(db.String)
    pitch_comment = db.Column(db.String)
    # creation timestamp; defaults to the moment the row is inserted (UTC)
    posted = db.Column(db.DateTime, default=datetime.utcnow)
    user_id = db.Column(db.Integer, db.ForeignKey("users.id"))
    # denormalised vote counters
    upvotes = db.Column(db.Integer)
    downvotes = db.Column(db.Integer)
    comment = db.relationship("Comments", backref="pitches", lazy="dynamic")
    vote = db.relationship("Votes", backref="pitches", lazy="dynamic")

    def save_pitch(self):
        """
        Persist this pitch to the database.
        """
        db.session.add(self)
        db.session.commit()

    # displaying of pitches
    @classmethod
    def get_pitches(cls, category):
        """Return all pitches belonging to the given category name."""
        pitches = Pitch.query.filter_by(pitch_category=category).all()
        return pitches

    @classmethod
    def getPitchId(cls, id):
        """Return the pitch with the given primary key, or None."""
        pitch = Pitch.query.filter_by(id=id).first()
        return pitch
# 3. CLASS COMMENT
class Comments(db.Model):
    """
    A comment a user leaves on a pitch.
    """
    __tablename__ = 'comments'
    # adding columns for our comments
    id = db.Column(db.Integer, primary_key=True)
    opinion = db.Column(db.String(255))
    user_id = db.Column(db.Integer, db.ForeignKey("users.id"))
    pitches_id = db.Column(db.Integer, db.ForeignKey("pitches.id"))

    def save_comment(self):
        """
        Persist this comment to the database.
        """
        db.session.add(self)
        db.session.commit()

    @classmethod
    def get_comments(cls, id):
        """Return all comments left on the pitch with the given id.

        Bug fix: the previous implementation ignored ``id`` and returned an
        unfiltered, unordered query object instead of the pitch's comments.
        """
        comment = Comments.query.filter_by(pitches_id=id).all()
        return comment
# 4. CLASS CATEGORY
class Category(db.Model):
    """A named category that pitches are grouped under."""
    __tablename__ = 'categories'
    # table columns
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(255))
    description = db.Column(db.String(255))

    # saving pitches
    def save_category(self):
        """Persist this category to the database."""
        db.session.add(self)
        db.session.commit()

    @classmethod
    def get_categories(cls):
        """Return every category in the table."""
        categories = Category.query.all()
        return categories
# 5. VOTES CLASS
class Votes (db.Model):
    """
    A single vote cast by a user on a pitch (used for upvotes and downvotes).
    """
    __tablename__ = 'votes'
    id = db.Column(db. Integer, primary_key=True)
    # NOTE(review): integer code describing the vote - confirm the encoding
    # (e.g. up vs down) used by the views before relying on it.
    vote = db.Column(db.Integer)
    user_id = db.Column(db.Integer, db.ForeignKey("users.id"))
    pitches_id = db.Column(db.Integer, db.ForeignKey("pitches.id"))

    def save_vote(self):
        """Persist this vote to the database."""
        db.session.add(self)
        db.session.commit()

    @classmethod
    def get_votes(cls, user_id, pitches_id):
        """Return all votes the given user has cast on the given pitch."""
        votes = Votes.query.filter_by(
            user_id=user_id, pitches_id=pitches_id).all()
        return votes

    def __repr__(self):
        return f'{self.vote}:{self.user_id}:{self.pitches_id}'
class Upvote(db.Model):
    """Records that a user upvoted a pitch."""
    __tablename__ = 'upvotes'
    id = db.Column(db.Integer,primary_key=True)
    user_id = db.Column(db.Integer,db.ForeignKey('users.id'))
    pitch_id = db.Column(db.Integer,db.ForeignKey('pitches.id'))

    def save(self):
        """Persist this upvote to the database."""
        db.session.add(self)
        db.session.commit()

    @classmethod
    def get_upvotes(cls,id):
        """Return all upvotes recorded for the pitch with the given id."""
        upvote = Upvote.query.filter_by(pitch_id=id).all()
        return upvote

    def __repr__(self):
        return f'{self.user_id}:{self.pitch_id}'
class Downvote(db.Model):
    """Records that a user downvoted a pitch."""
    __tablename__ = 'downvotes'
    id = db.Column(db.Integer,primary_key=True)
    user_id = db.Column(db.Integer,db.ForeignKey('users.id'))
    pitch_id = db.Column(db.Integer,db.ForeignKey('pitches.id'))

    def save(self):
        """Persist this downvote to the database."""
        db.session.add(self)
        db.session.commit()

    @classmethod
    def get_downvotes(cls,id):
        """Return all downvotes recorded for the pitch with the given id."""
        downvote = Downvote.query.filter_by(pitch_id=id).all()
        return downvote

    def __repr__(self):
        return f'{self.user_id}:{self.pitch_id}'
@login_manager.user_loader
def load_user(user_id):
    """Flask-Login callback: load a User from the id stored in the session.

    Bug fix: cast the session-stored id (a string) to int, consistent with
    the loader defined at the top of this module.
    NOTE(review): this re-registration shadows the earlier loader; consider
    removing one of the two definitions.
    """
    return User.query.get(int(user_id))
from flask_login import UserMixin
from werkzeug.security import generate_password_hash, check_password_hash
from datetime import datetime
from . import login_manager
@login_manager.user_loader
def load_user(user_id):
    """Flask-Login callback: load a User from the id stored in the session.

    Flask-Login keeps the id as a string, hence the int() cast.
    """
    return User.query.get(int(user_id))


# 1. USER CLASS
class User(UserMixin, db.Model):
    """
    A registered user of the application (Flask-Login enabled).
    """
    __tablename__ = 'users'
    # creation of the user columns
    id = db.Column(db.Integer, primary_key=True)
    username = db.Column(db.String(255), index=True)
    email = db.Column(db.String(255), unique=True, index=True)
    # NOTE(review): password_hash appears unused - the password setter below
    # stores the hash in pass_secure; confirm before removing the column.
    password_hash = db.Column(db.String(255))
    pass_secure = db.Column(db.String(255))
    bio = db.Column(db.String(255))
    profile_pic_path = db.Column(db.String())
    # one-to-many relationships to content created by this user
    pitches = db.relationship("Pitch", backref="user", lazy="dynamic")
    comment = db.relationship("Comments", backref="user", lazy="dynamic")
    vote = db.relationship("Votes", backref="user", lazy="dynamic")

    # function for securing the users passwords
    @property
    def password(self):
        # write-only attribute: the plain password can never be read back
        raise AttributeError('You can not read the password Attribute')

    @password.setter
    def password(self, password):
        # store only a salted hash, never the plain text
        self.pass_secure = generate_password_hash(password)

    def verify_password(self, password):
        """Return True if the given plain password matches the stored hash."""
        return check_password_hash(self.pass_secure, password)

    def __repr__(self):
        return f'User {self.username}'


# 2. class pitches
class Pitch(db.Model):
    """
    A pitch posted by a user, grouped by category.
    """
    __tablename__ = 'pitches'
    id = db.Column(db.Integer, primary_key=True)
    pitch_id = db.Column(db.Integer)
    pitch_title = db.Column(db.String)
    pitch_category = db.Column(db.String)
    pitch_comment = db.Column(db.String)
    # creation timestamp; defaults to the moment the row is inserted (UTC)
    posted = db.Column(db.DateTime, default=datetime.utcnow)
    user_id = db.Column(db.Integer, db.ForeignKey("users.id"))
    # denormalised vote counters
    upvotes = db.Column(db.Integer)
    downvotes = db.Column(db.Integer)
    comment = db.relationship("Comments", backref="pitches", lazy="dynamic")
    vote = db.relationship("Votes", backref="pitches", lazy="dynamic")

    def save_pitch(self):
        """Persist this pitch to the database."""
        db.session.add(self)
        db.session.commit()

    # displaying of pitches
    @classmethod
    def get_pitches(cls, category):
        """Return all pitches belonging to the given category name."""
        pitches = Pitch.query.filter_by(pitch_category=category).all()
        return pitches

    @classmethod
    def getPitchId(cls, id):
        """Return the pitch with the given primary key, or None."""
        pitch = Pitch.query.filter_by(id=id).first()
        return pitch


# 3. CLASS COMMENT
class Comments(db.Model):
    """
    A comment a user leaves on a pitch.
    """
    __tablename__ = 'comments'
    # adding columns for our comments
    id = db.Column(db.Integer, primary_key=True)
    opinion = db.Column(db.String(255))
    user_id = db.Column(db.Integer, db.ForeignKey("users.id"))
    pitches_id = db.Column(db.Integer, db.ForeignKey("pitches.id"))

    def save_comment(self):
        """Persist this comment to the database."""
        db.session.add(self)
        db.session.commit()

    @classmethod
    def get_comments(cls, id):
        """Return all comments left on the pitch with the given id.

        Bug fix: the previous implementation ignored ``id`` and returned an
        unfiltered, unordered query object instead of the pitch's comments.
        """
        comment = Comments.query.filter_by(pitches_id=id).all()
        return comment


# 4. CLASS CATEGORY
class Category(db.Model):
    """A named category that pitches are grouped under."""
    __tablename__ = 'categories'
    # table columns
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(255))
    description = db.Column(db.String(255))

    # saving pitches
    def save_category(self):
        """Persist this category to the database."""
        db.session.add(self)
        db.session.commit()

    @classmethod
    def get_categories(cls):
        """Return every category in the table."""
        categories = Category.query.all()
        return categories


# 5. VOTES CLASS
class Votes(db.Model):
    """
    A single vote cast by a user on a pitch (used for upvotes and downvotes).
    """
    __tablename__ = 'votes'
    id = db.Column(db.Integer, primary_key=True)
    # NOTE(review): integer code describing the vote - confirm the encoding
    # (e.g. up vs down) used by the views before relying on it.
    vote = db.Column(db.Integer)
    user_id = db.Column(db.Integer, db.ForeignKey("users.id"))
    pitches_id = db.Column(db.Integer, db.ForeignKey("pitches.id"))

    def save_vote(self):
        """Persist this vote to the database."""
        db.session.add(self)
        db.session.commit()

    @classmethod
    def get_votes(cls, user_id, pitches_id):
        """Return all votes the given user has cast on the given pitch."""
        votes = Votes.query.filter_by(
            user_id=user_id, pitches_id=pitches_id).all()
        return votes

    def __repr__(self):
        return f'{self.vote}:{self.user_id}:{self.pitches_id}'


class Upvote(db.Model):
    """Records that a user upvoted a pitch."""
    __tablename__ = 'upvotes'
    id = db.Column(db.Integer, primary_key=True)
    user_id = db.Column(db.Integer, db.ForeignKey('users.id'))
    pitch_id = db.Column(db.Integer, db.ForeignKey('pitches.id'))

    def save(self):
        """Persist this upvote to the database."""
        db.session.add(self)
        db.session.commit()

    @classmethod
    def get_upvotes(cls, id):
        """Return all upvotes recorded for the pitch with the given id."""
        upvote = Upvote.query.filter_by(pitch_id=id).all()
        return upvote

    def __repr__(self):
        return f'{self.user_id}:{self.pitch_id}'


class Downvote(db.Model):
    """Records that a user downvoted a pitch."""
    __tablename__ = 'downvotes'
    id = db.Column(db.Integer, primary_key=True)
    user_id = db.Column(db.Integer, db.ForeignKey('users.id'))
    pitch_id = db.Column(db.Integer, db.ForeignKey('pitches.id'))

    def save(self):
        """Persist this downvote to the database."""
        db.session.add(self)
        db.session.commit()

    @classmethod
    def get_downvotes(cls, id):
        """Return all downvotes recorded for the pitch with the given id."""
        downvote = Downvote.query.filter_by(pitch_id=id).all()
        return downvote

    def __repr__(self):
        return f'{self.user_id}:{self.pitch_id}'


@login_manager.user_loader
def load_user(user_id):
    """Flask-Login callback: load a User from the id stored in the session.

    Bug fix: cast the session-stored id (a string) to int, consistent with
    the loader defined at the top of this module.
    NOTE(review): this re-registration shadows the earlier loader; consider
    removing one of the two definitions.
    """
    return User.query.get(int(user_id))
import json, datetime
from tests.factories import (NOWSubmissionFactory, NOWApplicationIdentityFactory)
class TestGetApplicationResource:
    """GET /now-submissions/applications/{now_number}/status"""

    def test_get_now_application_status_by_now_number_success(self, test_client, db_session,
                                                              auth_headers):
        """Should return the correct record with a 200 response code"""
        # Arrange: one submission with an application identity.
        now_submission = NOWSubmissionFactory()
        identity = NOWApplicationIdentityFactory(now_submission=now_submission)
        # Act: fetch the status by NoW number.
        get_resp = test_client.get(
            f'/now-submissions/applications/{identity.now_number}/status',
            headers=auth_headers['full_auth_header'])
        assert get_resp.status_code == 200, get_resp.response
        get_data = json.loads(get_resp.data.decode())
        # Assert: the returned status code matches the application's.
        assert get_data['now_application_status_code'] is not None
        assert get_data[
            'now_application_status_code'] == identity.now_application.now_application_status_code

    """GET /now-submissions/applications/status?status_updated_date_since={date}"""

    def test_get_now_application_status_updates_since_success(self, test_client, db_session,
                                                              auth_headers):
        """Should return the correct records with a 200 response code"""
        today = datetime.datetime.today()
        status_updated_date = today
        identities = []
        # Arrange: three applications updated today, tomorrow and the day
        # after - all fall on or after 'today'.
        for i in range(3):
            now_submission = NOWSubmissionFactory()
            identity = NOWApplicationIdentityFactory(now_submission=now_submission)
            identity.now_application.status_updated_date = status_updated_date
            status_updated_date = status_updated_date + datetime.timedelta(days=+1)
            identity.save()
            identities.append(identity)
        # All three are within the 'since today' window.
        get_resp = test_client.get(
            f'/now-submissions/applications/status?status_updated_date_since={today}',
            headers=auth_headers['full_auth_header'])
        assert get_resp.status_code == 200, get_resp.response
        get_data = json.loads(get_resp.data.decode())
        assert len(get_data) == 3
        # Re-date the applications to today, yesterday and the day before:
        # only the one updated 'today' should remain in the window.
        status_updated_date = today
        for identity in identities:
            identity.now_application.status_updated_date = status_updated_date
            status_updated_date = status_updated_date + datetime.timedelta(days=-1)
            identity.save()
        get_resp = test_client.get(
            f'/now-submissions/applications/status?status_updated_date_since={today}',
            headers=auth_headers['full_auth_header'])
        assert get_resp.status_code == 200, get_resp.response
        get_data = json.loads(get_resp.data.decode())
        assert len(get_data) == 1
        # A cutoff far in the future matches nothing.
        get_resp = test_client.get(
            f'/now-submissions/applications/status?status_updated_date_since={today + datetime.timedelta(days=+42)}',
            headers=auth_headers['full_auth_header'])
        assert get_resp.status_code == 200, get_resp.response
        get_data = json.loads(get_resp.data.decode())
        assert len(get_data) == 0
from tests.factories import (NOWSubmissionFactory, NOWApplicationIdentityFactory)
class TestGetApplicationResource:
    """GET /now-submissions/applications/{now_number}/status"""

    def test_get_now_application_status_by_now_number_success(self, test_client, db_session,
                                                              auth_headers):
        """Should return the correct record with a 200 response code"""
        # Arrange: one submission with an application identity.
        now_submission = NOWSubmissionFactory()
        identity = NOWApplicationIdentityFactory(now_submission=now_submission)
        # Act: fetch the status by NoW number.
        get_resp = test_client.get(
            f'/now-submissions/applications/{identity.now_number}/status',
            headers=auth_headers['full_auth_header'])
        assert get_resp.status_code == 200, get_resp.response
        get_data = json.loads(get_resp.data.decode())
        # Assert: the returned status code matches the application's.
        assert get_data['now_application_status_code'] is not None
        assert get_data[
            'now_application_status_code'] == identity.now_application.now_application_status_code

    """GET /now-submissions/applications/status?status_updated_date_since={date}"""

    def test_get_now_application_status_updates_since_success(self, test_client, db_session,
                                                              auth_headers):
        """Should return the correct records with a 200 response code"""
        today = datetime.datetime.today()
        status_updated_date = today
        identities = []
        # Arrange: three applications updated today, tomorrow and the day
        # after - all fall on or after 'today'.
        for i in range(3):
            now_submission = NOWSubmissionFactory()
            identity = NOWApplicationIdentityFactory(now_submission=now_submission)
            identity.now_application.status_updated_date = status_updated_date
            status_updated_date = status_updated_date + datetime.timedelta(days=+1)
            identity.save()
            identities.append(identity)
        # All three are within the 'since today' window.
        get_resp = test_client.get(
            f'/now-submissions/applications/status?status_updated_date_since={today}',
            headers=auth_headers['full_auth_header'])
        assert get_resp.status_code == 200, get_resp.response
        get_data = json.loads(get_resp.data.decode())
        assert len(get_data) == 3
        # Re-date the applications to today, yesterday and the day before:
        # only the one updated 'today' should remain in the window.
        status_updated_date = today
        for identity in identities:
            identity.now_application.status_updated_date = status_updated_date
            status_updated_date = status_updated_date + datetime.timedelta(days=-1)
            identity.save()
        get_resp = test_client.get(
            f'/now-submissions/applications/status?status_updated_date_since={today}',
            headers=auth_headers['full_auth_header'])
        assert get_resp.status_code == 200, get_resp.response
        get_data = json.loads(get_resp.data.decode())
        assert len(get_data) == 1
        # A cutoff far in the future matches nothing.
        get_resp = test_client.get(
            f'/now-submissions/applications/status?status_updated_date_since={today + datetime.timedelta(days=+42)}',
            headers=auth_headers['full_auth_header'])
        assert get_resp.status_code == 200, get_resp.response
        get_data = json.loads(get_resp.data.decode())
        assert len(get_data) == 0
import random
import globals
# Create two concurrent lists representing a split row
# - row is the front end of the row with sec_estimate faredecode_dict
# - row1 is the back end of the row
def simulate_row(epsilon, taxi_id, spd, cp, fr):
"""Decode the packed integers spd/cp/fr into the two halves of a taxi row.

row  : front half (ids, shift, community areas, payment type, sec_estimate).
row1 : back half (fare, tips, trip_total, trip_seconds, trip_miles), each
       value sampled uniformly from its decoded bucket.
NOTE(review): the digit offsets below assume fixed-width encodings of
spd/cp/fr (with a leading pad digit on spd/cp) -- confirm against encoder.
"""
row = {}
row1 = {}
# Create row = epsilon, taxi_id, shift, company_id, pca, dca, payment_type, sec_estimate
row['epsilon'] = epsilon
row['taxi_id'] = taxi_id
row['shift'] = int(str(spd)[1:3])
row['company_id'] = int(str(cp)[1:4])
pca = int(str(spd)[3:5])
# 0 encodes an unknown community area -> -1 sentinel.
if pca == 0:
pca = -1
row['pickup_community_area'] = pca
dca = int(str(spd)[5:7])
if dca == 0:
dca = -1
row['dropoff_community_area'] = dca
pay = int(str(cp)[4:5])
# 9 encodes an unknown payment type -> -1 sentinel.
if pay == 9:
pay = -1
row['payment_type'] = pay
# Proximity estimate keyed on the concatenated pickup+dropoff digits.
pca_dca = str(spd)[3:7]
sec_estimate = globals.prox_dict[pca_dca]
row['sec_estimate'] = sec_estimate
# Create row1 = fare, tips, trip_total, trip_seconds, trip_miles
fare_range = int(str(fr)[0:2])
tips_range = int(str(fr)[2:4])
# Bucket 21 is the open-ended top bucket (fare >= 50).
if fare_range == 21:
v = 50
else:
v = globals.faredecode_dict[fare_range]
if v == 0:
value = 0
elif v == 50:
value = random.randrange(50, 100)
else:
value = random.randrange(v - 5, v)
row1['fare'] = value
fare = value
# Bucket 21 is the open-ended top bucket (tips >= 20).
if tips_range == 21:
v = 20
else:
v = globals.tipsdecode_dict[tips_range]
if v == 0:
value = 0
elif v == 20:
value = random.randrange(20, 50)
else:
value = random.randrange(v - 2, v)
row1['tips'] = value
tips = value
row1['trip_total'] = fare + tips
sec_range = int(str(fr)[4:6])
miles_range = int(str(fr)[6:8])
# Bucket 61 is the open-ended top bucket (>= 5000 seconds).
if sec_range == 61:
v = 5000
else:
v = globals.secdecode_dict[sec_range]
if v == 0:
value = 0
elif v == 5000:
value = random.randrange(5000, 10000)
else:
value = random.randrange(v - 100, v)
row1['trip_seconds'] = value
# Bucket 21 is the open-ended top bucket (>= 20 miles).
if miles_range == 21:
v = 20
else:
v = globals.milesdecode_dict[miles_range]
if v == 0:
value = 0
elif v == 20:
value = random.randrange(20, 50)
else:
value = random.randrange(v - 2, v)
row1['trip_miles'] = value
return row, row1 | src/simulate_row.py | import random
import globals
# Create two concurrent lists representing a split row
# - row is the front end of the row with sec_estimate faredecode_dict
# - row1 is the back end of the row
def simulate_row(epsilon, taxi_id, spd, cp, fr):
"""Decode the packed integers spd/cp/fr into the two halves of a taxi row.

row  : front half (ids, shift, community areas, payment type, sec_estimate).
row1 : back half (fare, tips, trip_total, trip_seconds, trip_miles), each
       value sampled uniformly from its decoded bucket.
NOTE(review): digit offsets assume fixed-width encodings of spd/cp/fr
(leading pad digit on spd/cp) -- confirm against the encoder.
"""
row = {}
row1 = {}
# Create row = epsilon, taxi_id, shift, company_id, pca, dca, payment_type, sec_estimate
row['epsilon'] = epsilon
row['taxi_id'] = taxi_id
row['shift'] = int(str(spd)[1:3])
row['company_id'] = int(str(cp)[1:4])
pca = int(str(spd)[3:5])
# 0 encodes an unknown community area -> -1 sentinel.
if pca == 0:
pca = -1
row['pickup_community_area'] = pca
dca = int(str(spd)[5:7])
if dca == 0:
dca = -1
row['dropoff_community_area'] = dca
pay = int(str(cp)[4:5])
# 9 encodes an unknown payment type -> -1 sentinel.
if pay == 9:
pay = -1
row['payment_type'] = pay
# Proximity estimate keyed on the concatenated pickup+dropoff digits.
pca_dca = str(spd)[3:7]
sec_estimate = globals.prox_dict[pca_dca]
row['sec_estimate'] = sec_estimate
# Create row1 = fare, tips, trip_total, trip_seconds, trip_miles
fare_range = int(str(fr)[0:2])
tips_range = int(str(fr)[2:4])
# Bucket 21 is the open-ended top bucket (fare >= 50).
if fare_range == 21:
v = 50
else:
v = globals.faredecode_dict[fare_range]
if v == 0:
value = 0
elif v == 50:
value = random.randrange(50, 100)
else:
value = random.randrange(v - 5, v)
row1['fare'] = value
fare = value
# Bucket 21 is the open-ended top bucket (tips >= 20).
if tips_range == 21:
v = 20
else:
v = globals.tipsdecode_dict[tips_range]
if v == 0:
value = 0
elif v == 20:
value = random.randrange(20, 50)
else:
value = random.randrange(v - 2, v)
row1['tips'] = value
tips = value
row1['trip_total'] = fare + tips
sec_range = int(str(fr)[4:6])
miles_range = int(str(fr)[6:8])
# Bucket 61 is the open-ended top bucket (>= 5000 seconds).
if sec_range == 61:
v = 5000
else:
v = globals.secdecode_dict[sec_range]
if v == 0:
value = 0
elif v == 5000:
value = random.randrange(5000, 10000)
else:
value = random.randrange(v - 100, v)
row1['trip_seconds'] = value
# Bucket 21 is the open-ended top bucket (>= 20 miles).
if miles_range == 21:
v = 20
else:
v = globals.milesdecode_dict[miles_range]
if v == 0:
value = 0
elif v == 20:
value = random.randrange(20, 50)
else:
value = random.randrange(v - 2, v)
row1['trip_miles'] = value
return row, row1 | 0.322526 | 0.572962 |
from kv1_811 import *
from inserter import insert,version_imported,versions_imported,getConnection
from settings.const import *
import urllib2
from lxml import etree
import logging
logger = logging.getLogger("importer")
url_gvb = 'http://pol.gvb.nl/gvbpublicatieinternet/KV1/'
kv1index_gvb = url_gvb+'KV1index.xml'
def getDataSource():
    """Return the static datasource record describing the GVB KV1 index feed."""
    source = {
        'operator_id': 'GVB',
        'name': 'GVB',
        'description': 'GVB KV1delta leveringen',
        'email': '<EMAIL>',
        'url': kv1index_gvb,
    }
    return {'1': source}
def getOperator():
    """Return the static operator record for GVB (Amsterdam public transport)."""
    operator = {
        'privatecode': 'GVB',
        'operator_id': 'GVB',
        'name': 'GVB',
        'phone': '0900-8011',
        'url': 'http://www.gvb.nl',
        'timezone': 'Europe/Amsterdam',
        'language': 'nl',
    }
    return {'GVB': operator}
def setLineColors():
"""Apply the hard-coded GVB house colours to metro lines 50/51/53/54.

color_shield is the background, color_text the foreground; run after
each import because the import recreates the line rows.
"""
conn = psycopg2.connect(database_connect)
cur = conn.cursor()
cur.execute("""
UPDATE line set color_shield = '187a36' where operator_id = 'GVB:50';
UPDATE line set color_text = 'ffffff' where operator_id = 'GVB:50';
UPDATE line set color_shield = 'FF6600' where operator_id = 'GVB:51';
UPDATE line set color_text = '000000' where operator_id = 'GVB:51';
UPDATE line set color_shield = 'd81118' where operator_id = 'GVB:53';
UPDATE line set color_text = 'ffffff' where operator_id = 'GVB:53';
UPDATE line set color_shield = 'fff200' where operator_id = 'GVB:54';
UPDATE line set color_text = '000000' where operator_id = 'GVB:54';
""")
cur.close()
conn.commit()
conn.close()
def calculateTimeDemandGroupsGVB(conn):
"""Build time-demand groups from the KV1 pattern/journey tables.

Returns (journeyinfo, timdemgroups):
journeyinfo  : journey_id -> {'departuretime', 'timedemandgroupref'}
timdemgroups : md5-of-points -> group dict with a 'POINTS' list.
Groups are deduplicated by hashing the (stoporder, drivetime, waittime)
tuples, so journeys with identical timing share one group.
"""
cur = conn.cursor('timdemgrps',cursor_factory=psycopg2.extras.RealDictCursor)
timdemgroup_ids = {}
timdemgroups = {}
journeyinfo = {}
cur.execute("""
SELECT concat_ws(':',dataownercode, organizationalunitcode, schedulecode, scheduletypecode, lineplanningnumber, journeynumber) as
JOURNEY_id,
array_agg(cast(patternpass.stoporder as integer) order by patternpass.stoporder) as
stoporders,array_agg(toseconds(coalesce(targetarrivaltime,targetdeparturetime),0) order by patternpass.stoporder) as
arrivaltimes,array_agg(toseconds(coalesce(targetdeparturetime,targetarrivaltime),0) order by patternpass.stoporder) as departuretimes
FROM patternpass JOIN pujopass USING (version,dataownercode,lineplanningnumber,journeypatterncode,userstopcode)
GROUP BY JOURNEY_id
""")
for row in cur:
# First point: zero drive time and zero wait at the origin stop.
points = [(row['stoporders'][0],0,0)]
dep_time = row['departuretimes'][0]
for i in range(len(row['stoporders'][:-1])):
cur_arr_time = row['arrivaltimes'][i+1]
cur_dep_time = row['departuretimes'][i+1]
# (stoporder, cumulative drive time from origin, dwell at this stop)
points.append((row['stoporders'][i+1],cur_arr_time-dep_time,cur_dep_time-cur_arr_time))
# Hash of the point list doubles as the group's identity / dedup key.
m = md5.new()
m.update(str(points))
timdemgrp = {'POINTS' : []}
for point in points:
point_dict = {'pointorder' : point[0],'totaldrivetime' : point[1], 'stopwaittime' : point[2]}
timdemgrp['POINTS'].append(point_dict)
journeyinfo[row['journey_id']] = {'departuretime' : dep_time, 'timedemandgroupref' : m.hexdigest()}
timdemgrp['operator_id'] = m.hexdigest()
timdemgroups[m.hexdigest()] = timdemgrp
cur.close()
return (journeyinfo,timdemgroups)
def import_zip(path, filename, meta=None):
    """Load one KV1 zip delivery into the database.

    meta is the delivery's entry from the GVB KV1 index (key, version,
    validity window); it is used to build the VERSION record. The line
    colours are (re)applied only after a successful insert.
    """
    deprecated, conn = load(path, filename)
    try:
        data = {}
        data['DATASOURCE'] = getDataSource()
        data['OPERATOR'] = getOperator()
        data['MERGESTRATEGY'] = []
        data['VERSION'] = {}
        data['VERSION']['1'] = {'privatecode': ':'.join(['GVB', meta['key'], meta['dataownerversion']]),
                                'datasourceref': '1',
                                'operator_id': ':'.join(['GVB', meta['key'], meta['dataownerversion']]),
                                'startdate': meta['validfrom'],
                                'versionmajor': meta['index'],
                                'versionminor': meta['dataownerversion'],
                                'enddate': meta['validthru'],
                                'description': filename}
        data['DESTINATIONDISPLAY'] = getDestinationDisplays(conn)
        data['LINE'] = getLines(conn)
        data['STOPPOINT'] = getStopPoints(conn)
        data['STOPAREA'] = getStopAreas(conn)
        data['AVAILABILITYCONDITION'] = getAvailabilityConditionsFromSchedvers(conn)
        data['PRODUCTCATEGORY'] = getBISONproductcategories()
        data['ADMINISTRATIVEZONE'] = getAdministrativeZones(conn)
        timedemandGroupRefForJourney, data['TIMEDEMANDGROUP'] = calculateTimeDemandGroupsGVB(conn)
        routeRefForPattern, data['ROUTE'] = clusterPatternsIntoRoute(conn, getPool811)
        data['JOURNEYPATTERN'] = getJourneyPatterns(routeRefForPattern, conn, data['ROUTE'])
        data['JOURNEY'] = getJourneys(timedemandGroupRefForJourney, conn)
        data['NOTICEASSIGNMENT'] = {}
        data['NOTICE'] = {}
        data['NOTICEGROUP'] = {}
        insert(data)
    finally:
        # BUG FIX: the connection used to leak when any step above raised
        # (the old ``except: raise`` closed it only on the success path).
        conn.close()
    setLineColors()
def download(url,filename,version):
u = urllib2.urlopen(url)
f = open('/tmp/'+filename, 'wb')
meta = u.info()
file_size = int(meta.getheaders("Content-Length")[0])
print "Downloading: %s Bytes: %s" % (filename, file_size)
file_size_dl = 0
block_sz = 8192
while True:
buffer = u.read(block_sz)
if not buffer:
break
file_size_dl += len(buffer)
f.write(buffer)
status = r"%10d [%3.2f%%]" % (file_size_dl, file_size_dl * 100. / file_size)
status = status + chr(8)*(len(status)+1)
print status,
print
f.close()
import_zip('/tmp',filename,version)
def multikeysort(items, columns):
    """Sort *items* (sequence of dicts) on multiple *columns*.

    A column name prefixed with '-' sorts that key in descending order.
    Returns a new sorted list; *items* is left untouched.
    """
    from operator import itemgetter
    comparers = [((itemgetter(col[1:].strip()), -1) if col.startswith('-')
                  else (itemgetter(col.strip()), 1)) for col in columns]
    def comparer(left, right):
        for fn, mult in comparers:
            result = cmp(fn(left), fn(right))
            if result:
                return mult * result
        # BUG FIX: the old code returned 0 inside the loop as soon as the
        # first key compared equal, so secondary sort columns were ignored.
        # Only report equality after *all* keys have been compared.
        return 0
    return sorted(items, cmp=comparer)
def merge():
"""Resolve overlapping GVB deliveries.

Disables every GVB availability day, then re-enables, per unit and
per date, only the day belonging to the delivery that wins the
import-order ranking (startdate ASC, enddate DESC).
"""
conn = getConnection()
cur = conn.cursor()
cur.execute("""
begin;
UPDATE availabilityconditionday set isavailable = false WHERE availabilityconditionref IN
(SELECT availabilitycondition.id FROM availabilitycondition
JOIN version ON (versionref = version.id)
JOIN datasource ON (datasourceref= datasource.id)
WHERE datasource.operator_id = 'GVB');
UPDATE availabilityconditionday set isavailable = true WHERE availabilityconditionref||':'||validdate in (
SELECT DISTINCT ON (unitcode,ac.validdate) ac.id||':'||ac.validdate
FROM
(SELECT *,generate_series(fromdate,todate,interval '1 day')::date as validdate FROM availabilitycondition) as ac JOIN
(SELECT version.id as versionref,version.operator_id,startdate,enddate,version.description,row_number() over (order by startdate ASC,enddate DESC)
as idx
FROM VERSION JOIN datasource ON (datasourceref= datasource.id)
WHERE datasource.operator_id = 'GVB'
ORDER BY startdate ASC,enddate DESC) as importorder USING (versionref)
LEFT JOIN availabilityconditionday as ad ON (ad.availabilityconditionref = ac.id AND ad.validdate = ac.validdate)
ORDER BY unitcode ASC,ac.validdate ASC,importorder.idx DESC,(ad.validdate is not null) DESC
);
commit;
""")
conn.commit()
conn.close()
def deleteversion(versionId):
"""Delete one imported GVB delivery identified by its version operator_id.

Removal order respects the FK chain: journeys, availability days,
availability conditions, then the version record itself.
"""
conn = getConnection()
cur = conn.cursor()
print 'Deleting '+versionId
print 'Delete journeys'
cur.execute("""
DELETE FROM journey WHERE availabilityconditionref IN
(SELECT DISTINCT ac.id FROM availabilitycondition as ac LEFT JOIN version ON (version.id = versionref) WHERE version.operator_id = %s)""",[versionId])
print 'Delete validdays'
cur.execute("""
DELETE FROM availabilityconditionday WHERE availabilityconditionref IN
(SELECT DISTINCT ac.id FROM availabilitycondition as ac LEFT JOIN version ON (version.id = versionref) WHERE version.operator_id = %s)""",[versionId])
print 'Delete availabilityconditions'
cur.execute("""
DELETE FROM availabilitycondition WHERE id IN
(SELECT DISTINCT ac.id FROM availabilitycondition as ac LEFT JOIN version ON (version.id = versionref) WHERE version.operator_id = %s)""",[versionId])
print 'Delete versionrecord'
cur.execute("""DELETE FROM version WHERE operator_id = %s""",[versionId])
conn.commit()
cur.close()
conn.close()
def sync():
"""Synchronise the local database with the published GVB KV1 index.

Parses the index XML, imports deliveries not yet present (oldest
validity first), and deletes local versions that left the index.
"""
tree = etree.parse(urllib2.urlopen(kv1index_gvb))
index = []
for periode in tree.findall('periode'):
file = {}
file['key'] = periode.attrib['key']
file['filename'] = periode.find('zipfile').text
file['dataownerversion'] = periode.find('versie').text
file['ispublished'] = periode.find('isgepubliceerd').text
file['publishdate'] = periode.find('publicatiedatum').text
file['isbaseline'] = (periode.find('isbaseline').text == 'true')
# Skip deliveries GVB has not published yet.
if file['ispublished'] == 'false':
continue
file['validfrom'] = periode.find('startdatum').text
file['validthru'] = periode.find('einddatum').text
file['index'] = int(periode.find('index').text)
# HACK: force one known delivery to be treated as a baseline.
if file['key'] == 'a00bac99-e404-4783-b2f7-a39d48747999':
file['isbaseline'] = True
index.append(file)
# Oldest validity window first; longer windows win ties.
index = multikeysort(index, ['validfrom', '-validthru'])
imported = versions_imported('GVB')
for f in index:
key = ':'.join(['GVB',f['key'],f['dataownerversion']])
if key not in imported:
logger.info('Import file %s version %s' % (f['filename'],str(f['dataownerversion'])))
try:
download(url_gvb+f['filename'],f['filename'],f)
except Exception as e:
print e
else:
imported.remove(key)
# Anything still in `imported` disappeared from the index: purge it.
for expiredVersion in imported:
deleteversion(expiredVersion)
merge() | importers/gvb.py | from kv1_811 import *
from inserter import insert,version_imported,versions_imported,getConnection
from settings.const import *
import urllib2
from lxml import etree
import logging
logger = logging.getLogger("importer")
url_gvb = 'http://pol.gvb.nl/gvbpublicatieinternet/KV1/'
kv1index_gvb = url_gvb+'KV1index.xml'
def getDataSource():
# Static datasource record describing the GVB KV1 index feed.
return { '1' : {
'operator_id' : 'GVB',
'name' : 'GVB',
'description' : 'GVB KV1delta leveringen',
'email' : '<EMAIL>',
'url' : kv1index_gvb}}
def getOperator():
# Static operator record for GVB (Amsterdam public transport).
return { 'GVB' : {'privatecode' : 'GVB',
'operator_id' : 'GVB',
'name' : 'GVB',
'phone' : '0900-8011',
'url' : 'http://www.gvb.nl',
'timezone' : 'Europe/Amsterdam',
'language' : 'nl'}}
def setLineColors():
# Hard-coded GVB house colours (shield = background, text = foreground)
# for metro lines 50/51/53/54; reapplied after every import.
conn = psycopg2.connect(database_connect)
cur = conn.cursor()
cur.execute("""
UPDATE line set color_shield = '187a36' where operator_id = 'GVB:50';
UPDATE line set color_text = 'ffffff' where operator_id = 'GVB:50';
UPDATE line set color_shield = 'FF6600' where operator_id = 'GVB:51';
UPDATE line set color_text = '000000' where operator_id = 'GVB:51';
UPDATE line set color_shield = 'd81118' where operator_id = 'GVB:53';
UPDATE line set color_text = 'ffffff' where operator_id = 'GVB:53';
UPDATE line set color_shield = 'fff200' where operator_id = 'GVB:54';
UPDATE line set color_text = '000000' where operator_id = 'GVB:54';
""")
cur.close()
conn.commit()
conn.close()
def calculateTimeDemandGroupsGVB(conn):
"""Build deduplicated time-demand groups from the KV1 pattern tables.

Returns (journeyinfo, timdemgroups); groups are keyed by the md5 of
their (stoporder, cumulative drive time, dwell) point list, so
journeys with identical timing share one group.
"""
cur = conn.cursor('timdemgrps',cursor_factory=psycopg2.extras.RealDictCursor)
timdemgroup_ids = {}
timdemgroups = {}
journeyinfo = {}
cur.execute("""
SELECT concat_ws(':',dataownercode, organizationalunitcode, schedulecode, scheduletypecode, lineplanningnumber, journeynumber) as
JOURNEY_id,
array_agg(cast(patternpass.stoporder as integer) order by patternpass.stoporder) as
stoporders,array_agg(toseconds(coalesce(targetarrivaltime,targetdeparturetime),0) order by patternpass.stoporder) as
arrivaltimes,array_agg(toseconds(coalesce(targetdeparturetime,targetarrivaltime),0) order by patternpass.stoporder) as departuretimes
FROM patternpass JOIN pujopass USING (version,dataownercode,lineplanningnumber,journeypatterncode,userstopcode)
GROUP BY JOURNEY_id
""")
for row in cur:
# Origin stop: zero drive time, zero dwell.
points = [(row['stoporders'][0],0,0)]
dep_time = row['departuretimes'][0]
for i in range(len(row['stoporders'][:-1])):
cur_arr_time = row['arrivaltimes'][i+1]
cur_dep_time = row['departuretimes'][i+1]
points.append((row['stoporders'][i+1],cur_arr_time-dep_time,cur_dep_time-cur_arr_time))
# md5 of the point list doubles as the group's dedup key / id.
m = md5.new()
m.update(str(points))
timdemgrp = {'POINTS' : []}
for point in points:
point_dict = {'pointorder' : point[0],'totaldrivetime' : point[1], 'stopwaittime' : point[2]}
timdemgrp['POINTS'].append(point_dict)
journeyinfo[row['journey_id']] = {'departuretime' : dep_time, 'timedemandgroupref' : m.hexdigest()}
timdemgrp['operator_id'] = m.hexdigest()
timdemgroups[m.hexdigest()] = timdemgrp
cur.close()
return (journeyinfo,timdemgroups)
def import_zip(path, filename, meta=None):
    """Load one KV1 zip delivery into the database.

    meta is the delivery's entry from the GVB KV1 index (key, version,
    validity window); it is used to build the VERSION record. The line
    colours are (re)applied only after a successful insert.
    """
    deprecated, conn = load(path, filename)
    try:
        data = {}
        data['DATASOURCE'] = getDataSource()
        data['OPERATOR'] = getOperator()
        data['MERGESTRATEGY'] = []
        data['VERSION'] = {}
        data['VERSION']['1'] = {'privatecode': ':'.join(['GVB', meta['key'], meta['dataownerversion']]),
                                'datasourceref': '1',
                                'operator_id': ':'.join(['GVB', meta['key'], meta['dataownerversion']]),
                                'startdate': meta['validfrom'],
                                'versionmajor': meta['index'],
                                'versionminor': meta['dataownerversion'],
                                'enddate': meta['validthru'],
                                'description': filename}
        data['DESTINATIONDISPLAY'] = getDestinationDisplays(conn)
        data['LINE'] = getLines(conn)
        data['STOPPOINT'] = getStopPoints(conn)
        data['STOPAREA'] = getStopAreas(conn)
        data['AVAILABILITYCONDITION'] = getAvailabilityConditionsFromSchedvers(conn)
        data['PRODUCTCATEGORY'] = getBISONproductcategories()
        data['ADMINISTRATIVEZONE'] = getAdministrativeZones(conn)
        timedemandGroupRefForJourney, data['TIMEDEMANDGROUP'] = calculateTimeDemandGroupsGVB(conn)
        routeRefForPattern, data['ROUTE'] = clusterPatternsIntoRoute(conn, getPool811)
        data['JOURNEYPATTERN'] = getJourneyPatterns(routeRefForPattern, conn, data['ROUTE'])
        data['JOURNEY'] = getJourneys(timedemandGroupRefForJourney, conn)
        data['NOTICEASSIGNMENT'] = {}
        data['NOTICE'] = {}
        data['NOTICEGROUP'] = {}
        insert(data)
    finally:
        # BUG FIX: the connection used to leak when any step above raised
        # (the old ``except: raise`` closed it only on the success path).
        conn.close()
    setLineColors()
def download(url,filename,version):
u = urllib2.urlopen(url)
f = open('/tmp/'+filename, 'wb')
meta = u.info()
file_size = int(meta.getheaders("Content-Length")[0])
print "Downloading: %s Bytes: %s" % (filename, file_size)
file_size_dl = 0
block_sz = 8192
while True:
buffer = u.read(block_sz)
if not buffer:
break
file_size_dl += len(buffer)
f.write(buffer)
status = r"%10d [%3.2f%%]" % (file_size_dl, file_size_dl * 100. / file_size)
status = status + chr(8)*(len(status)+1)
print status,
print
f.close()
import_zip('/tmp',filename,version)
def multikeysort(items, columns):
    """Sort *items* (sequence of dicts) on multiple *columns*.

    A column name prefixed with '-' sorts that key in descending order.
    Returns a new sorted list; *items* is left untouched.
    """
    from operator import itemgetter
    comparers = [((itemgetter(col[1:].strip()), -1) if col.startswith('-')
                  else (itemgetter(col.strip()), 1)) for col in columns]
    def comparer(left, right):
        for fn, mult in comparers:
            result = cmp(fn(left), fn(right))
            if result:
                return mult * result
        # BUG FIX: the old code returned 0 inside the loop as soon as the
        # first key compared equal, so secondary sort columns were ignored.
        # Only report equality after *all* keys have been compared.
        return 0
    return sorted(items, cmp=comparer)
def merge():
"""Resolve overlapping GVB deliveries: disable all GVB availability
days, then re-enable per unit/date only the day from the delivery that
wins the import-order ranking (startdate ASC, enddate DESC)."""
conn = getConnection()
cur = conn.cursor()
cur.execute("""
begin;
UPDATE availabilityconditionday set isavailable = false WHERE availabilityconditionref IN
(SELECT availabilitycondition.id FROM availabilitycondition
JOIN version ON (versionref = version.id)
JOIN datasource ON (datasourceref= datasource.id)
WHERE datasource.operator_id = 'GVB');
UPDATE availabilityconditionday set isavailable = true WHERE availabilityconditionref||':'||validdate in (
SELECT DISTINCT ON (unitcode,ac.validdate) ac.id||':'||ac.validdate
FROM
(SELECT *,generate_series(fromdate,todate,interval '1 day')::date as validdate FROM availabilitycondition) as ac JOIN
(SELECT version.id as versionref,version.operator_id,startdate,enddate,version.description,row_number() over (order by startdate ASC,enddate DESC)
as idx
FROM VERSION JOIN datasource ON (datasourceref= datasource.id)
WHERE datasource.operator_id = 'GVB'
ORDER BY startdate ASC,enddate DESC) as importorder USING (versionref)
LEFT JOIN availabilityconditionday as ad ON (ad.availabilityconditionref = ac.id AND ad.validdate = ac.validdate)
ORDER BY unitcode ASC,ac.validdate ASC,importorder.idx DESC,(ad.validdate is not null) DESC
);
commit;
""")
conn.commit()
conn.close()
def deleteversion(versionId):
"""Delete one imported GVB delivery identified by its version operator_id,
removing journeys, validity days and conditions before the version row."""
conn = getConnection()
cur = conn.cursor()
print 'Deleting '+versionId
print 'Delete journeys'
cur.execute("""
DELETE FROM journey WHERE availabilityconditionref IN
(SELECT DISTINCT ac.id FROM availabilitycondition as ac LEFT JOIN version ON (version.id = versionref) WHERE version.operator_id = %s)""",[versionId])
print 'Delete validdays'
cur.execute("""
DELETE FROM availabilityconditionday WHERE availabilityconditionref IN
(SELECT DISTINCT ac.id FROM availabilitycondition as ac LEFT JOIN version ON (version.id = versionref) WHERE version.operator_id = %s)""",[versionId])
print 'Delete availabilityconditions'
cur.execute("""
DELETE FROM availabilitycondition WHERE id IN
(SELECT DISTINCT ac.id FROM availabilitycondition as ac LEFT JOIN version ON (version.id = versionref) WHERE version.operator_id = %s)""",[versionId])
print 'Delete versionrecord'
cur.execute("""DELETE FROM version WHERE operator_id = %s""",[versionId])
conn.commit()
cur.close()
conn.close()
def sync():
"""Synchronise the local database with the published GVB KV1 index:
import missing deliveries (oldest validity first) and purge versions
that have disappeared from the index."""
tree = etree.parse(urllib2.urlopen(kv1index_gvb))
index = []
for periode in tree.findall('periode'):
file = {}
file['key'] = periode.attrib['key']
file['filename'] = periode.find('zipfile').text
file['dataownerversion'] = periode.find('versie').text
file['ispublished'] = periode.find('isgepubliceerd').text
file['publishdate'] = periode.find('publicatiedatum').text
file['isbaseline'] = (periode.find('isbaseline').text == 'true')
# Skip deliveries GVB has not published yet.
if file['ispublished'] == 'false':
continue
file['validfrom'] = periode.find('startdatum').text
file['validthru'] = periode.find('einddatum').text
file['index'] = int(periode.find('index').text)
# HACK: force one known delivery to be treated as a baseline.
if file['key'] == 'a00bac99-e404-4783-b2f7-a39d48747999':
file['isbaseline'] = True
index.append(file)
# Oldest validity window first; longer windows win ties.
index = multikeysort(index, ['validfrom', '-validthru'])
imported = versions_imported('GVB')
for f in index:
key = ':'.join(['GVB',f['key'],f['dataownerversion']])
if key not in imported:
logger.info('Import file %s version %s' % (f['filename'],str(f['dataownerversion'])))
try:
download(url_gvb+f['filename'],f['filename'],f)
except Exception as e:
print e
else:
imported.remove(key)
# Anything still in `imported` disappeared from the index: purge it.
for expiredVersion in imported:
deleteversion(expiredVersion)
merge() | 0.26923 | 0.107766 |
import xarray as xr
import numpy as np
import quaternion
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
from itertools import product
from typing import Optional
from pathlib import Path
class Plotter:
"""Matplotlib helpers for plotting simulation result xarray Datasets."""
def __init__(self):
pass
def plot(self, result: xr.Dataset, sup_title=None, figsize=(15, 7), states=None):
# Plot every requested state (default: all data variables).
if states is None:
states = result.data_vars
for state_name in states:
if state_name in result:
self.default_plotter(result[state_name], sup_title, figsize)
def plot_to_pdf(self, path: Path, result: xr.Dataset, sup_title=None, figsize=(15, 7), states=None):
# Same as plot(), but each figure is appended to a multi-page PDF
# and closed immediately to free memory.
if states is None:
states = result.data_vars
with PdfPages(str(path)) as pdf_file:
for state_name in states:
if state_name in result:
figs = self.default_plotter(result[state_name], sup_title, figsize)
for fig in figs:
pdf_file.savefig(fig)
plt.close(fig)
print(f"Results plot saved at: {path.absolute()}")
def default_plotter(self, data: xr.DataArray, sup_title: Optional[str]=None, figsize=(15, 7)):
# Plot one DataArray against its `t` coordinate; returns a tuple of
# created figures (one figure, or two for quaternion states).
fig = plt.figure(figsize=figsize)
data_values = data.values
data_t = data.t.values
plt.title(data.name)
if sup_title:
plt.suptitle(sup_title)
line_styles = ["-", "--", ":", "-."]
if np.ndim(data_values) != 1:
# Shape of the values; the first dimension is time.
shape = np.shape(data_values)[1:]
legend = []
for idx in product(*(range(dim_size) for dim_size in shape)):
# Line style cycles with the first non-time index.
plt.plot(data_t, data_values[(slice(None), *idx)], line_styles[idx[0] % 4])
legend.append(", ".join(map(str, idx)))
plt.legend(legend)
return fig,
else:
if data.dtype == np.quaternion:
# Quaternion states get a second figure in Euler angles.
plt.plot(data_t, quaternion.as_float_array(data_values))
plt.legend(["q0", "q1", "q2", "q3"])
fig2 = plt.figure(figsize=figsize)
plt.title(f"{data.name} (euler)")
plt.plot(data_t, quaternion.as_euler_angles(data_values))
plt.legend(["x", "y", "z"])
return fig, fig2
else:
plt.plot(data_t, data_values)
return fig, | cw/simulation/plotter.py | import xarray as xr
import numpy as np
import quaternion
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
from itertools import product
from typing import Optional
from pathlib import Path
class Plotter:
"""Matplotlib helpers for plotting simulation result xarray Datasets."""
def __init__(self):
pass
def plot(self, result: xr.Dataset, sup_title=None, figsize=(15, 7), states=None):
# Plot every requested state (default: all data variables).
if states is None:
states = result.data_vars
for state_name in states:
if state_name in result:
self.default_plotter(result[state_name], sup_title, figsize)
def plot_to_pdf(self, path: Path, result: xr.Dataset, sup_title=None, figsize=(15, 7), states=None):
# Same as plot(), but figures go to a multi-page PDF and are closed.
if states is None:
states = result.data_vars
with PdfPages(str(path)) as pdf_file:
for state_name in states:
if state_name in result:
figs = self.default_plotter(result[state_name], sup_title, figsize)
for fig in figs:
pdf_file.savefig(fig)
plt.close(fig)
print(f"Results plot saved at: {path.absolute()}")
def default_plotter(self, data: xr.DataArray, sup_title: Optional[str]=None, figsize=(15, 7)):
# Plot one DataArray against its `t` coordinate; returns a tuple of
# created figures (one figure, or two for quaternion states).
fig = plt.figure(figsize=figsize)
data_values = data.values
data_t = data.t.values
plt.title(data.name)
if sup_title:
plt.suptitle(sup_title)
line_styles = ["-", "--", ":", "-."]
if np.ndim(data_values) != 1:
# Shape of the values; the first dimension is time.
shape = np.shape(data_values)[1:]
legend = []
for idx in product(*(range(dim_size) for dim_size in shape)):
# Line style cycles with the first non-time index.
plt.plot(data_t, data_values[(slice(None), *idx)], line_styles[idx[0] % 4])
legend.append(", ".join(map(str, idx)))
plt.legend(legend)
return fig,
else:
if data.dtype == np.quaternion:
# Quaternion states get a second figure in Euler angles.
plt.plot(data_t, quaternion.as_float_array(data_values))
plt.legend(["q0", "q1", "q2", "q3"])
fig2 = plt.figure(figsize=figsize)
plt.title(f"{data.name} (euler)")
plt.plot(data_t, quaternion.as_euler_angles(data_values))
plt.legend(["x", "y", "z"])
return fig, fig2
else:
plt.plot(data_t, data_values)
return fig, | 0.792544 | 0.594845 |
from nmt_adaptation.util import arr2txt, rm_dupl_from_list, text2arr
import re
from os.path import join
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
def get_length_sentences_from_file(path_data: str) -> list:
    """Return the word count of every sentence in <path_data>/test.de.

    FIX: the parameter used to be annotated with the builtin ``dir``
    function, which is not a type; it is a directory path string.
    """
    with open(join(path_data, "test.de")) as text:
        return [len(sentence.split()) for sentence in text]
def get_length_sentences_from_array(arr_sentences: list) -> list:
    """Return the whitespace-token count of each sentence in *arr_sentences*."""
    return [len(sentence.split()) for sentence in arr_sentences]
def split_testset(test_df) -> tuple:
    """Split a testset DataFrame into (short, middle, long) groups.

    Buckets on the ``len_src_sent`` column: short < 10, 10 <= middle < 20,
    long >= 20. FIX: the old ``object`` annotations conveyed nothing; the
    function takes a pandas DataFrame and returns a 3-tuple of DataFrames.
    """
    short_sentences = test_df.loc[test_df['len_src_sent'] < 10]
    middle_sentences = test_df.loc[(test_df['len_src_sent'] < 20) & (test_df['len_src_sent'] >= 10)]
    long_sentences = test_df.loc[test_df['len_src_sent'] >= 20]
    return short_sentences, middle_sentences, long_sentences
def create_new_testset(test_df, root_dir, data_name):
    """Write short/middle/long splits of *test_df* to
    <root_dir>/<data_name>/{short,middle,long}_test.{de,en}.

    FIX: reuses split_testset() instead of duplicating its bucketing logic.
    """
    short_sentences, middle_sentences, long_sentences = split_testset(test_df)
    for prefix, subset in (("short", short_sentences),
                           ("middle", middle_sentences),
                           ("long", long_sentences)):
        arr2txt(subset['src'], join(root_dir, data_name, prefix + "_test.de"))
        arr2txt(subset['trg'], join(root_dir, data_name, prefix + "_test.en"))
def create_test_dict(root_dir: str, data_name: str) -> dict:
"""Bundle a testset's source/target sentences and source lengths.

Note: ``root_dir`` was annotated with the builtin ``dir`` function,
which is not a type; it is a directory path string.
"""
# Get each source and target sentences from the testset file.
source = text2arr(join(root_dir, data_name, "test." + "de"))
target = text2arr(join(root_dir, data_name, "test." + "en"))
# Count all of source(German) sentence lengths.
list_len_sentence = get_length_sentences_from_file(join(root_dir, data_name))
# With all the information, create testset dictionary
test_dict = {
"name": data_name,
"src": source,
"trg": target,
"len_src_sent": list_len_sentence
}
return test_dict
def main():
"""Build split testsets for the three corpora and plot length histograms."""
# Paths are relative to the script's expected working directory.
root_dir = "../data/custom_data"
test_GNOME_dict = create_test_dict(root_dir=root_dir, data_name='GNOME')
test_EMEA_dict = create_test_dict(root_dir=root_dir, data_name='EMEA')
test_JRC_dict = create_test_dict(root_dir=root_dir, data_name='JRC')
emea_df = pd.DataFrame(test_EMEA_dict)
gnome_df = pd.DataFrame(test_GNOME_dict)
jrc_df = pd.DataFrame(test_JRC_dict)
# Write the short/middle/long splits for each corpus.
create_new_testset(test_df=emea_df, root_dir=root_dir, data_name="EMEA")
create_new_testset(test_df=gnome_df, root_dir=root_dir, data_name="GNOME")
create_new_testset(test_df=jrc_df, root_dir=root_dir, data_name="JRC")
list_of_data = ['EMEA', 'GNOME', 'JRC']
for data in list_of_data:
# Histogram of sentence lengths per corpus: printed and plotted.
list_len_sentence = get_length_sentences_from_file(join(root_dir, data))
counts, bins = np.histogram(list_len_sentence, bins=10, range=(0, 150))
print(f'{data} ->')
print(f'counts :{counts}')
print(f'bins : {bins}')
print(f'max : {max(list_len_sentence)}')
plt.hist(list_len_sentence, bins=100)
# plt.axis([0, 140, 0, 250])
# axis([xmin,xmax,ymin,ymax])
plt.title(data)
plt.ylabel('Counts')
plt.xlabel('Length of sentence')
plt.show()
# FIX: guard the entry point so importing this module does not run the
# whole pipeline as a side effect.
if __name__ == "__main__":
    main()
#### PLOT #### | nmt_adaptation/split_testset.py | from nmt_adaptation.util import arr2txt, rm_dupl_from_list, text2arr
import re
from os.path import join
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
def get_length_sentences_from_file(path_data: str) -> list:
    """Return the word count of every sentence in <path_data>/test.de.

    FIX: the parameter used to be annotated with the builtin ``dir``
    function, which is not a type; it is a directory path string.
    """
    with open(join(path_data, "test.de")) as text:
        return [len(sentence.split()) for sentence in text]
def get_length_sentences_from_array(arr_sentences: list) -> list:
    """Return the whitespace-token count of each sentence in *arr_sentences*."""
    return [len(sentence.split()) for sentence in arr_sentences]
def split_testset(test_df) -> tuple:
    """Split a testset DataFrame into (short, middle, long) groups.

    Buckets on the ``len_src_sent`` column: short < 10, 10 <= middle < 20,
    long >= 20. FIX: the old ``object`` annotations conveyed nothing; the
    function takes a pandas DataFrame and returns a 3-tuple of DataFrames.
    """
    short_sentences = test_df.loc[test_df['len_src_sent'] < 10]
    middle_sentences = test_df.loc[(test_df['len_src_sent'] < 20) & (test_df['len_src_sent'] >= 10)]
    long_sentences = test_df.loc[test_df['len_src_sent'] >= 20]
    return short_sentences, middle_sentences, long_sentences
def create_new_testset(test_df, root_dir, data_name):
    """Write short/middle/long splits of *test_df* to
    <root_dir>/<data_name>/{short,middle,long}_test.{de,en}.

    FIX: reuses split_testset() instead of duplicating its bucketing logic.
    """
    short_sentences, middle_sentences, long_sentences = split_testset(test_df)
    for prefix, subset in (("short", short_sentences),
                           ("middle", middle_sentences),
                           ("long", long_sentences)):
        arr2txt(subset['src'], join(root_dir, data_name, prefix + "_test.de"))
        arr2txt(subset['trg'], join(root_dir, data_name, prefix + "_test.en"))
def create_test_dict(root_dir: dir, data_name: str) -> dict:
    """
    Assemble a testset dictionary for one corpus directory.

    :param root_dir: base data directory.
    :param data_name: corpus subdirectory name (e.g. 'GNOME').
    :return: dict with keys 'name', 'src' (German sentences), 'trg'
             (English sentences) and 'len_src_sent' (source word counts).
    """
    dataset_dir = join(root_dir, data_name)
    # Source/target sentences come from the parallel test.de / test.en
    # files; lengths are counted on the German (source) side.
    return {
        "name": data_name,
        "src": text2arr(join(dataset_dir, "test.de")),
        "trg": text2arr(join(dataset_dir, "test.en")),
        "len_src_sent": get_length_sentences_from_file(dataset_dir),
    }
def main():
    """
    Build per-corpus testset DataFrames, write short/middle/long splits
    to disk, and show a sentence-length histogram for each corpus.
    """
    # Base directory that holds one subdirectory per corpus.
    root_dir = "../data/custom_data"
    # One testset dict (name/src/trg/lengths) per corpus.
    test_GNOME_dict = create_test_dict(root_dir=root_dir, data_name='GNOME')
    test_EMEA_dict = create_test_dict(root_dir=root_dir, data_name='EMEA')
    test_JRC_dict = create_test_dict(root_dir=root_dir, data_name='JRC')
    emea_df = pd.DataFrame(test_EMEA_dict)
    gnome_df = pd.DataFrame(test_GNOME_dict)
    jrc_df = pd.DataFrame(test_JRC_dict)
    # Persist the length-based splits next to the original test files.
    create_new_testset(test_df=emea_df, root_dir=root_dir, data_name="EMEA")
    create_new_testset(test_df=gnome_df, root_dir=root_dir, data_name="GNOME")
    create_new_testset(test_df=jrc_df, root_dir=root_dir, data_name="JRC")
    list_of_data = ['EMEA', 'GNOME', 'JRC']
    for data in list_of_data:
        # Print a coarse 10-bin summary, then show a fine-grained plot.
        list_len_sentence = get_length_sentences_from_file(join(root_dir, data))
        counts, bins = np.histogram(list_len_sentence, bins=10, range=(0, 150))
        print(f'{data} ->')
        print(f'counts :{counts}')
        print(f'bins : {bins}')
        print(f'max : {max(list_len_sentence)}')
        plt.hist(list_len_sentence, bins=100)
        # plt.axis([0, 140, 0, 250])
        # axis([xmin,xmax,ymin,ymax])
        plt.title(data)
        plt.ylabel('Counts')
        plt.xlabel('Length of sentence')
        # Blocks until the plot window is closed, once per corpus.
        plt.show()
main()
#### PLOT #### | 0.318061 | 0.587825 |
from __future__ import division
import ast
class Expression_Parser(ast.NodeVisitor):
"""
Transformer that safely parses an expression, disallowing any complicated
functions or control structures (inline if..else is allowed though).
"""
# Boolean operators
# The AST nodes may have multiple ops and right comparators, but we
# evaluate each op individually.
_boolean_ops = {
ast.And: lambda left, right: left and right,
ast.Or: lambda left, right: left or right
}
# Binary operators
_binary_ops = {
ast.Add: lambda left, right: left + right,
ast.Sub: lambda left, right: left - right,
ast.Mult: lambda left, right: left * right,
ast.Div: lambda left, right: left / right,
ast.Mod: lambda left, right: left % right,
ast.Pow: lambda left, right: left ** right,
ast.LShift: lambda left, right: left << right,
ast.RShift: lambda left, right: left >> right,
ast.BitOr: lambda left, right: left | right,
ast.BitXor: lambda left, right: left ^ right,
ast.BitAnd: lambda left, right: left & right,
ast.FloorDiv: lambda left, right: left // right
}
# Unary operators
_unary_ops = {
ast.Invert: lambda operand: ~operand,
ast.Not: lambda operand: not operand,
ast.UAdd: lambda operand: +operand,
ast.USub: lambda operand: -operand
}
# Comparison operators
# The AST nodes may have multiple ops and right comparators, but we
# evaluate each op individually.
_compare_ops = {
ast.Eq: lambda left, right: left == right,
ast.NotEq: lambda left, right: left != right,
ast.Lt: lambda left, right: left < right,
ast.LtE: lambda left, right: left <= right,
ast.Gt: lambda left, right: left > right,
ast.GtE: lambda left, right: left >= right,
ast.Is: lambda left, right: left is right,
ast.IsNot: lambda left, right: left is not right,
ast.In: lambda left, right: left in right,
ast.NotIn: lambda left, right: left not in right
}
# Predefined variable names
_variable_names = {
'True': True,
'False': False,
'None': None
}
# Predefined functions
_function_names = {
'int': int,
'float': float,
'bool': bool
}
def __init__(self, variables=None, functions=None, assignment=False):
self._variables = None
self.variables = variables
if functions is None:
self._functions = {}
else:
self._functions = functions
self._assignment = False
self.assignment = assignment
self._used_variables = set()
self._modified_variables = {}
    def parse(self, expression, filename='<expression>'):
        """
        Parse a string `expression` and return its result.

        Any failure -- a native syntax error, a disallowed node, an unknown
        name, or an exception raised while evaluating -- is reported as a
        `SyntaxError` carrying `filename` and the offending position.
        """
        # Reset per-call bookkeeping so the `used_variables` and
        # `modified_variables` properties reflect only this parse.
        self._used_variables = set()
        self._modified_variables = {}
        try:
            return self.visit(ast.parse(expression))
        except SyntaxError as error:
            # Native syntax errors only need the file/text context added.
            error.filename = filename
            error.text = expression
            raise error
        except Exception as error:
            # Wrap every other failure in a SyntaxError so callers have a
            # single exception type to handle.
            error_type = error.__class__.__name__
            # Visitors raise with (message, lineno, col_offset) when the
            # position is known; otherwise point at the expression start.
            if len(error.args) > 2:
                line_col = error.args[1:]
            else:
                line_col = (1, 0)
            error = SyntaxError('{}: {}'.format(error_type, error.args[0]),
                                (filename,) + line_col + (expression,))
            raise error
@property
def variables(self):
"""
Retrieve the variables that exist in the scope of the parser.
This property returns a copy of the dictionary.
"""
return self._variables.copy()
@variables.setter
def variables(self, variables):
"""
Set a new variable scope for the expression parser.
If built-in keyword names `True`, `False` or `None` are used, then
this property raises a `NameError`.
"""
if variables is None:
variables = {}
else:
variables = variables.copy()
variable_names = set(variables.keys())
constant_names = set(self._variable_names.keys())
forbidden_variables = variable_names.intersection(constant_names)
if forbidden_variables:
keyword = 'keyword' if len(forbidden_variables) == 1 else 'keywords'
forbidden = ', '.join(forbidden_variables)
raise NameError('Cannot override {} {}'.format(keyword, forbidden))
self._variables = variables
@property
def assignment(self):
"""
Retrieve whether assignments are accepted by the parser.
"""
return self._assignment
@assignment.setter
def assignment(self, value):
"""
Enable or disable parsing assignments.
"""
self._assignment = bool(value)
@property
def used_variables(self):
"""
Retrieve the names of the variables that were evaluated in the most
recent call to `parse`. If `parse` failed with an exception, then
this set may be incomplete.
"""
return self._used_variables
@property
def modified_variables(self):
"""
Retrieve the variables that were set or modified in the most recent call
to `parse`. Since only one expression is allowed, this dictionary
contains at most one element. An augmented expression such as `+=` is
used, then the variable is only in this dictionary if the variable
is in the scope. If `parse` failed with any other exception, then
this dictionary may be incomplete. If the expression parser is set to
disallow assignments, then the dictionary is always empty.
This property returns a copy of the dictionary.
"""
return self._modified_variables.copy()
def generic_visit(self, node):
"""
Visitor for nodes that do not have a custom visitor.
This visitor denies any nodes that may not be part of the expression.
"""
raise SyntaxError('Node {} not allowed'.format(ast.dump(node)),
('', node.lineno, node.col_offset, ''))
def visit_Module(self, node):
"""
Visit the root module node.
"""
if len(node.body) != 1:
if len(node.body) > 1:
lineno = node.body[1].lineno
col_offset = node.body[1].col_offset
else:
lineno = 1
col_offset = 0
raise SyntaxError('Exactly one expression must be provided',
('', lineno, col_offset, ''))
return self.visit(node.body[0])
def visit_Expr(self, node):
"""
Visit an expression node.
"""
return self.visit(node.value)
def visit_BoolOp(self, node):
"""
Visit a boolean expression node.
"""
op = type(node.op)
func = self._boolean_ops[op]
result = func(self.visit(node.values[0]), self.visit(node.values[1]))
for value in node.values[2:]:
result = func(result, self.visit(value))
return result
def visit_BinOp(self, node):
"""
Visit a binary expression node.
"""
op = type(node.op)
func = self._binary_ops[op]
return func(self.visit(node.left), self.visit(node.right))
def visit_UnaryOp(self, node):
"""
Visit a unary expression node.
"""
op = type(node.op)
func = self._unary_ops[op]
return func(self.visit(node.operand))
def visit_IfExp(self, node):
"""
Visit an inline if..else expression node.
"""
return self.visit(node.body) if self.visit(node.test) else self.visit(node.orelse)
def visit_Compare(self, node):
"""
Visit a comparison expression node.
"""
result = self.visit(node.left)
for operator, comparator in zip(node.ops, node.comparators):
op = type(operator)
func = self._compare_ops[op]
result = func(result, self.visit(comparator))
return result
def visit_Call(self, node):
"""
Visit a function call node.
"""
name = node.func.id
if name in self._functions:
func = self._functions[name]
elif name in self._function_names:
func = self._function_names[name]
else:
raise NameError("Function '{}' is not defined".format(name),
node.lineno, node.col_offset)
args = [self.visit(arg) for arg in node.args]
keywords = dict([self.visit(keyword) for keyword in node.keywords])
# Python 2.7 starred arguments
if hasattr(node, 'starargs') and hasattr(node, 'kwargs'):
if node.starargs is not None or node.kwargs is not None:
raise SyntaxError('Star arguments are not supported',
('', node.lineno, node.col_offset, ''))
return func(*args, **keywords)
def visit_Assign(self, node):
"""
Visit an assignment node.
"""
if not self.assignment:
raise SyntaxError('Assignments are not allowed in this expression',
('', node.lineno, node.col_offset, ''))
if len(node.targets) != 1:
raise SyntaxError('Multiple-target assignments are not supported',
('', node.lineno, node.col_offset, ''))
if not isinstance(node.targets[0], ast.Name):
raise SyntaxError('Assignment target must be a variable name',
('', node.lineno, node.col_offset, ''))
name = node.targets[0].id
self._modified_variables[name] = self.visit(node.value)
def visit_AugAssign(self, node):
"""
Visit an augmented assignment node.
"""
if not self.assignment:
raise SyntaxError('Assignments are not allowed in this expression',
('', node.lineno, node.col_offset, ''))
if not isinstance(node.target, ast.Name):
raise SyntaxError('Assignment target must be a variable name',
('', node.lineno, node.col_offset, ''))
name = node.target.id
if name not in self._variables:
raise NameError("Assignment name '{}' is not defined".format(name),
node.lineno, node.col_offset)
op = type(node.op)
func = self._binary_ops[op]
self._modified_variables[name] = func(self._variables[name],
self.visit(node.value))
def visit_Starred(self, node):
"""
Visit a starred function keyword argument node.
"""
# pylint: disable=no-self-use
raise SyntaxError('Star arguments are not supported',
('', node.lineno, node.col_offset, ''))
def visit_keyword(self, node):
"""
Visit a function keyword argument node.
"""
if node.arg is None:
raise SyntaxError('Star arguments are not supported',
('', node.lineno, node.col_offset, ''))
return (node.arg, self.visit(node.value))
def visit_Num(self, node):
"""
Visit a literal number node.
"""
# pylint: disable=no-self-use
return node.n
def visit_Name(self, node):
"""
Visit a named variable node.
"""
if node.id in self._variables:
self._used_variables.add(node.id)
return self._variables[node.id]
if node.id in self._variable_names:
return self._variable_names[node.id]
raise NameError("Name '{}' is not defined".format(node.id),
node.lineno, node.col_offset)
def visit_NameConstant(self, node):
"""
Visit a named constant singleton node (Python 3).
"""
# pylint: disable=no-self-use
return node.value | expression/parser.py | from __future__ import division
import ast
class Expression_Parser(ast.NodeVisitor):
"""
Transformer that safely parses an expression, disallowing any complicated
functions or control structures (inline if..else is allowed though).
"""
# Boolean operators
# The AST nodes may have multiple ops and right comparators, but we
# evaluate each op individually.
_boolean_ops = {
ast.And: lambda left, right: left and right,
ast.Or: lambda left, right: left or right
}
# Binary operators
_binary_ops = {
ast.Add: lambda left, right: left + right,
ast.Sub: lambda left, right: left - right,
ast.Mult: lambda left, right: left * right,
ast.Div: lambda left, right: left / right,
ast.Mod: lambda left, right: left % right,
ast.Pow: lambda left, right: left ** right,
ast.LShift: lambda left, right: left << right,
ast.RShift: lambda left, right: left >> right,
ast.BitOr: lambda left, right: left | right,
ast.BitXor: lambda left, right: left ^ right,
ast.BitAnd: lambda left, right: left & right,
ast.FloorDiv: lambda left, right: left // right
}
# Unary operators
_unary_ops = {
ast.Invert: lambda operand: ~operand,
ast.Not: lambda operand: not operand,
ast.UAdd: lambda operand: +operand,
ast.USub: lambda operand: -operand
}
# Comparison operators
# The AST nodes may have multiple ops and right comparators, but we
# evaluate each op individually.
_compare_ops = {
ast.Eq: lambda left, right: left == right,
ast.NotEq: lambda left, right: left != right,
ast.Lt: lambda left, right: left < right,
ast.LtE: lambda left, right: left <= right,
ast.Gt: lambda left, right: left > right,
ast.GtE: lambda left, right: left >= right,
ast.Is: lambda left, right: left is right,
ast.IsNot: lambda left, right: left is not right,
ast.In: lambda left, right: left in right,
ast.NotIn: lambda left, right: left not in right
}
# Predefined variable names
_variable_names = {
'True': True,
'False': False,
'None': None
}
# Predefined functions
_function_names = {
'int': int,
'float': float,
'bool': bool
}
def __init__(self, variables=None, functions=None, assignment=False):
self._variables = None
self.variables = variables
if functions is None:
self._functions = {}
else:
self._functions = functions
self._assignment = False
self.assignment = assignment
self._used_variables = set()
self._modified_variables = {}
    def parse(self, expression, filename='<expression>'):
        """
        Parse a string `expression` and return its result.

        Any failure -- a native syntax error, a disallowed node, an unknown
        name, or an exception raised while evaluating -- is reported as a
        `SyntaxError` carrying `filename` and the offending position.
        """
        # Reset per-call bookkeeping so the `used_variables` and
        # `modified_variables` properties reflect only this parse.
        self._used_variables = set()
        self._modified_variables = {}
        try:
            return self.visit(ast.parse(expression))
        except SyntaxError as error:
            # Native syntax errors only need the file/text context added.
            error.filename = filename
            error.text = expression
            raise error
        except Exception as error:
            # Wrap every other failure in a SyntaxError so callers have a
            # single exception type to handle.
            error_type = error.__class__.__name__
            # Visitors raise with (message, lineno, col_offset) when the
            # position is known; otherwise point at the expression start.
            if len(error.args) > 2:
                line_col = error.args[1:]
            else:
                line_col = (1, 0)
            error = SyntaxError('{}: {}'.format(error_type, error.args[0]),
                                (filename,) + line_col + (expression,))
            raise error
@property
def variables(self):
"""
Retrieve the variables that exist in the scope of the parser.
This property returns a copy of the dictionary.
"""
return self._variables.copy()
@variables.setter
def variables(self, variables):
"""
Set a new variable scope for the expression parser.
If built-in keyword names `True`, `False` or `None` are used, then
this property raises a `NameError`.
"""
if variables is None:
variables = {}
else:
variables = variables.copy()
variable_names = set(variables.keys())
constant_names = set(self._variable_names.keys())
forbidden_variables = variable_names.intersection(constant_names)
if forbidden_variables:
keyword = 'keyword' if len(forbidden_variables) == 1 else 'keywords'
forbidden = ', '.join(forbidden_variables)
raise NameError('Cannot override {} {}'.format(keyword, forbidden))
self._variables = variables
@property
def assignment(self):
"""
Retrieve whether assignments are accepted by the parser.
"""
return self._assignment
@assignment.setter
def assignment(self, value):
"""
Enable or disable parsing assignments.
"""
self._assignment = bool(value)
@property
def used_variables(self):
"""
Retrieve the names of the variables that were evaluated in the most
recent call to `parse`. If `parse` failed with an exception, then
this set may be incomplete.
"""
return self._used_variables
@property
def modified_variables(self):
"""
Retrieve the variables that were set or modified in the most recent call
to `parse`. Since only one expression is allowed, this dictionary
contains at most one element. An augmented expression such as `+=` is
used, then the variable is only in this dictionary if the variable
is in the scope. If `parse` failed with any other exception, then
this dictionary may be incomplete. If the expression parser is set to
disallow assignments, then the dictionary is always empty.
This property returns a copy of the dictionary.
"""
return self._modified_variables.copy()
def generic_visit(self, node):
"""
Visitor for nodes that do not have a custom visitor.
This visitor denies any nodes that may not be part of the expression.
"""
raise SyntaxError('Node {} not allowed'.format(ast.dump(node)),
('', node.lineno, node.col_offset, ''))
def visit_Module(self, node):
"""
Visit the root module node.
"""
if len(node.body) != 1:
if len(node.body) > 1:
lineno = node.body[1].lineno
col_offset = node.body[1].col_offset
else:
lineno = 1
col_offset = 0
raise SyntaxError('Exactly one expression must be provided',
('', lineno, col_offset, ''))
return self.visit(node.body[0])
def visit_Expr(self, node):
"""
Visit an expression node.
"""
return self.visit(node.value)
def visit_BoolOp(self, node):
"""
Visit a boolean expression node.
"""
op = type(node.op)
func = self._boolean_ops[op]
result = func(self.visit(node.values[0]), self.visit(node.values[1]))
for value in node.values[2:]:
result = func(result, self.visit(value))
return result
def visit_BinOp(self, node):
"""
Visit a binary expression node.
"""
op = type(node.op)
func = self._binary_ops[op]
return func(self.visit(node.left), self.visit(node.right))
def visit_UnaryOp(self, node):
"""
Visit a unary expression node.
"""
op = type(node.op)
func = self._unary_ops[op]
return func(self.visit(node.operand))
def visit_IfExp(self, node):
"""
Visit an inline if..else expression node.
"""
return self.visit(node.body) if self.visit(node.test) else self.visit(node.orelse)
def visit_Compare(self, node):
"""
Visit a comparison expression node.
"""
result = self.visit(node.left)
for operator, comparator in zip(node.ops, node.comparators):
op = type(operator)
func = self._compare_ops[op]
result = func(result, self.visit(comparator))
return result
def visit_Call(self, node):
"""
Visit a function call node.
"""
name = node.func.id
if name in self._functions:
func = self._functions[name]
elif name in self._function_names:
func = self._function_names[name]
else:
raise NameError("Function '{}' is not defined".format(name),
node.lineno, node.col_offset)
args = [self.visit(arg) for arg in node.args]
keywords = dict([self.visit(keyword) for keyword in node.keywords])
# Python 2.7 starred arguments
if hasattr(node, 'starargs') and hasattr(node, 'kwargs'):
if node.starargs is not None or node.kwargs is not None:
raise SyntaxError('Star arguments are not supported',
('', node.lineno, node.col_offset, ''))
return func(*args, **keywords)
def visit_Assign(self, node):
"""
Visit an assignment node.
"""
if not self.assignment:
raise SyntaxError('Assignments are not allowed in this expression',
('', node.lineno, node.col_offset, ''))
if len(node.targets) != 1:
raise SyntaxError('Multiple-target assignments are not supported',
('', node.lineno, node.col_offset, ''))
if not isinstance(node.targets[0], ast.Name):
raise SyntaxError('Assignment target must be a variable name',
('', node.lineno, node.col_offset, ''))
name = node.targets[0].id
self._modified_variables[name] = self.visit(node.value)
def visit_AugAssign(self, node):
"""
Visit an augmented assignment node.
"""
if not self.assignment:
raise SyntaxError('Assignments are not allowed in this expression',
('', node.lineno, node.col_offset, ''))
if not isinstance(node.target, ast.Name):
raise SyntaxError('Assignment target must be a variable name',
('', node.lineno, node.col_offset, ''))
name = node.target.id
if name not in self._variables:
raise NameError("Assignment name '{}' is not defined".format(name),
node.lineno, node.col_offset)
op = type(node.op)
func = self._binary_ops[op]
self._modified_variables[name] = func(self._variables[name],
self.visit(node.value))
def visit_Starred(self, node):
"""
Visit a starred function keyword argument node.
"""
# pylint: disable=no-self-use
raise SyntaxError('Star arguments are not supported',
('', node.lineno, node.col_offset, ''))
def visit_keyword(self, node):
"""
Visit a function keyword argument node.
"""
if node.arg is None:
raise SyntaxError('Star arguments are not supported',
('', node.lineno, node.col_offset, ''))
return (node.arg, self.visit(node.value))
def visit_Num(self, node):
"""
Visit a literal number node.
"""
# pylint: disable=no-self-use
return node.n
def visit_Name(self, node):
"""
Visit a named variable node.
"""
if node.id in self._variables:
self._used_variables.add(node.id)
return self._variables[node.id]
if node.id in self._variable_names:
return self._variable_names[node.id]
raise NameError("Name '{}' is not defined".format(node.id),
node.lineno, node.col_offset)
def visit_NameConstant(self, node):
"""
Visit a named constant singleton node (Python 3).
"""
# pylint: disable=no-self-use
return node.value | 0.858511 | 0.843444 |
__authors__ = ['<EMAIL> (<NAME>)',
'<EMAIL> (<NAME>)']
import time
from viewfinder.backend.base import util
from viewfinder.backend.base.testing import async_test
from viewfinder.backend.db.notification import Notification
from viewfinder.backend.db.operation import Operation
from viewfinder.backend.op.op_context import EnterOpContext
from base_test import DBBaseTestCase
class NotificationTestCase(DBBaseTestCase):
    def testSimulateNotificationRaces(self):
        """Try to create notification with same id twice to simulate race condition."""
        # Build a notification with a fixed (user_id, notification_id=100) key.
        notification = Notification(self._user.user_id, 100)
        notification.name = 'test'
        notification.timestamp = time.time()
        notification.sender_id = self._user.user_id
        notification.sender_device_id = 1
        notification.badge = 0
        notification.activity_id = 'a123'
        notification.viewpoint_id = 'v123'
        # First write with this id should succeed (id not yet taken).
        success = self._RunAsync(notification._TryUpdate, self._client)
        self.assertTrue(success)
        notification.badge = 1
        # Second write with the same id must report failure, emulating the
        # losing side of a creation race.  NOTE(review): presumably
        # _TryUpdate is a conditional (create-if-absent) write -- confirm
        # against Notification's implementation.
        success = self._RunAsync(notification._TryUpdate, self._client)
        self.assertFalse(success)
def testNotificationRaces(self):
"""Concurrently create many notifications to force races."""
op = Operation(1, 'o123')
with util.ArrayBarrier(self.stop) as b:
for i in xrange(10):
Notification.CreateForUser(self._client,
op,
1,
'test',
callback=b.Callback(),
invalidate={'invalid': True},
activity_id='a123',
viewpoint_id='v%d' % i,
inc_badge=True)
notifications = self.wait()
for i, notification in enumerate(notifications):
self.assertEqual(notification.user_id, 1)
self.assertEqual(notification.name, 'test')
self.assertEqual(notification.activity_id, 'a123')
self.assertEqual(notification.viewpoint_id, 'v%d' % i)
self.assertEqual(notification.badge, i + 1) | backend/db/test/notification_test.py | __authors__ = ['<EMAIL> (<NAME>)',
'<EMAIL> (<NAME>)']
import time
from viewfinder.backend.base import util
from viewfinder.backend.base.testing import async_test
from viewfinder.backend.db.notification import Notification
from viewfinder.backend.db.operation import Operation
from viewfinder.backend.op.op_context import EnterOpContext
from base_test import DBBaseTestCase
class NotificationTestCase(DBBaseTestCase):
    def testSimulateNotificationRaces(self):
        """Try to create notification with same id twice to simulate race condition."""
        # Build a notification with a fixed (user_id, notification_id=100) key.
        notification = Notification(self._user.user_id, 100)
        notification.name = 'test'
        notification.timestamp = time.time()
        notification.sender_id = self._user.user_id
        notification.sender_device_id = 1
        notification.badge = 0
        notification.activity_id = 'a123'
        notification.viewpoint_id = 'v123'
        # First write with this id should succeed (id not yet taken).
        success = self._RunAsync(notification._TryUpdate, self._client)
        self.assertTrue(success)
        notification.badge = 1
        # Second write with the same id must report failure, emulating the
        # losing side of a creation race.  NOTE(review): presumably
        # _TryUpdate is a conditional (create-if-absent) write -- confirm
        # against Notification's implementation.
        success = self._RunAsync(notification._TryUpdate, self._client)
        self.assertFalse(success)
def testNotificationRaces(self):
"""Concurrently create many notifications to force races."""
op = Operation(1, 'o123')
with util.ArrayBarrier(self.stop) as b:
for i in xrange(10):
Notification.CreateForUser(self._client,
op,
1,
'test',
callback=b.Callback(),
invalidate={'invalid': True},
activity_id='a123',
viewpoint_id='v%d' % i,
inc_badge=True)
notifications = self.wait()
for i, notification in enumerate(notifications):
self.assertEqual(notification.user_id, 1)
self.assertEqual(notification.name, 'test')
self.assertEqual(notification.activity_id, 'a123')
self.assertEqual(notification.viewpoint_id, 'v%d' % i)
self.assertEqual(notification.badge, i + 1) | 0.568176 | 0.156523 |
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import argparse
parser = argparse.ArgumentParser(description='Runs a flight simulation.')
parser.add_argument('--final_mass', type=float, default=0.9, help='The rockets total dry weight without the motor. This value is in kg.')
parser.add_argument('--propellant_mass', type=float, default=0.06, help='The weight of the propellant from the rockets motor. This value is in kg.')
parser.add_argument('--motor_mass', type=float, default=0.09, help='The total weight of the rockets motor. This value is in kg.')
args = parser.parse_args()
data = pd.read_csv('F15-0.csv')
meters_per_seconds_squared = [] # change in velocity per time
meter_per_seconds = []
def rocket_equation(final_mass=args.final_mass, motor_mass=args.motor_mass, propellant_mass=args.propellant_mass):
    """
    Simulate vehicle acceleration and velocity over the F15-0 thrust
    curve loaded in the module-level `data` DataFrame, then plot both.

    :param final_mass: rocket dry mass without the motor (kg).
    :param motor_mass: total motor mass including propellant (kg).
    :param propellant_mass: propellant mass burned during the flight (kg).
    """
    # (motor - propellant) + propellant is just motor_mass; the form is
    # kept to mirror the per-step mass update inside the loop.
    delta_motor_mass = (motor_mass - propellant_mass) + propellant_mass
    initial_mass = final_mass + delta_motor_mass
    print(f'Initial mass: {initial_mass}')
    thrust = data[['Thrust(N)']].to_numpy()  # 1N = 1 kg * m / s^2
    delta_time = data[['Time(Sec)']].to_numpy()  # time in seconds that has passed since the last frame
    for i in range(len(thrust)):
        if propellant_mass != 0.00:
            # Burn 10 g of propellant per sample until it is exhausted.
            propellant_mass = round(propellant_mass - 0.01, 2)
            delta_motor_mass = (motor_mass - args.propellant_mass) + propellant_mass  # 90 grams of Total Motor Mass
            initial_mass = final_mass + delta_motor_mass
        # a = F / m for the current (shrinking) vehicle mass.
        meters_per_seconds_squared.append(thrust[i] / round(initial_mass, 2))
        # NOTE(review): this multiplies acceleration by the sample's
        # timestamp, not by the time step, so it is not a true velocity
        # integration -- confirm intent before changing.
        meter_per_seconds.append(meters_per_seconds_squared[-1] * delta_time[i])
    # Convert to arrays once after the loop instead of rebuilding them on
    # every iteration (the original was accidentally O(n^2)).
    acceleration = np.asarray(meters_per_seconds_squared)
    velocity = np.asarray(meter_per_seconds)
    print(f'Final mass: {initial_mass}')
    print(delta_motor_mass)
    # Plotting rockets acceleration.
    plt.subplot(2, 1, 1)
    plt.title('Vehicles acceleration on F15-0')
    plt.tight_layout(pad=5.0)
    plt.plot(delta_time, acceleration)
    plt.xlabel('Time in seconds (s)')
    plt.ylabel('Acceleration (m/s^2)')
    # Plotting rockets velocity
    plt.subplot(2, 1, 2)
    plt.title('Vehicles velocity on F15-0')
    plt.plot(delta_time, velocity)
    plt.xlabel('Time in seconds (s)')
    plt.ylabel('Velocity (m/s)')
    plt.show()
if __name__ == '__main__':
rocket_equation() | sim/falcon1_sim.py | import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import argparse
parser = argparse.ArgumentParser(description='Runs a flight simulation.')
parser.add_argument('--final_mass', type=float, default=0.9, help='The rockets total dry weight without the motor. This value is in kg.')
parser.add_argument('--propellant_mass', type=float, default=0.06, help='The weight of the propellant from the rockets motor. This value is in kg.')
parser.add_argument('--motor_mass', type=float, default=0.09, help='The total weight of the rockets motor. This value is in kg.')
args = parser.parse_args()
data = pd.read_csv('F15-0.csv')
meters_per_seconds_squared = [] # change in velocity per time
meter_per_seconds = []
def rocket_equation(final_mass=args.final_mass, motor_mass=args.motor_mass, propellant_mass=args.propellant_mass):
    """
    Simulate vehicle acceleration and velocity over the F15-0 thrust
    curve loaded in the module-level `data` DataFrame, then plot both.

    :param final_mass: rocket dry mass without the motor (kg).
    :param motor_mass: total motor mass including propellant (kg).
    :param propellant_mass: propellant mass burned during the flight (kg).
    """
    # (motor - propellant) + propellant is just motor_mass; the form is
    # kept to mirror the per-step mass update inside the loop.
    delta_motor_mass = (motor_mass - propellant_mass) + propellant_mass
    initial_mass = final_mass + delta_motor_mass
    print(f'Initial mass: {initial_mass}')
    thrust = data[['Thrust(N)']].to_numpy()  # 1N = 1 kg * m / s^2
    delta_time = data[['Time(Sec)']].to_numpy()  # time in seconds that has passed since the last frame
    for i in range(len(thrust)):
        if propellant_mass != 0.00:
            # Burn 10 g of propellant per sample until it is exhausted.
            propellant_mass = round(propellant_mass - 0.01, 2)
            delta_motor_mass = (motor_mass - args.propellant_mass) + propellant_mass  # 90 grams of Total Motor Mass
            initial_mass = final_mass + delta_motor_mass
        # a = F / m for the current (shrinking) vehicle mass.
        meters_per_seconds_squared.append(thrust[i] / round(initial_mass, 2))
        # NOTE(review): this multiplies acceleration by the sample's
        # timestamp, not by the time step, so it is not a true velocity
        # integration -- confirm intent before changing.
        meter_per_seconds.append(meters_per_seconds_squared[-1] * delta_time[i])
    # Convert to arrays once after the loop instead of rebuilding them on
    # every iteration (the original was accidentally O(n^2)).
    acceleration = np.asarray(meters_per_seconds_squared)
    velocity = np.asarray(meter_per_seconds)
    print(f'Final mass: {initial_mass}')
    print(delta_motor_mass)
    # Plotting rockets acceleration.
    plt.subplot(2, 1, 1)
    plt.title('Vehicles acceleration on F15-0')
    plt.tight_layout(pad=5.0)
    plt.plot(delta_time, acceleration)
    plt.xlabel('Time in seconds (s)')
    plt.ylabel('Acceleration (m/s^2)')
    # Plotting rockets velocity
    plt.subplot(2, 1, 2)
    plt.title('Vehicles velocity on F15-0')
    plt.plot(delta_time, velocity)
    plt.xlabel('Time in seconds (s)')
    plt.ylabel('Velocity (m/s)')
    plt.show()
if __name__ == '__main__':
rocket_equation() | 0.740268 | 0.4575 |
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Apply the migration: add the display_heading boolean column."""
        # Adding field 'SponsorshipType.display_heading'
        db.add_column('sponsors_sponsorshiptype', 'display_heading', self.gf('django.db.models.fields.BooleanField')(default=True, blank=True), keep_default=False)
    def backwards(self, orm):
        """Revert the migration: drop the display_heading column."""
        # Deleting field 'SponsorshipType.display_heading'
        db.delete_column('sponsors_sponsorshiptype', 'display_heading')
models = {
'sponsors.sponsor': {
'Meta': {'object_name': 'Sponsor'},
'aggregate_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sponsors.SponsorshipType']", 'null': 'True', 'blank': 'True'}),
'blurb': ('markupfields.fields.SmartlinksTextileField', [], {'blank': 'True'}),
'full_name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'link': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'logo': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'rank': ('django.db.models.fields.IntegerField', [], {}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'db_index': 'True'})
},
'sponsors.sponsorshiptype': {
'Meta': {'object_name': 'SponsorshipType'},
'display_heading': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'logo_size': ('django.db.models.fields.IntegerField', [], {}),
'plural': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'rank': ('django.db.models.fields.IntegerField', [], {}),
'singular': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['sponsors'] | sponsors/migrations/0002_auto__add_field_sponsorshiptype_display_heading.py | import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Apply the migration: add the display_heading boolean column."""
        # Adding field 'SponsorshipType.display_heading'
        db.add_column('sponsors_sponsorshiptype', 'display_heading', self.gf('django.db.models.fields.BooleanField')(default=True, blank=True), keep_default=False)
    def backwards(self, orm):
        """Revert the migration: drop the display_heading column."""
        # Deleting field 'SponsorshipType.display_heading'
        db.delete_column('sponsors_sponsorshiptype', 'display_heading')
models = {
'sponsors.sponsor': {
'Meta': {'object_name': 'Sponsor'},
'aggregate_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sponsors.SponsorshipType']", 'null': 'True', 'blank': 'True'}),
'blurb': ('markupfields.fields.SmartlinksTextileField', [], {'blank': 'True'}),
'full_name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'link': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'logo': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'rank': ('django.db.models.fields.IntegerField', [], {}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'db_index': 'True'})
},
'sponsors.sponsorshiptype': {
'Meta': {'object_name': 'SponsorshipType'},
'display_heading': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'logo_size': ('django.db.models.fields.IntegerField', [], {}),
'plural': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'rank': ('django.db.models.fields.IntegerField', [], {}),
'singular': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['sponsors'] | 0.327346 | 0.109277 |
#Date Last Modified: 02-13-2014
#Module: delete.py
#Object: delete requested organization, platform, or sensor from database
#Return:
# Copyright (c) 2015, Gulf of Mexico Coastal and Ocean Observing System
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of H-N-Data nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import sys
import sqlite3
import pathToDb
import cgi, cgitb
cgitb.enable()
#setup DB
dbconnect = sqlite3.connect(pathToDb.pathToDb)
dbconnect.row_factory = sqlite3.Row
dbh = dbconnect.cursor()

#get form data
data = cgi.FieldStorage()
formType = data.getvalue('type')

# Map each form type to its (table, delete-by column, form field name).
# Table and column names come only from this trusted map; the user-supplied
# value is bound as a SQL parameter below, which closes the SQL-injection
# hole in the original string-concatenated DELETE statements.
DELETE_TARGETS = {
    'org': ('organization', 'shortname', 'shortName'),
    'plat': ('platform', 'name', 'name'),
    'sens': ('sensor', 'rowid', 'rowid'),
}

#check which form the user is submitting
if formType in DELETE_TARGETS:
    table, column, field = DELETE_TARGETS[formType]
    value = data.getvalue(field)
    # '?' placeholder lets sqlite3 escape the untrusted value safely.
    # (The original also print()ed the raw form data and SQL, which would
    # be emitted before any CGI headers; the debug prints are removed.)
    dbh.execute('DELETE FROM %s WHERE %s = ?' % (table, column), (value,))
    dbconnect.commit()
# Always release the connection, even for an unrecognized form type.
dbconnect.close()
dbconnect.close() | adminForm/delete.py |
#Date Last Modified: 02-13-2014
#Module: delete.py
#Object: delete requested organization, platform, or sensor from database
#Return:
# Copyright (c) 2015, Gulf of Mexico Coastal and Ocean Observing System
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of H-N-Data nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import sys
import sqlite3
import pathToDb
import cgi, cgitb
cgitb.enable()
#setup DB
dbconnect = sqlite3.connect(pathToDb.pathToDb)
dbconnect.row_factory = sqlite3.Row
dbh = dbconnect.cursor()

#get form data
data = cgi.FieldStorage()
formType = data.getvalue('type')

# Map each form type to its (table, delete-by column, form field name).
# Table and column names come only from this trusted map; the user-supplied
# value is bound as a SQL parameter below, which closes the SQL-injection
# hole in the original string-concatenated DELETE statements.
DELETE_TARGETS = {
    'org': ('organization', 'shortname', 'shortName'),
    'plat': ('platform', 'name', 'name'),
    'sens': ('sensor', 'rowid', 'rowid'),
}

#check which form the user is submitting
if formType in DELETE_TARGETS:
    table, column, field = DELETE_TARGETS[formType]
    value = data.getvalue(field)
    # '?' placeholder lets sqlite3 escape the untrusted value safely.
    # (The original also print()ed the raw form data and SQL, which would
    # be emitted before any CGI headers; the debug prints are removed.)
    dbh.execute('DELETE FROM %s WHERE %s = ?' % (table, column), (value,))
    dbconnect.commit()
# Always release the connection, even for an unrecognized form type.
dbconnect.close()
dbconnect.close() | 0.403802 | 0.043043 |
import re
from uuid import uuid4
from graphql import graphql
from ..id_type import BaseGlobalIDType, SimpleGlobalIDType, UUIDGlobalIDType
from ..node import Node
from ...types import Int, ObjectType, Schema, String
class TestUUIDGlobalID:
    # NOTE(review): nose-style `setup` hooks -- presumably an older pytest;
    # recent pytest versions want `setup_method`. Confirm before upgrading.
    def setup(self):
        # In-memory "database": four users keyed by a random UUID.
        self.user_list = [
            {"id": uuid4(), "name": "First"},
            {"id": uuid4(), "name": "Second"},
            {"id": uuid4(), "name": "Third"},
            {"id": uuid4(), "name": "Fourth"},
        ]
        self.users = {user["id"]: user for user in self.user_list}

        class CustomNode(Node):
            class Meta:
                # Node interface whose global ids are plain UUIDs.
                global_id_type = UUIDGlobalIDType

        class User(ObjectType):
            class Meta:
                interfaces = [CustomNode]

            name = String()

            @classmethod
            def get_node(cls, _type, _id):
                # `self` here is the enclosing setup()'s self, captured by
                # closure (this class is defined inside setup).
                return self.users[_id]

        class RootQuery(ObjectType):
            user = CustomNode.Field(User)

        self.schema = Schema(query=RootQuery, types=[User])

    def test_str_schema_correct(self):
        """
        Check that the schema has the expected and custom node interface and user type and that they both use UUIDs
        """
        # Parse "<type header> { <fields> }" pairs out of the printed SDL.
        parsed = re.findall(r"(.+) \{\n\s*([\w\W]*?)\n\}", str(self.schema))
        types = [t for t, f in parsed]
        fields = [f for t, f in parsed]
        custom_node_interface = "interface CustomNode"
        assert custom_node_interface in types
        assert "id: UUID!" == fields[types.index(custom_node_interface)]
        user_type = "type User implements CustomNode"
        assert user_type in types
        assert "id: UUID!\n name: String" == fields[types.index(user_type)]

    def test_get_by_id(self):
        query = """query userById($id: UUID!) {
user(id: $id) {
id
name
}
}"""
        # UUID need to be converted to string for serialization
        result = graphql(
            self.schema, query, variable_values={"id": str(self.user_list[0]["id"])}
        )
        assert not result.errors
        assert result.data["user"]["id"] == str(self.user_list[0]["id"])
        assert result.data["user"]["name"] == self.user_list[0]["name"]
class TestSimpleGlobalID:
    def setup(self):
        # Users keyed by a human-readable primary key used directly as the
        # global id (no base64 wrapping).
        self.user_list = [
            {"id": "my global primary key in clear 1", "name": "First"},
            {"id": "my global primary key in clear 2", "name": "Second"},
            {"id": "my global primary key in clear 3", "name": "Third"},
            {"id": "my global primary key in clear 4", "name": "Fourth"},
        ]
        self.users = {user["id"]: user for user in self.user_list}

        class CustomNode(Node):
            class Meta:
                global_id_type = SimpleGlobalIDType

        class User(ObjectType):
            class Meta:
                interfaces = [CustomNode]

            name = String()

            @classmethod
            def get_node(cls, _type, _id):
                # `self` is captured from the enclosing setup() by closure.
                return self.users[_id]

        class RootQuery(ObjectType):
            user = CustomNode.Field(User)

        self.schema = Schema(query=RootQuery, types=[User])

    def test_str_schema_correct(self):
        """
        Check that the schema has the expected custom node interface and user
        type and that they both use plain String IDs.
        """
        parsed = re.findall(r"(.+) \{\n\s*([\w\W]*?)\n\}", str(self.schema))
        types = [t for t, f in parsed]
        fields = [f for t, f in parsed]
        custom_node_interface = "interface CustomNode"
        assert custom_node_interface in types
        assert "id: ID!" == fields[types.index(custom_node_interface)]
        user_type = "type User implements CustomNode"
        assert user_type in types
        assert "id: ID!\n name: String" == fields[types.index(user_type)]

    def test_get_by_id(self):
        query = """query {
user(id: "my global primary key in clear 3") {
id
name
}
}"""
        result = graphql(self.schema, query)
        assert not result.errors
        assert result.data["user"]["id"] == self.user_list[2]["id"]
        assert result.data["user"]["name"] == self.user_list[2]["name"]
class TestCustomGlobalID:
    def setup(self):
        # Users keyed by a plain integer primary key.
        self.user_list = [
            {"id": 1, "name": "First"},
            {"id": 2, "name": "Second"},
            {"id": 3, "name": "Third"},
            {"id": 4, "name": "Fourth"},
        ]
        self.users = {user["id"]: user for user in self.user_list}

        class CustomGlobalIDType(BaseGlobalIDType):
            """
            Global id that is simply an integer in clear.
            """

            graphene_type = Int

            @classmethod
            def resolve_global_id(cls, info, global_id):
                # The node type comes from the field's return type; the
                # integer id is passed through unchanged.
                _type = info.return_type.graphene_type._meta.name
                return _type, global_id

            @classmethod
            def to_global_id(cls, _type, _id):
                return _id

        class CustomNode(Node):
            class Meta:
                global_id_type = CustomGlobalIDType

        class User(ObjectType):
            class Meta:
                interfaces = [CustomNode]

            name = String()

            @classmethod
            def get_node(cls, _type, _id):
                # `self` is captured from the enclosing setup() by closure.
                return self.users[_id]

        class RootQuery(ObjectType):
            user = CustomNode.Field(User)

        self.schema = Schema(query=RootQuery, types=[User])

    def test_str_schema_correct(self):
        """
        Check that the schema has the expected custom node interface and user
        type and that they both use Int ids.
        """
        parsed = re.findall(r"(.+) \{\n\s*([\w\W]*?)\n\}", str(self.schema))
        types = [t for t, f in parsed]
        fields = [f for t, f in parsed]
        custom_node_interface = "interface CustomNode"
        assert custom_node_interface in types
        assert "id: Int!" == fields[types.index(custom_node_interface)]
        user_type = "type User implements CustomNode"
        assert user_type in types
        assert "id: Int!\n name: String" == fields[types.index(user_type)]

    def test_get_by_id(self):
        query = """query {
user(id: 2) {
id
name
}
}"""
        result = graphql(self.schema, query)
        assert not result.errors
        assert result.data["user"]["id"] == self.user_list[1]["id"]
assert result.data["user"]["name"] == self.user_list[1]["name"] | graphene/relay/tests/test_custom_global_id.py | import re
from uuid import uuid4
from graphql import graphql
from ..id_type import BaseGlobalIDType, SimpleGlobalIDType, UUIDGlobalIDType
from ..node import Node
from ...types import Int, ObjectType, Schema, String
class TestUUIDGlobalID:
def setup(self):
self.user_list = [
{"id": uuid4(), "name": "First"},
{"id": uuid4(), "name": "Second"},
{"id": uuid4(), "name": "Third"},
{"id": uuid4(), "name": "Fourth"},
]
self.users = {user["id"]: user for user in self.user_list}
class CustomNode(Node):
class Meta:
global_id_type = UUIDGlobalIDType
class User(ObjectType):
class Meta:
interfaces = [CustomNode]
name = String()
@classmethod
def get_node(cls, _type, _id):
return self.users[_id]
class RootQuery(ObjectType):
user = CustomNode.Field(User)
self.schema = Schema(query=RootQuery, types=[User])
def test_str_schema_correct(self):
"""
Check that the schema has the expected and custom node interface and user type and that they both use UUIDs
"""
parsed = re.findall(r"(.+) \{\n\s*([\w\W]*?)\n\}", str(self.schema))
types = [t for t, f in parsed]
fields = [f for t, f in parsed]
custom_node_interface = "interface CustomNode"
assert custom_node_interface in types
assert "id: UUID!" == fields[types.index(custom_node_interface)]
user_type = "type User implements CustomNode"
assert user_type in types
assert "id: UUID!\n name: String" == fields[types.index(user_type)]
def test_get_by_id(self):
query = """query userById($id: UUID!) {
user(id: $id) {
id
name
}
}"""
# UUID need to be converted to string for serialization
result = graphql(
self.schema, query, variable_values={"id": str(self.user_list[0]["id"])}
)
assert not result.errors
assert result.data["user"]["id"] == str(self.user_list[0]["id"])
assert result.data["user"]["name"] == self.user_list[0]["name"]
class TestSimpleGlobalID:
def setup(self):
self.user_list = [
{"id": "my global primary key in clear 1", "name": "First"},
{"id": "my global primary key in clear 2", "name": "Second"},
{"id": "my global primary key in clear 3", "name": "Third"},
{"id": "my global primary key in clear 4", "name": "Fourth"},
]
self.users = {user["id"]: user for user in self.user_list}
class CustomNode(Node):
class Meta:
global_id_type = SimpleGlobalIDType
class User(ObjectType):
class Meta:
interfaces = [CustomNode]
name = String()
@classmethod
def get_node(cls, _type, _id):
return self.users[_id]
class RootQuery(ObjectType):
user = CustomNode.Field(User)
self.schema = Schema(query=RootQuery, types=[User])
def test_str_schema_correct(self):
"""
Check that the schema has the expected and custom node interface and user type and that they both use UUIDs
"""
parsed = re.findall(r"(.+) \{\n\s*([\w\W]*?)\n\}", str(self.schema))
types = [t for t, f in parsed]
fields = [f for t, f in parsed]
custom_node_interface = "interface CustomNode"
assert custom_node_interface in types
assert "id: ID!" == fields[types.index(custom_node_interface)]
user_type = "type User implements CustomNode"
assert user_type in types
assert "id: ID!\n name: String" == fields[types.index(user_type)]
def test_get_by_id(self):
query = """query {
user(id: "my global primary key in clear 3") {
id
name
}
}"""
result = graphql(self.schema, query)
assert not result.errors
assert result.data["user"]["id"] == self.user_list[2]["id"]
assert result.data["user"]["name"] == self.user_list[2]["name"]
class TestCustomGlobalID:
def setup(self):
self.user_list = [
{"id": 1, "name": "First"},
{"id": 2, "name": "Second"},
{"id": 3, "name": "Third"},
{"id": 4, "name": "Fourth"},
]
self.users = {user["id"]: user for user in self.user_list}
class CustomGlobalIDType(BaseGlobalIDType):
"""
Global id that is simply and integer in clear.
"""
graphene_type = Int
@classmethod
def resolve_global_id(cls, info, global_id):
_type = info.return_type.graphene_type._meta.name
return _type, global_id
@classmethod
def to_global_id(cls, _type, _id):
return _id
class CustomNode(Node):
class Meta:
global_id_type = CustomGlobalIDType
class User(ObjectType):
class Meta:
interfaces = [CustomNode]
name = String()
@classmethod
def get_node(cls, _type, _id):
return self.users[_id]
class RootQuery(ObjectType):
user = CustomNode.Field(User)
self.schema = Schema(query=RootQuery, types=[User])
def test_str_schema_correct(self):
"""
Check that the schema has the expected and custom node interface and user type and that they both use UUIDs
"""
parsed = re.findall(r"(.+) \{\n\s*([\w\W]*?)\n\}", str(self.schema))
types = [t for t, f in parsed]
fields = [f for t, f in parsed]
custom_node_interface = "interface CustomNode"
assert custom_node_interface in types
assert "id: Int!" == fields[types.index(custom_node_interface)]
user_type = "type User implements CustomNode"
assert user_type in types
assert "id: Int!\n name: String" == fields[types.index(user_type)]
def test_get_by_id(self):
query = """query {
user(id: 2) {
id
name
}
}"""
result = graphql(self.schema, query)
assert not result.errors
assert result.data["user"]["id"] == self.user_list[1]["id"]
assert result.data["user"]["name"] == self.user_list[1]["name"] | 0.633183 | 0.352536 |
import argparse
import json
import os
import requests
import sys
from subprocess import call
from uuid import uuid4
URL_BASE = "https://api.gdc.cancer.gov/legacy/"
if __name__ == "__main__":
    # CLI: choose the methylation platform, optionally stop after writing
    # the manifest, or force a full re-download.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--platform",
        choices=["Illumina Human Methylation 450",
                 "Illumina Human Methylation 27"],
        default="Illumina Human Methylation 450",
        help="download data from which methylation platform"
    )
    parser.add_argument(
        "--manifest-only",
        default=False,
        action="store_true",
        help="write file manifest, but do not download the files"
    )
    parser.add_argument(
        "--force",
        default=False,
        action="store_true",
        help="force download the files, don't resume a partial download"
    )
    args = parser.parse_args()
# Output basename: source/tcga/methylation/<PlatformNameWithoutSpaces>
output_name = os.path.join("source/tcga/methylation/", args.platform.replace(" ", ""))
if not os.path.isdir("source/tcga/methylation"):
    # BUG FIX: the original called os.mkdirs, which does not exist and
    # raises AttributeError on first run; os.makedirs is the recursive
    # directory-creation call.
    os.makedirs("source/tcga/methylation")
# Final processed archive; skip all work if it already exists.
output_archive = output_name + ".tar.gz"
if os.path.exists(output_archive) and not args.force:
    print('found archive...')
    sys.exit(0)
# Tab-separated manifest mapping GDC file ids to project/sample ids.
output_manifest = output_name + ".map"
if not os.path.exists(output_manifest) or args.force:
    # GDC filter: TCGA methylation beta values for the requested platform.
    query = {
        "op": "and",
        "content": [
            {
                "op": "=",
                "content": {
                    "field": "data_type",
                    "value": [
                        "Methylation beta value"
                    ]
                }
            },
            {
                "op": "=",
                "content": {
                    "field": "cases.project.program.name",
                    "value": [
                        "TCGA"
                    ]
                }
            },
            {
                "op": "=",
                "content": {
                    "field": "platform",
                    "value": [
                        args.platform
                    ]
                }
            }
        ]
    }
    data = {}
    id_map = {}
    params = {}
    params['filters'] = json.dumps(query)
    params['expand'] = "cases.samples,cases.project"
    print('querying GDC API...')
    # Page through /files 1000 records at a time; the first iteration runs
    # because 'size' is not yet in params.
    while 'size' not in params or \
            data['pagination']['page'] < data['pagination']['pages']:
        params['size'] = 1000
        req = requests.get(URL_BASE + "files", params=params)
        data = req.json()['data']
        for i in data['hits']:
            for case in i["cases"]:
                if "samples" in case:
                    for j in case["samples"]:
                        # NOTE(review): if a file has several cases/samples,
                        # later entries overwrite earlier ones for the same
                        # file id -- confirm this is intended.
                        id_map[i['id']] = {
                            "sample": j['sample_id'],
                            "project": case["project"]["project_id"]
                        }
                else:
                    id_map[i['id']] = {
                        "project": case["project"]["project_id"]
                    }
        params['from'] = data['pagination']['from'] + \
            data['pagination']['count']
        print('processed page', data['pagination']['page'])
    print('creating file manifest...')
    with open(output_manifest, "w") as handle:
        handle.write("file_id\tproject_id\tsample_id\n")
        for k, v in id_map.items():
            handle.write("%s\t%s\t%s\n" %
                         (k, v["project"], v.get("sample", "")))
else:
    # Reuse the manifest; only the file ids are needed for downloading.
    # NOTE(review): the header row's "file_id" token also lands in id_map
    # here -- presumably harmless, but confirm against the GDC API.
    print('found existing file manifest...')
    id_map = {}
    with open(output_manifest, "r") as fh:
        for line in fh:
            id_map[line.split("\t")[0]] = None
if args.manifest_only:
    sys.exit(0)
def chunks(data, csize):
    """Yield successive slices of *data*, each at most *csize* items long."""
    start = 0
    while start < len(data):
        yield data[start:start + csize]
        start += csize
# POST /data with a JSON body of file ids streams back a tar.gz of those
# files. (The original assigned `headers` twice; the duplicate is removed.)
headers = {'Content-type': 'application/json'}
keychunks = chunks(list(id_map.keys()), 100)
paths = []
print('downloading files...')
for index, ids in enumerate(keychunks):
    # One chunk file per batch of 100 ids; resumable unless --force.
    path = output_name + "-" + str(index)
    paths.append(path)
    if not os.path.exists(path) or args.force:
        print('downloading chunk', str(index) + '...')
        r = requests.post(URL_BASE + 'data',
                          data=json.dumps({"ids": ids}),
                          headers=headers,
                          stream=True)
        # Stream to disk 1 KiB at a time to bound memory use.
        with open(path, 'wb') as f:
            for chunk in r.iter_content(1024):
                f.write(chunk)
    else:
        print('found chunk', str(index) + '...')
print('creating archive...')
# Unpack every chunk into one shared directory, merging their per-chunk
# MANIFEST.txt files, then re-pack everything as a single tar.gz.
random = str(uuid4())
archive = "archive" + "-" + random
manifest = "manifest" + "-" + random
call(["mkdir", archive])
call(["mkdir", manifest])
for index, path in enumerate(paths):
    call(["tar", "xzvf", path, "-C", archive + "/"])
    call(["rm", "-f", path])
    # Keep each chunk's MANIFEST.txt aside so they can be concatenated.
    call(["mv", archive + "/MANIFEST.txt", manifest + "/" + str(index)])
# BUG FIX: the original built "cat" + manifest with no space, producing a
# bogus command ("catmanifest-..."), so the merged MANIFEST.txt was never
# written. The missing space is added.
call("cat " + manifest + "/* > " + archive + "/MANIFEST.txt", shell=True)
tar = "cd " + archive + " && " + "tar czvf ../" + output_archive + \
    " . && cd .."
call(tar, shell=True)
call(["rm", "-rf", archive])
call(["rm", "-rf", manifest]) | transform/tcga/download_methylation.py | import argparse
import json
import os
import requests
import sys
from subprocess import call
from uuid import uuid4
URL_BASE = "https://api.gdc.cancer.gov/legacy/"
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--platform",
choices=["Illumina Human Methylation 450",
"Illumina Human Methylation 27"],
default="Illumina Human Methylation 450",
help="download data from which methylation platform"
)
parser.add_argument(
"--manifest-only",
default=False,
action="store_true",
help="write file manifest, but do not download the files"
)
parser.add_argument(
"--force",
default=False,
action="store_true",
help="force download the files, don't resume a partial download"
)
args = parser.parse_args()
# Output basename: source/tcga/methylation/<PlatformNameWithoutSpaces>
output_name = os.path.join("source/tcga/methylation/", args.platform.replace(" ", ""))
if not os.path.isdir("source/tcga/methylation"):
    # BUG FIX: the original called os.mkdirs, which does not exist and
    # raises AttributeError on first run; os.makedirs is the recursive
    # directory-creation call.
    os.makedirs("source/tcga/methylation")
output_archive = output_name + ".tar.gz"
if os.path.exists(output_archive) and not args.force:
print('found archive...')
sys.exit(0)
output_manifest = output_name + ".map"
if not os.path.exists(output_manifest) or args.force:
query = {
"op": "and",
"content": [
{
"op": "=",
"content": {
"field": "data_type",
"value": [
"Methylation beta value"
]
}
},
{
"op": "=",
"content": {
"field": "cases.project.program.name",
"value": [
"TCGA"
]
}
},
{
"op": "=",
"content": {
"field": "platform",
"value": [
args.platform
]
}
}
]
}
data = {}
id_map = {}
params = {}
params['filters'] = json.dumps(query)
params['expand'] = "cases.samples,cases.project"
print('querying GDC API...')
while 'size' not in params or \
data['pagination']['page'] < data['pagination']['pages']:
params['size'] = 1000
req = requests.get(URL_BASE + "files", params=params)
data = req.json()['data']
for i in data['hits']:
for case in i["cases"]:
if "samples" in case:
for j in case["samples"]:
id_map[i['id']] = {
"sample": j['sample_id'],
"project": case["project"]["project_id"]
}
else:
id_map[i['id']] = {
"project": case["project"]["project_id"]
}
params['from'] = data['pagination']['from'] + \
data['pagination']['count']
print('processed page', data['pagination']['page'])
print('creating file manifest...')
with open(output_manifest, "w") as handle:
handle.write("file_id\tproject_id\tsample_id\n")
for k, v in id_map.items():
handle.write("%s\t%s\t%s\n" %
(k, v["project"], v.get("sample", "")))
else:
print('found existing file manifest...')
id_map = {}
with open(output_manifest, "r") as fh:
for line in fh:
id_map[line.split("\t")[0]] = None
if args.manifest_only:
sys.exit(0)
def chunks(data, csize):
for i in range(0, len(data), csize):
yield data[i:i + csize]
headers = {'Content-type': 'application/json'}
keychunks = chunks(list(id_map.keys()), 100)
paths = []
print('downloading files...')
headers = {'Content-type': 'application/json'}
for index, ids in enumerate(keychunks):
path = output_name + "-" + str(index)
paths.append(path)
if not os.path.exists(path) or args.force:
print('downloading chunk', str(index) + '...')
r = requests.post(URL_BASE + 'data',
data=json.dumps({"ids": ids}),
headers=headers,
stream=True)
with open(path, 'wb') as f:
for chunk in r.iter_content(1024):
f.write(chunk)
else:
print('found chunk', str(index) + '...')
print('creating archive...')
# Unpack every chunk into one shared directory, merging their per-chunk
# MANIFEST.txt files, then re-pack everything as a single tar.gz.
random = str(uuid4())
archive = "archive" + "-" + random
manifest = "manifest" + "-" + random
call(["mkdir", archive])
call(["mkdir", manifest])
for index, path in enumerate(paths):
    call(["tar", "xzvf", path, "-C", archive + "/"])
    call(["rm", "-f", path])
    # Keep each chunk's MANIFEST.txt aside so they can be concatenated.
    call(["mv", archive + "/MANIFEST.txt", manifest + "/" + str(index)])
# BUG FIX: the original built "cat" + manifest with no space, producing a
# bogus command ("catmanifest-..."), so the merged MANIFEST.txt was never
# written. The missing space is added.
call("cat " + manifest + "/* > " + archive + "/MANIFEST.txt", shell=True)
tar = "cd " + archive + " && " + "tar czvf ../" + output_archive + \
    " . && cd .."
call(tar, shell=True)
call(["rm", "-rf", archive])
call(["rm", "-rf", manifest]) | 0.144209 | 0.150684 |
import pytest
import json
import os
from contextlib import contextmanager
from newrelic.api.application import (application_instance as
current_application)
from newrelic.api.background_task import BackgroundTask
from newrelic.core.rules_engine import SegmentCollapseEngine
from newrelic.core.agent import agent_instance
# Directory containing this test module.
CURRENT_DIR = os.path.dirname(os.path.realpath(__file__))
# Cross-agent JSON fixtures live in the sibling 'fixtures' directory.
JSON_DIR = os.path.normpath(os.path.join(CURRENT_DIR, 'fixtures'))
# NOTE(review): likely a typo for "OUTBOUND"; unused in this file chunk.
OUTBOUD_REQUESTS = {}
# Field order pulled from each fixture entry (and the parametrize names).
_parameters_list = ['testname', 'transaction_segment_terms', 'tests']
def load_tests():
    """Read the cross-agent fixture JSON and return parametrize tuples.

    Each element is a (testname, transaction_segment_terms, tests) tuple,
    with None substituted for any key missing from a fixture entry.
    """
    fixture_path = os.path.join(JSON_DIR, 'transaction_segment_terms.json')
    with open(fixture_path, 'r') as fixture_file:
        fixture_entries = json.load(fixture_file)
    return [
        tuple(entry.get(param, None) for param in _parameters_list)
        for entry in fixture_entries
    ]
# Comma-joined argument names for pytest.mark.parametrize.
_parameters = ",".join(_parameters_list)
@pytest.mark.parametrize(_parameters, load_tests())
def test_transaction_segments(testname, transaction_segment_terms, tests):
    # Drive the engine directly: each fixture supplies collapse rules plus
    # input/expected metric-name pairs.
    engine = SegmentCollapseEngine(transaction_segment_terms)
    for test in tests:
        assert engine.normalize(test['input'])[0] == test['expected']
@contextmanager
def segment_rules(name, rules):
    """Temporarily install *rules* as application *name*'s segment engine.

    The previous SegmentCollapseEngine is restored on exit. Wrapping the
    yield in try/finally fixes a bug in the original: if the with-body
    raised, the old rules were never restored, leaking modified state into
    subsequent tests.
    """
    application = agent_instance().application(name)
    old_rules = application._rules_engine['segment']
    application._rules_engine['segment'] = SegmentCollapseEngine(rules)
    try:
        yield
    finally:
        application._rules_engine['segment'] = old_rules
@pytest.mark.parametrize(_parameters, load_tests())
def test_transaction_freeze_path_segments(testname, transaction_segment_terms,
        tests):
    application = current_application()

    # We can't check all possibilites by doing things via the transaction
    # as it not possible to set up a metric path of only one segment.
    with segment_rules(application.name, transaction_segment_terms):
        for test in tests:
            # Fixture input is "<type> <group...> <name...>" separated by
            # whitespace; rebuild group and name with '/' separators.
            segments = test['input'].split()
            if len(segments) < 2:
                continue
            ttype = segments[0]
            group = '/'.join(segments[1:2])
            name = '/'.join(segments[2:])
            # The transaction path is frozen when the BackgroundTask exits;
            # background_task mirrors whether the type is OtherTransaction.
            with BackgroundTask(application, name, group) as transaction:
                transaction.background_task = (ttype == 'OtherTransaction')
assert transaction.path == test['expected'] | tests/cross_agent/test_transaction_segment_terms.py |
import pytest
import json
import os
from contextlib import contextmanager
from newrelic.api.application import (application_instance as
current_application)
from newrelic.api.background_task import BackgroundTask
from newrelic.core.rules_engine import SegmentCollapseEngine
from newrelic.core.agent import agent_instance
CURRENT_DIR = os.path.dirname(os.path.realpath(__file__))
JSON_DIR = os.path.normpath(os.path.join(CURRENT_DIR, 'fixtures'))
OUTBOUD_REQUESTS = {}
_parameters_list = ['testname', 'transaction_segment_terms', 'tests']
def load_tests():
result = []
path = os.path.join(JSON_DIR, 'transaction_segment_terms.json')
with open(path, 'r') as fh:
tests = json.load(fh)
for test in tests:
values = tuple([test.get(param, None) for param in _parameters_list])
result.append(values)
return result
_parameters = ",".join(_parameters_list)
@pytest.mark.parametrize(_parameters, load_tests())
def test_transaction_segments(testname, transaction_segment_terms, tests):
engine = SegmentCollapseEngine(transaction_segment_terms)
for test in tests:
assert engine.normalize(test['input'])[0] == test['expected']
@contextmanager
def segment_rules(name, rules):
application = agent_instance().application(name)
old_rules = application._rules_engine['segment']
new_rules = SegmentCollapseEngine(rules)
application._rules_engine['segment'] = new_rules
yield
application._rules_engine['segment'] = old_rules
@pytest.mark.parametrize(_parameters, load_tests())
def test_transaction_freeze_path_segments(testname, transaction_segment_terms,
tests):
application = current_application()
# We can't check all possibilites by doing things via the transaction
# as it not possible to set up a metric path of only one segment.
with segment_rules(application.name, transaction_segment_terms):
for test in tests:
segments = test['input'].split()
if len(segments) < 2:
continue
ttype = segments[0]
group = '/'.join(segments[1:2])
name = '/'.join(segments[2:])
with BackgroundTask(application, name, group) as transaction:
transaction.background_task = (ttype == 'OtherTransaction')
assert transaction.path == test['expected'] | 0.49585 | 0.315393 |
from __future__ import print_function
import torch
from torch.utils.data import DataLoader
from torchvision import transforms
from torchvision.datasets.utils import download_url
import os
import errno
import numpy as np
import pandas as pd
class CCLE_Dataset(torch.utils.data.Dataset):
    """`CCLE` dataset from paper:

    <NAME> et al. (2012). The Cancer Cell Line Encyclopedia enables
    predictive modelling of anticancer drug sensitivity. Nature, 483(7391), 603.

    Note:
        The X dataset is z-scored, which means that if it is partitioned into
        a training and test split, these have to be re-z-scored according to
        the training split.

    Args:
        root (string): Root directory where the downloaded data files are stored.
        task (string): drug whose response is the regression target
            (default 'PLX4720'). NOTE(review): the original docstring listed
            olfaction task names here, apparently copy-pasted from another
            dataset class; confirm the valid drug names against response.csv.
        feature_type (string): which features to expose, one of
            ['mutation', 'expression', 'both'].
        train (bool, optional): If True, creates dataset from the training
            split, otherwise from the test split. The two splits come from the
            same partition if the random seed `seed` is the same.
        test_size (int, float): how much data has to be reserved for test.
            If test_size is int it is the number of samples; if float, the
            fraction of samples over the total.
        shuffle_targets (bool): If True, shuffles the targets (Y) relative to
            the data (X), breaking the dependence between X and Y so that
            P(X,Y) = P(X)P(Y). If False, X and Y are sampled together from
            P(X,Y).
        seed (int): seed of the random number generator.
        z_score (bool): whether to z-score X features; z-score statistics are
            always computed on the training split (see Note above).
        download (bool, optional): If true, downloads the dataset from the
            internet into `root`. If already downloaded, it is not downloaded
            again.
        verbose (bool): print progress information while loading.
        parent_dataset (CCLE_Dataset, optional): reuse this dataset's data and
            train/test partition instead of loading from disk.
    """

    # Dropbox-hosted raw data files (mutations, expression, drug response).
    urls = [
        'https://www.dropbox.com/s/7iy0ght31hxhn7d/mutation.txt',
        'https://www.dropbox.com/s/bplwquwbc7zleck/expression.txt',
        'https://www.dropbox.com/s/78mp3ebnb4h6jsy/response.csv',
    ]
    # Query-string suffix forcing Dropbox to serve the raw file.
    download_option = '?dl=1'
    # Local filenames expected under `root`.
    files = [
        'mutation.txt',
        'expression.txt',
        'response.csv'
    ]
def __init__(self, root, task = 'PLX4720', feature_type ='both', train=True, test_size=0.1,
             shuffle_targets=False, seed=1, z_score=True, download=True, verbose = False, parent_dataset = None):
    """Load (or reuse) the CCLE data and materialize one train/test split."""
    self.root = os.path.expanduser(root)
    # Which feature matrix to expose: mutations, expression, or both.
    if not isinstance(feature_type, str) or feature_type not in ['mutation', 'expression', 'both']:
        raise ValueError('task must be one of the following task descriptors: ' + str(['mutation', 'expression', 'both']))
    else:
        self.feature_type = feature_type
    # Known ground-truth features for the PLX4720 task; always retained.
    self.fea_groundtruth = ['C11orf85', 'FXYD4', 'SLC28A2', 'MAML3_MUT', 'RAD51L1_MUT', 'GAPDHS', 'BRAF_MUT']
    self.task = task # drug target
    self.train = train
    self.shuffle_targets = shuffle_targets
    self.verbose = verbose
    # Random number generator
    self.rng = np.random.RandomState(seed)
    if parent_dataset is None:
        if download:
            self.download()
        if not self._check_exists():
            raise RuntimeError('Dataset not found. You can use download=True to download it')
        self.full_data, self.full_targets, self.features, all_features = self.load_data()
        # Resolve test_size into an absolute number of held-out samples.
        if isinstance(test_size, float):
            if test_size > 1.0 or test_size < 0.0:
                raise ValueError('test_size must be integer or a float between 0.0 and 1.0')
            else:
                self.test_size = int(len(self.full_data) * test_size)
        elif isinstance(test_size, int):
            if test_size >= len(self.full_data) or test_size < 0:
                raise ValueError('integer test_size must be between 0 and {}'.format(len(self.full_data)))
            else:
                self.test_size = test_size
        # Permutation indices:
        perm = self.rng.permutation(len(self.full_data))
        self.ind_train = perm[self.test_size:]
        self.ind_test = perm[:self.test_size]
    else:
        # Share data and the train/test partition with an existing dataset
        # (e.g. build the test-split instance from the training instance).
        self.full_data = parent_dataset.full_data
        self.full_targets = parent_dataset.full_targets
        self.features = parent_dataset.features
        self.ind_train = parent_dataset.ind_train
        self.ind_test = parent_dataset.ind_test
    # get feature indexes
    self.fea_groundtruth_idx = [self.features.get_loc(ftr) for ftr in self.fea_groundtruth]
    if self.train:
        self.data, self.targets = self.full_data[self.ind_train], self.full_targets[self.ind_train]
    else:
        self.data, self.targets = self.full_data[self.ind_test], self.full_targets[self.ind_test]
    # z-score according to training split
    if z_score:
        # Statistics always come from the training split so the test split
        # is standardized without leaking its own statistics.
        mu = np.mean(self.full_data[self.ind_train], 0)
        sd = np.std(self.full_data[self.ind_train], 0) + 1e-6
        self.data = (self.data - mu) / sd
        mu = np.mean(self.full_targets[self.ind_train], 0)
        sd = np.std(self.full_targets[self.ind_train], 0) + 1e-6
        self.targets = (self.targets - mu) / sd
    self.z_score = z_score
    self.data, self.targets = torch.FloatTensor(self.data), torch.FloatTensor(self.targets)
    def get_feature_names(self):
        """Return the names (index values) of the features kept after correlation filtering."""
        return self.features.values
    def get_groundtruth_features(self):
        """Return positional indexes (into self.features) of the known ground-truth features."""
        return self.fea_groundtruth_idx
    def __getitem__(self, index):
        """
        Args:
            index (int): Index
        Returns:
            tuple: (x, y) where x is the feature vector and y is the drug-response
            target. When ``shuffle_targets`` is True, y comes from a random row,
            breaking the X/Y dependence so pairs are sampled from P(X)P(Y).
        """
        if self.shuffle_targets:
            # random pairing decouples targets from inputs
            y_index = self.rng.randint(len(self.targets))
        else:
            y_index = index
        return self.data[index], self.targets[y_index]
    def __len__(self):
        """Number of samples in the active (train or test) split."""
        return len(self.data)
    def _check_exists(self):
        """Return True if every expected raw data file is present under ``self.root``."""
        return all(map(lambda f: os.path.exists(os.path.join(self.root, f)), self.files))
    def download(self):
        """Download the CCLE data files into ``self.root`` if they are not already present."""
        if self._check_exists():
            return
        # download files
        try:
            os.makedirs(os.path.join(self.root))
        except OSError as e:
            if e.errno == errno.EEXIST:
                # directory already exists -- nothing to do
                pass
            else:
                raise
        for url in self.urls:
            # the final path component of the URL doubles as the local file name
            filename = url.rpartition('/')[2]
            download_url(url + self.download_option, root=self.root, filename=filename, md5=None)
def ccle_feature_filter(self, X, y, threshold=0.1):
# Remove all features that do not have at least pearson correlation at threshold with y
corrs = np.array([np.abs(np.corrcoef(x, y)[0,1]) if x.std() > 0 else 0 for x in X.T])
selected = corrs >= threshold
print(selected.sum(), selected.shape, corrs[34758])
return selected, corrs
    def load_data(self):
        """Load the CCLE matrices for ``self.task`` and filter features by correlation.

        Returns:
            tuple: (X, y, kept_feature_index, all_feature_index); X is restricted
            to features passing the correlation filter, with the ground-truth
            features force-included.
        """
        X_drugs, y_drugs, drugs, cells, features = self.load_ccle()
        drug_idx = drugs.get_loc(self.task)
        if self.verbose:
            print('Drug {}'.format(drugs[drug_idx]))
        X_drug, y_drug = X_drugs[drug_idx], y_drugs[drug_idx]
        # Specific to PLX4720. Filters out all features with pearson correlation less than 0.1 in magnitude
        if self.verbose:
            print('Filtering by correlation with signal first')
        ccle_selected, corrs = self.ccle_feature_filter(X_drug, y_drug)
        # keeps the ground truth features (always selected regardless of correlation)
        for plx4720_feat in self.fea_groundtruth:
            idx = features.get_loc(plx4720_feat)
            ccle_selected[idx] = True
            if self.verbose:
                print('Correlation for {}: {:.4f}'.format(plx4720_feat, corrs[idx]))
        ccle_features = features[ccle_selected]
        # uses data from filtered features only
        X_drug = X_drug[:, np.nonzero(ccle_selected)[0]]
        return X_drug, y_drug, ccle_features, features
    def load_ccle(self):
        r"""Load CCLE dataset
        This method is based on the code in https://github.com/tansey/hrt/blob/master/examples/ccle/main.py
        published together with the paper Tansey et al. (http://arxiv.org/abs/1811.00645)
        and is subject to the following license:
        The MIT License (MIT)
        Copyright (c) 2018 <NAME>
        Permission is hereby granted, free of charge, to any person obtaining a copy
        of this software and associated documentation files (the "Software"), to deal
        in the Software without restriction, including without limitation the rights
        to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
        copies of the Software, and to permit persons to whom the Software is
        furnished to do so, subject to the following conditions:
        The above copyright notice and this permission notice shall be included in all
        copies or substantial portions of the Software.
        THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
        IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
        FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
        AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
        LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
        OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
        SOFTWARE.
        """
        if self.feature_type in ['expression', 'both']:
            # Load gene expression
            expression = pd.read_csv(os.path.join(self.root, self.files[1]), delimiter='\t', header=2, index_col=1).iloc[:,1:]
            # strip the ' (ACH...)' suffix so columns are bare cell-line names
            expression.columns = [c.split(' (ACH')[0] for c in expression.columns]
            features = expression
        if self.feature_type in ['mutation', 'both']:
            # Load gene mutation
            mutations = pd.read_csv(os.path.join(self.root, self.files[0]), delimiter='\t', header=2, index_col=1).iloc[:,1:]
            # keep only rows explicitly marked as mutation features
            mutations = mutations.iloc[[c.endswith('_MUT') for c in mutations.index]]
            features = mutations
        if self.feature_type == 'both':
            # stack expression on top of mutations for cell lines present in both tables
            both_cells = set(expression.columns) & set(mutations.columns)
            z = {}
            for c in both_cells:
                exp = expression[c].values
                if len(exp.shape) > 1:
                    # duplicated column name: keep the first occurrence only
                    exp = exp[:,0]
                z[c] = np.concatenate([exp, mutations[c].values])
            both_df = pd.DataFrame(z, index=[c for c in expression.index] + [c for c in mutations.index])
            features = both_df
        response = pd.read_csv(os.path.join(self.root, self.files[2]), header=0, index_col=[0,2])
        # Get per-drug X and y regression targets
        cells = response.index.levels[0]
        drugs = response.index.levels[1]
        X_drugs = [[] for _ in drugs]
        y_drugs = [[] for _ in drugs]
        for j, drug in enumerate(drugs):
            if self.task is not None and drug != self.task:
                # only materialise the requested drug's matrices
                continue
            for i,cell in enumerate(cells):
                if cell not in features.columns or (cell, drug) not in response.index:
                    continue
                X_drugs[j].append(features[cell].values)
                y_drugs[j].append(response.loc[(cell,drug), 'Amax'])
            print('{}: {}'.format(drug, len(y_drugs[j])))
        X_drugs = [np.array(x_i) for x_i in X_drugs]
        y_drugs = [np.array(y_i) for y_i in y_drugs]
        return X_drugs, y_drugs, drugs, cells, features.index
    def __repr__(self):
        """Multi-line summary: task, size, split, z-score flag and data root."""
        fmt_str = 'Dataset ' + self.__class__.__name__ + '\n'
        fmt_str += ' CCLE Task: {}\n'.format(self.task)
        fmt_str += ' Number of datapoints: {}\n'.format(self.__len__())
        tmp = 'train' if self.train is True else 'test'
        fmt_str += ' Split: {}\n'.format(tmp)
        fmt_str += ' Z-Score: {}\n'.format(self.z_score)
        fmt_str += ' Root Location: {}\n'.format(self.root)
        return fmt_str
if __name__=="__main__":
    # Smoke test: build the train/test splits and wrap them in DataLoaders.
    DIR_DATASET = '~/data/ccle'
    # Common random seed to all datasets:
    random_seed = 123
    # P(X,X) distribution:
    trainset = CCLE_Dataset(DIR_DATASET, train = True)
    print (trainset)
    tr_P = DataLoader(trainset, batch_size=50, shuffle=True, num_workers=1)
    # Test split shares the parent's permutation, keeping the two splits disjoint.
    trainset_t = CCLE_Dataset(DIR_DATASET, train = False, parent_dataset = trainset)
    print (trainset_t)
tr_P_t = DataLoader(trainset_t, batch_size=50, shuffle=True, num_workers=1) | datasets/ccle_dataset.py | from __future__ import print_function
import torch
from torch.utils.data import DataLoader
from torchvision import transforms
from torchvision.datasets.utils import download_url
import os
import errno
import numpy as np
import pandas as pd
class CCLE_Dataset(torch.utils.data.Dataset):
"""`CCLE` dataset from paper:
<NAME>., <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., ... & <NAME>. (2012).
The Cancer Cell Line Encyclopedia enables predictive modelling of anticancer drug sensitivity.
Nature, 483(7391), 603.
Note:
The X dataset is z-scored, which means that if it is partitioned into a training and test split, these have
to be re-z-scored according to the training split
Args:
root (string): Root directory of dataset where ``processed/training.pt``
and ``processed/test.pt`` exist.
task (string): Which task should provide output among:
['Bakery', 'Sour','Intensity','Sweet','Burnt','Pleasantness','Fish', 'Fruit','Garlic','Spices',
'Cold','Acid','Warm', Musky','Sweaty','Ammonia','Decayed','Wood','Grass', 'Flower','Chemical']
train (bool, optional): If True, creates dataset from the training split,
otherwise from the test split. The two split come from the same partition if the
random seed `seed` is the same.
test_size (int, float): how much data has to be reserved for test.
If test_size is int it will indicate the number of samples. If it's a float, it's the fraction
of samples over the total.
shuffle_target (bool): If True, it shuffle the targets (Y) compared to data (X) breaking the dependence between
X and Y, such that P(X,Y) = P(X)P(Y). If False, X and Y are sampled together from P(X,Y).
seed (int): seed of random number generator
z_score (bool): whether to z-score X features or not. z-score statistics are always computed on the training split.
Also, note that the whole X dataset is already z-score (see note above).
download (bool, optional): If true, downloads the dataset from the internet and
puts it in root directory. If dataset is already downloaded, it is not
downloaded again.
"""
urls = [
'https://www.dropbox.com/s/7iy0ght31hxhn7d/mutation.txt',
'https://www.dropbox.com/s/bplwquwbc7zleck/expression.txt',
'https://www.dropbox.com/s/78mp3ebnb4h6jsy/response.csv',
]
download_option = '?dl=1'
files = [
'mutation.txt',
'expression.txt',
'response.csv'
]
def __init__(self, root, task = 'PLX4720', feature_type ='both', train=True, test_size=0.1,
shuffle_targets=False, seed=1, z_score=True, download=True, verbose = False, parent_dataset = None):
self.root = os.path.expanduser(root)
if not isinstance(feature_type, str) or feature_type not in ['mutation', 'expression', 'both']:
raise ValueError('task must be one of the following task descriptors: ' + str(['mutation', 'expression', 'both']))
else:
self.feature_type = feature_type
self.fea_groundtruth = ['C11orf85', 'FXYD4', 'SLC28A2', 'MAML3_MUT', 'RAD51L1_MUT', 'GAPDHS', 'BRAF_MUT']
self.task = task # drug target
self.train = train
self.shuffle_targets = shuffle_targets
self.verbose = verbose
# Random number generator
self.rng = np.random.RandomState(seed)
if parent_dataset is None:
if download:
self.download()
if not self._check_exists():
raise RuntimeError('Dataset not found. You can use download=True to download it')
self.full_data, self.full_targets, self.features, all_features = self.load_data()
if isinstance(test_size, float):
if test_size > 1.0 or test_size < 0.0:
raise ValueError('test_size must be integer or a float between 0.0 and 1.0')
else:
self.test_size = int(len(self.full_data) * test_size)
elif isinstance(test_size, int):
if test_size >= len(self.full_data) or test_size < 0:
raise ValueError('integer test_size must be between 0 and {}'.format(len(self.full_data)))
else:
self.test_size = test_size
# Permutation indices:
perm = self.rng.permutation(len(self.full_data))
self.ind_train = perm[self.test_size:]
self.ind_test = perm[:self.test_size]
else:
self.full_data = parent_dataset.full_data
self.full_targets = parent_dataset.full_targets
self.features = parent_dataset.features
self.ind_train = parent_dataset.ind_train
self.ind_test = parent_dataset.ind_test
# get feature indexes
self.fea_groundtruth_idx = [self.features.get_loc(ftr) for ftr in self.fea_groundtruth]
if self.train:
self.data, self.targets = self.full_data[self.ind_train], self.full_targets[self.ind_train]
else:
self.data, self.targets = self.full_data[self.ind_test], self.full_targets[self.ind_test]
# z-score according to training split
if z_score:
mu = np.mean(self.full_data[self.ind_train], 0)
sd = np.std(self.full_data[self.ind_train], 0) + 1e-6
self.data = (self.data - mu) / sd
mu = np.mean(self.full_targets[self.ind_train], 0)
sd = np.std(self.full_targets[self.ind_train], 0) + 1e-6
self.targets = (self.targets - mu) / sd
self.z_score = z_score
self.data, self.targets = torch.FloatTensor(self.data), torch.FloatTensor(self.targets)
def get_feature_names(self):
return self.features.values
def get_groundtruth_features(self):
return self.fea_groundtruth_idx
def __getitem__(self, index):
"""
Args:
index (int): Index
Returns:
tuple: (image, target) where target is index of the target class.
"""
if self.shuffle_targets:
y_index = self.rng.randint(len(self.targets))
else:
y_index = index
return self.data[index], self.targets[y_index]
def __len__(self):
return len(self.data)
def _check_exists(self):
return all(map(lambda f: os.path.exists(os.path.join(self.root, f)), self.files))
def download(self):
"""Download the olfaction data if it doesn't exist in processed_folder already."""
if self._check_exists():
return
# download files
try:
os.makedirs(os.path.join(self.root))
except OSError as e:
if e.errno == errno.EEXIST:
pass
else:
raise
for url in self.urls:
filename = url.rpartition('/')[2]
download_url(url + self.download_option, root=self.root, filename=filename, md5=None)
def ccle_feature_filter(self, X, y, threshold=0.1):
# Remove all features that do not have at least pearson correlation at threshold with y
corrs = np.array([np.abs(np.corrcoef(x, y)[0,1]) if x.std() > 0 else 0 for x in X.T])
selected = corrs >= threshold
print(selected.sum(), selected.shape, corrs[34758])
return selected, corrs
def load_data(self):
X_drugs, y_drugs, drugs, cells, features = self.load_ccle()
drug_idx = drugs.get_loc(self.task)
if self.verbose:
print('Drug {}'.format(drugs[drug_idx]))
X_drug, y_drug = X_drugs[drug_idx], y_drugs[drug_idx]
# Specific to PLX4720. Filters out all features with pearson correlation less than 0.1 in magnitude
if self.verbose:
print('Filtering by correlation with signal first')
ccle_selected, corrs = self.ccle_feature_filter(X_drug, y_drug)
# keeps the ground truth features
for plx4720_feat in self.fea_groundtruth:
idx = features.get_loc(plx4720_feat)
ccle_selected[idx] = True
if self.verbose:
print('Correlation for {}: {:.4f}'.format(plx4720_feat, corrs[idx]))
ccle_features = features[ccle_selected]
# uses data from filtered features only
X_drug = X_drug[:, np.nonzero(ccle_selected)[0]]
return X_drug, y_drug, ccle_features, features
def load_ccle(self):
r"""Load CCLE dataset
This method is based on the code in https://github.com/tansey/hrt/blob/master/examples/ccle/main.py
published together with the paper Tansey et al. (http://arxiv.org/abs/1811.00645)
and is subject to the following license:
The MIT License (MIT)
Copyright (c) 2018 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
if self.feature_type in ['expression', 'both']:
# Load gene expression
expression = pd.read_csv(os.path.join(self.root, self.files[1]), delimiter='\t', header=2, index_col=1).iloc[:,1:]
expression.columns = [c.split(' (ACH')[0] for c in expression.columns]
features = expression
if self.feature_type in ['mutation', 'both']:
# Load gene mutation
mutations = pd.read_csv(os.path.join(self.root, self.files[0]), delimiter='\t', header=2, index_col=1).iloc[:,1:]
mutations = mutations.iloc[[c.endswith('_MUT') for c in mutations.index]]
features = mutations
if self.feature_type == 'both':
both_cells = set(expression.columns) & set(mutations.columns)
z = {}
for c in both_cells:
exp = expression[c].values
if len(exp.shape) > 1:
exp = exp[:,0]
z[c] = np.concatenate([exp, mutations[c].values])
both_df = pd.DataFrame(z, index=[c for c in expression.index] + [c for c in mutations.index])
features = both_df
response = pd.read_csv(os.path.join(self.root, self.files[2]), header=0, index_col=[0,2])
# Get per-drug X and y regression targets
cells = response.index.levels[0]
drugs = response.index.levels[1]
X_drugs = [[] for _ in drugs]
y_drugs = [[] for _ in drugs]
for j, drug in enumerate(drugs):
if self.task is not None and drug != self.task:
continue
for i,cell in enumerate(cells):
if cell not in features.columns or (cell, drug) not in response.index:
continue
X_drugs[j].append(features[cell].values)
y_drugs[j].append(response.loc[(cell,drug), 'Amax'])
print('{}: {}'.format(drug, len(y_drugs[j])))
X_drugs = [np.array(x_i) for x_i in X_drugs]
y_drugs = [np.array(y_i) for y_i in y_drugs]
return X_drugs, y_drugs, drugs, cells, features.index
def __repr__(self):
fmt_str = 'Dataset ' + self.__class__.__name__ + '\n'
fmt_str += ' CCLE Task: {}\n'.format(self.task)
fmt_str += ' Number of datapoints: {}\n'.format(self.__len__())
tmp = 'train' if self.train is True else 'test'
fmt_str += ' Split: {}\n'.format(tmp)
fmt_str += ' Z-Score: {}\n'.format(self.z_score)
fmt_str += ' Root Location: {}\n'.format(self.root)
return fmt_str
if __name__=="__main__":
DIR_DATASET = '~/data/ccle'
# Common random seed to all datasets:
random_seed = 123
# P(X,X) distribution:
trainset = CCLE_Dataset(DIR_DATASET, train = True)
print (trainset)
tr_P = DataLoader(trainset, batch_size=50, shuffle=True, num_workers=1)
trainset_t = CCLE_Dataset(DIR_DATASET, train = False, parent_dataset = trainset)
print (trainset_t)
tr_P_t = DataLoader(trainset_t, batch_size=50, shuffle=True, num_workers=1) | 0.773045 | 0.641366 |
from __future__ import unicode_literals
import sys
import json
import csv
if sys.version_info[0] < 3:
from backports import csv
from io import open
from io import StringIO
import ads
# Bibcodes to fetch from ADS; curated by hand.
MANUAL_ADDITION = [
    '2019JCAP...10..035H',
    '2019arXiv190805276D',
    '2019arXiv190104454S',
    '2018JCAP...11..009D',
    '2018JCAP...10..028M',
    '2018JCAP...07..043F',
    '2017JCAP...12..009S',
    '2019JCAP...11..023M',
    '2019arXiv190309049H '  # NOTE(review): trailing space looks accidental -- confirm bibcode
]
# Individual bibcodes to tag as 'non-journal' in apply_row_fixes().
NON_JOURNAL_ENTRIES = [
]
# Publication venues (bibstems) tagged as 'non-journal'.
NON_JOURNAL_BIBSTEM = ['BAAS']
# NOTE(review): the two lists below are not referenced anywhere in this script.
MANUAL_EXCLUSION = []
EXCLUDED_BIBSTEM = ['AAS', 'ehep.conf']
def fix_csv_header(header_line):
    """Rewrite the ADS custom-export CSV header to the column names used downstream.

    Renames several ADS field names and appends a "type" column (the per-row
    value for it comes from the %d"journal-paper" term of the export format).
    """
    renames = (
        ('"author"', '"authors"'),
        ('"pubdate"', '"date"'),
        ('"page,page_range"', '"page"'),
        ('"pub"', '"journal"'),
        ('"eid,identifier"', '"arxiv"'),
    )
    # Order matters: '"pubdate"' must be rewritten before the '"pub"' rule runs.
    for old, new in renames:
        header_line = header_line.replace(old, new)
    return header_line + ',"type"'
def retrieve_csv_from_ads(bibcodes):
    """Export the given ADS bibcodes as CSV text via the ADS export service.

    The custom format emits one CSV row per record, tagging each row with a
    literal "journal-paper" type value; fix_csv_header() renames the header
    columns to match.
    """
    export_service = ads.ExportQuery(bibcodes)
    export_service.format = 'custom'
    export_service.json_payload = json.dumps({
        'bibcode': bibcodes,
        'format': '%ZEncoding:csv %R %D %T %L %Y %q %V %p %X %d"journal-paper"\n'
    })
    # The first line of the service response is the CSV header.
    header, _, content = export_service.execute().partition('\n')
    return fix_csv_header(header) + '\n' + content
def apply_row_fixes(row, non_journal_entries=tuple(NON_JOURNAL_ENTRIES),
                    non_journal_bibstem=tuple(NON_JOURNAL_BIBSTEM)):
    """Tag a CSV row as 'non-journal' when its venue or bibcode is blacklisted; return the row."""
    is_non_journal = (row['journal'] in non_journal_bibstem
                      or row['bibcode'] in non_journal_entries)
    if is_non_journal:
        row['type'] = 'non-journal'
    return row
def update_and_write_csv(csv_text, output_path='publications.csv'):
    """Parse the ADS CSV text, apply per-row fixes, and write the result to *output_path*.

    The column set is taken unchanged from the input header; each row is passed
    through apply_row_fixes() before being written.
    """
    reader = csv.DictReader(StringIO(csv_text))
    with open(output_path, 'w') as output:
        writer = csv.DictWriter(output, fieldnames=reader.fieldnames,
                                lineterminator='\n')
        writer.writeheader()
        for row in reader:
            writer.writerow(apply_row_fixes(row))
def main():
    """Fetch the curated bibcode list from ADS and regenerate publications.csv."""
    bibcodes = list(MANUAL_ADDITION)
    update_and_write_csv(retrieve_csv_from_ads(bibcodes))
if __name__ == '__main__':
main() | docs/_data/_generate_publications_csv.py | from __future__ import unicode_literals
import sys
import json
import csv
if sys.version_info[0] < 3:
from backports import csv
from io import open
from io import StringIO
import ads
MANUAL_ADDITION = [
'2019JCAP...10..035H',
'2019arXiv190805276D',
'2019arXiv190104454S',
'2018JCAP...11..009D',
'2018JCAP...10..028M',
'2018JCAP...07..043F',
'2017JCAP...12..009S',
'2019JCAP...11..023M',
'2019arXiv190309049H '
]
NON_JOURNAL_ENTRIES = [
]
NON_JOURNAL_BIBSTEM = ['BAAS']
MANUAL_EXCLUSION = []
EXCLUDED_BIBSTEM = ['AAS', 'ehep.conf']
def fix_csv_header(header_line):
header_line = header_line.replace('"author"', '"authors"')
header_line = header_line.replace('"pubdate"', '"date"')
header_line = header_line.replace('"page,page_range"', '"page"')
header_line = header_line.replace('"pub"', '"journal"')
header_line = header_line.replace('"eid,identifier"', '"arxiv"')
header_line += ',"type"'
return header_line
def retrieve_csv_from_ads(bibcodes):
export_service = ads.ExportQuery(bibcodes)
export_service.format = 'custom'
export_service.json_payload = json.dumps({
'bibcode': bibcodes,
'format': '%ZEncoding:csv %R %D %T %L %Y %q %V %p %X %d"journal-paper"\n'
})
header, _, content = export_service.execute().partition('\n')
return fix_csv_header(header) + '\n' + content
def apply_row_fixes(row, non_journal_entries=tuple(NON_JOURNAL_ENTRIES),
non_journal_bibstem=tuple(NON_JOURNAL_BIBSTEM)):
if row['journal'] in non_journal_bibstem or row['bibcode'] in non_journal_entries:
row['type'] = 'non-journal'
return row
def update_and_write_csv(csv_text, output_path='publications.csv'):
reader = csv.DictReader(StringIO(csv_text))
with open(output_path, 'w') as output:
writer = csv.DictWriter(output, fieldnames=reader.fieldnames,
lineterminator='\n')
writer.writeheader()
for row in reader:
writer.writerow(apply_row_fixes(row))
def main():
bibcodes = list(MANUAL_ADDITION)
update_and_write_csv(retrieve_csv_from_ads(bibcodes))
if __name__ == '__main__':
main() | 0.242026 | 0.080755 |
import os
import argparse
import warnings
from itertools import chain
import glob
from collections import defaultdict
import numpy as np
import h5py
def _check_pathes(pathes, strict=True):
    """Return the subset of *pathes* that are real HDF5 files.

    A non-HDF5 entry raises ValueError when *strict* is True and is dropped
    with a warning otherwise.
    """
    hdf5_pathes = []
    for candidate in pathes:
        if not h5py.is_hdf5(candidate):
            if strict:
                raise ValueError("{} is not an HDF5 file".format(candidate))
            warnings.warn("Dropping {} since it isn't HDF5 file".format(candidate))
            continue
        hdf5_pathes.append(candidate)
    return hdf5_pathes
class Glob_HDF5(argparse.Action):
    """argparse action that glob-expands its values, keeps only HDF5 files and drops duplicates."""
    def __call__(self, parser, namespace, values, option_string):
        expanded = list(chain.from_iterable(glob.glob(pattern) for pattern in values))
        if not expanded:
            raise ValueError("The glob expansion result is empty")
        # silently drop non-HDF5 matches (warns), then deduplicate
        unique_hdf5 = list(set(_check_pathes(expanded, strict=False)))
        setattr(namespace, self.dest, unique_hdf5)
def get_all_keys(obj, keys=None):
    """Recursively collect the HDF5 names of *obj* and everything below it.

    Args:
        obj: an h5py Group or Dataset.
        keys: accumulator list used internally by the recursion; leave as None.
    Returns:
        list: names of *obj*, all sub-groups and all datasets beneath it.
    """
    if keys is None:  # fix: was `keys == None`; identity check is the correct idiom
        keys = []
    keys.append(obj.name)
    if isinstance(obj, h5py.Group):
        for item in obj:
            if isinstance(obj[item], h5py.Group):
                # sub-group: recurse (the recursive call appends the group's own name)
                get_all_keys(obj[item], keys)
            else:  # isinstance(obj[item], h5py.Dataset):
                keys.append(obj[item].name)
    return keys
def _depth(hdf_key):
    # Sort key: number of '/' separators, so parent groups sort before their members.
    return hdf_key.count('/')
class Merger():
    '''Simple CLI utility to merge HDF5 files with chi-square maps
    after likelihood profiling in different segments of the grid
    produced by scan module. Top-level attributes are assumed to be identical for all
    merged files'''
    def __init__(self, opts):
        """Merge every file in opts.input into a single HDF5 file at opts.output."""
        with h5py.File(opts.output, 'w') as f:
            for path in opts.input:
                # fix: `with` guarantees the input file is closed even when
                # merging raises (it was previously leaked on error)
                with h5py.File(path, 'r') as input_file:
                    for key in input_file:
                        try:
                            # easy case: recursively copy entire group
                            input_file.copy(key, f)
                        except ValueError:
                            # hard case: the group got splitted between files and
                            # simply copying won't work; identify which members
                            # are already in the output and copy only the rest
                            keys_in_input = set(get_all_keys(input_file[key]))
                            keys_in_output = set(get_all_keys(f[key]))
                            missing_keys = list(keys_in_input.difference(keys_in_output))
                            # sort keys so groups come before datasets
                            missing_keys.sort(key=_depth)
                            # make sure each missing group is created, attributes
                            # and datasets are copied
                            for missed_key in missing_keys:
                                input_object = input_file[missed_key]
                                if isinstance(input_object, h5py.Group):
                                    f.require_group(missed_key)
                                    for name, val in input_object.attrs.items():
                                        f[missed_key].attrs.create(name, val)
                                if isinstance(input_object, h5py.Dataset):
                                    f.create_dataset(missed_key, data=input_object[:])
                    # root attributes are assumed identical across inputs,
                    # so each file harmlessly overwrites the same values
                    for attr, value in input_file['/'].attrs.items():
                        f['/'].attrs[attr] = value
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='''Simple tool to merge HDF5 after
likelihood profiling by scan module''')
parser.add_argument('input', nargs='*', type=os.path.abspath,
action=Glob_HDF5, help='List of HDF5 files to merge with possible globbing, duplicates are removed')
parser.add_argument('--output', type=os.path.abspath, required=True,
help='Path to merged output file')
opts = parser.parse_args()
merger = Merger(opts) | macro/merge-hdf5.py | import os
import argparse
import warnings
from itertools import chain
import glob
from collections import defaultdict
import numpy as np
import h5py
def _check_pathes(pathes, strict=True):
filtered = []
for path in pathes:
if h5py.is_hdf5(path):
filtered.append(path)
else:
if strict:
raise ValueError("{} is not an HDF5 file".format(path))
else:
warnings.warn("Dropping {} since it isn't HDF5 file".format(path))
return filtered
class Glob_HDF5(argparse.Action):
def __call__(self, parser, namespace, values, option_string):
pathes = []
for path in values:
pathes.extend(glob.glob(path))
if not len(pathes):
raise ValueError("The glob expansion result is empty")
filtered = _check_pathes(pathes, strict=False)
final = list(set(filtered))
setattr(namespace, self.dest, final)
def get_all_keys(obj, keys=None):
if keys == None:
keys=[]
keys.append(obj.name)
if isinstance(obj, h5py.Group):
for item in obj:
if isinstance(obj[item], h5py.Group):
get_all_keys(obj[item], keys)
else: # isinstance(obj[item], h5py.Dataset):
keys.append(obj[item].name)
return keys
def _depth(hdf_key):
return hdf_key.count('/')
class Merger():
'''Simple CLI utility to merge HDF5 files with chi-square maps
after likelihood profiling in different segments of the grid
produced by scan module. Top-level attributes are assumed to be identical for all
merged files'''
def __init__(self, opts):
with h5py.File(opts.output, 'w') as f:
for path in opts.input:
input_file = h5py.File(path, 'r')
for key in input_file:
try:
# easy case: recursively copy entire group
input_file.copy(key, f)
except ValueError as e:
# hard case: the group got splitted between files and
# simply copying won't work, need to identify what
# groups already in the output and update it and
# then copy others
keys_in_input = set(get_all_keys(input_file[key]))
keys_in_ouput = set(get_all_keys(f[key]))
missing_keys = list(keys_in_input.difference(keys_in_ouput))
# sort keys so groups come before datasets
missing_keys.sort(key=_depth)
# make sure each missing group is created, attributes
# and datasets are copied
for missed_key in missing_keys:
input_object = input_file[missed_key]
if isinstance(input_object, h5py.Group):
f.require_group(missed_key)
for name, val in input_object.attrs.items():
f[missed_key].attrs.create(name, val)
if isinstance(input_object, h5py.Dataset):
f.create_dataset(missed_key, data=input_object[:])
for attr, value in input_file['/'].attrs.items():
f['/'].attrs[attr] = value
input_file.close()
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='''Simple tool to merge HDF5 after
likelihood profiling by scan module''')
parser.add_argument('input', nargs='*', type=os.path.abspath,
action=Glob_HDF5, help='List of HDF5 files to merge with possible globbing, duplicates are removed')
parser.add_argument('--output', type=os.path.abspath, required=True,
help='Path to merged output file')
opts = parser.parse_args()
merger = Merger(opts) | 0.427994 | 0.14069 |
import time
# bin packing problem
class Shelf(object):
    """
    Single shelf object for items that keeps a running sum.

    Attributes:
        items: widths placed on this shelf, in insertion order.
        sum: total width consumed so far.
        width: capacity of the shelf.
        space: remaining capacity (width - sum).
    """
    def __init__(self, W):
        self.items = []
        self.sum = 0
        self.width = W
        # Fix: initialize remaining space up front. Previously `space` was only
        # assigned inside append(), so reading it on a fresh shelf raised
        # AttributeError.
        self.space = W
    def append(self, item):
        """Place *item* on the shelf and update the running totals."""
        self.items.append(item)
        self.sum += item
        self.space = self.width - self.sum
    def __str__(self):
        """
        Printable representation
        """
        return 'Shelf(sum=%d, items=%s)' % (self.sum, str(self.items))
def pack(widths, W, verbose=True):
    """
    First-fit-decreasing shelf packing.

    Input:
        widths: list of numbers that represents the widths of all the objects
        W: width of single shelf
    Return:
        Tuple (number of shelves used, list of Shelf objects).
    """
    shelves = []
    # Place widest items first (classic first-fit-decreasing heuristic).
    for item in sorted(widths, reverse=True):
        placed = False
        for candidate in shelves:
            if candidate.sum + item <= candidate.width:
                if verbose:
                    print('Adding', item, 'to', candidate)
                candidate.append(item)
                placed = True
                break
        if not placed:
            # no existing shelf has room -- open a new one
            if verbose:
                print('Making new shelf for', item)
            fresh = Shelf(W)
            fresh.append(item)
            shelves.append(fresh)
    return len(shelves), shelves
# brute-force solution
def permuteUnique(nums):
    """
    Return all distinct permutations of *nums* (duplicates in the input
    produce each arrangement exactly once).
    """
    perms = [[]]
    for value in nums:
        extended = []
        for perm in perms:
            # inserting only up to (and including) the first occurrence of
            # `value` avoids generating the same permutation twice
            limit = (perm + [value]).index(value) + 1
            for pos in range(limit):
                extended.append(perm[:pos] + [value] + perm[pos:])
        perms = extended
    return perms
def pack_brute(widths, W, verbose=0, timeout=60):
    """
    Main packing function for the brute-force solution
    Input:
        widths: list of numbers that represents the widths of all the objects
        W: width of single shelf
        verbose: 0 quiet, 1 progress per permutation, 2 per-item detail
        timeout: wall-clock budget in seconds; (None, None) is returned on expiry
    Return:
        Tuple (minimal number of shelves, list of Shelf objects realising it),
        or (None, None) when the timeout is exceeded.
    """
    def _above_timeout(t_start, timeout):
        # True once the wall-clock budget is spent.
        return time.time() - t_start > timeout
    t_start = time.time()
    print('making permutations...')
    all_widths = permuteUnique(widths)  # create all the permutations
    print('made!')
    # Fix: start one above the worst case so the first complete packing always
    # records its shelves; previously, when the optimum equalled len(widths),
    # `best_shelves` was returned empty.
    min_shelves = len(widths) + 1
    best_shelves = []
    if _above_timeout(t_start, timeout):
        return None, None
    for i, width in enumerate(all_widths):
        if verbose > 0:
            print('Testing {} of total {} possibilities'
                  .format(i, len(all_widths)))
        shelves = []  # initialize the shelves
        for item in width:
            if len(shelves) > min_shelves:  # cannot beat the incumbent, prune
                break
            # Try to fit item into one shelf
            for shelf in shelves:
                if _above_timeout(t_start, timeout):
                    return None, None
                if shelf.sum + item <= shelf.width:
                    if verbose > 1:
                        print('Adding', item, 'to', shelf)
                    shelf.append(item)
                    break
            else:
                # item didn't fit into any shelf, start a new one
                if verbose > 1:
                    print('Making new shelf for', item)
                shelf = Shelf(W)
                shelf.append(item)
                shelves.append(shelf)
        # keep the best packing seen so far
        if len(shelves) < min_shelves:
            min_shelves = len(shelves)
            best_shelves = shelves[:]
return min_shelves, best_shelves | flask_citibike/bin_packing.py | import time
# bin packing problem
class Shelf(object):
"""
Single shelf object for items that keeps a running sum.
"""
def __init__(self, W):
self.items = []
self.sum = 0
self.width = W
def append(self, item):
self.items.append(item)
self.sum += item
self.space = self.width - self.sum
def __str__(self):
"""
Printable representation
"""
return 'Shelf(sum=%d, items=%s)' % (self.sum, str(self.items))
def pack(widths, W, verbose=True):
"""
Main packing function with first fit descending.
Input:
widths: list of numbers that represents the widths of all the objects
W: width of single shelf
Return:
Total number of shelves needed to allocate all the objects.
"""
# sort the objects with decreasing width
widths = sorted(widths, reverse=True)
shelves = [] # initialze the shelves
for item in widths:
# Try to fit item into one shelf
for shelf in shelves:
if shelf.sum + item <= shelf.width:
if verbose:
print('Adding', item, 'to', shelf)
shelf.append(item)
break
else:
# item didn't fit into shelf, start a new shelf
if verbose:
print('Making new shelf for', item)
shelf = Shelf(W)
shelf.append(item)
shelves.append(shelf)
return len(shelves), shelves
# brute-force solution
def permuteUnique(nums):
"""
To generate only unique permutations
"""
res = [[]]
for n in nums:
res = [l[: i] + [n] + l[i:]
for l in res
for i in range((l + [n]).index(n) + 1)]
return res
def pack_brute(widths, W, verbose=0, timeout=60):
"""
Main packing function for the brute-force solution
Input:
widths: list of numbers that represents the widths of all the objects
W: width of single shelf
Return:
Total number of shelves needed to allocate all the objects.
"""
def _above_timeout(t_start, timeout):
if time.time() - t_start > timeout:
return True
return False
t_start = time.time()
print('making permutations...')
all_widths = permuteUnique(widths) # create all the permutations
print('made!')
min_shelves = len(widths)
best_shelves = []
if _above_timeout(t_start, timeout):
return None, None
for i, width in enumerate(all_widths):
if verbose > 0:
print('Testing {} of total {} possibilities'
.format(i, len(all_widths)))
shelves = [] # initialze the shelves
for item in width:
if len(shelves) > min_shelves: # no need to continue
break
# Try to fit item into one shelf
for shelf in shelves:
if _above_timeout(t_start, timeout):
return None, None
if shelf.sum + item <= shelf.width:
if verbose > 1:
print('Adding', item, 'to', shelf)
shelf.append(item)
break
else:
# item didn't fit into shelf, start a new shelf
if verbose > 1:
print('Making new shelf for', item)
shelf = Shelf(W)
shelf.append(item)
shelves.append(shelf)
# update the min_shelves
if len(shelves) < min_shelves:
min_shelves = len(shelves)
best_shelves = shelves[:]
return min_shelves, best_shelves | 0.610337 | 0.353875 |
import sys
if(len(sys.argv)!=2):
print("usage: python "+sys.argv[0]+" <integer greater than 0 and less than 10^52>")
sys.exit()
num=int(sys.argv[1])
if(num<=0 or num>=(10**52)):
print("usage: python "+sys.argv[0]+" <integer greater than 0 and less than 10^52>")
sys.exit()
out=""
def il(number):
if(number == 1):
return "일"
elif(number == 2):
return "이"
elif(number == 3):
return "삼"
elif(number == 4):
return "사"
elif(number == 5):
return "오"
elif(number == 6):
return "육"
elif(number == 7):
return "칠"
elif(number == 8):
return "팔"
elif(number == 9):
return "구"
else:
return ""
def ship(number):
if(number==10):
return "십"
return il(number//10)+"십"
def baek(number):
if(number==100):
return "백"
return il(number//100)+"백"
def cheon(number):
if(number==1000):
return "천"
return il(number//1000)+"천"
def man(number):
print(number)
if(number//(10**4)==1):
return "만"
return manStep(number//(10**4), True)+"만"
def eok(number):
if(number == (10**8)):
return "억"
return manStep(number//(10**8), True)+"억"
def jo(number):
if(number == (10**12)):
return "조"
return manStep(number//(10**12), True)+"조"
def gyeong(number):
if(number == (10**16)):
return "경"
return manStep(number//(10**16), True)+"경"
def hae(number):
if(number == (10**20)):
return "해"
return manStep(number//(10**20), True)+"해"
def ja(number):
if(number == (10**24)):
return "자"
return manStep(number//(10**24), True)+"자"
def yang(number):
if(number == (10**28)):
return "양"
return manStep(number//(10**28), True)+"양"
def gu(number):
if(number == (10**32)):
return "구"
return manStep(number//(10**32), True)+"구"
def gan(number):
if(number == (10**36)):
return "간"
return manStep(number//(10**36), True)+"간"
def jeong(number):
if(number == (10**40)):
return "정"
return manStep(number//(10**40), True)+"정"
def jae(number):
if(number == (10**44)):
return "재"
return manStep(number//(10**44), True)+"재"
def geuk(number):
if(number == (10**48)):
return "극"
return manStep(number//(10**48), True)+"극"
def manStep(number, prefix):
if(number==0):
return ""
cheonStep=number//1000
baekStep=number%1000
shipStep=baekStep%100
ilStep=shipStep%10
baekStep=baekStep//100
shipStep=shipStep//10
space=""
if(ilStep==0 or (prefix and ilStep==1)):
ilStep=""
else:
ilStep=il(ilStep)
space=" "
if(shipStep == 0):
shipStep = ""
elif(shipStep == 1):
shipStep = "십"+space
else:
shipStep = il(shipStep)+"십"+space
space = " "
if(baekStep == 0):
baekStep = ""
elif(baekStep == 1):
baekStep = "백"+space
else:
baekStep = il(baekStep)+"백"+space
space = " "
if(cheonStep == 0):
cheonStep = ""
elif(cheonStep == 1):
cheonStep = "천"+space
else:
cheonStep = il(cheonStep)+"천"+space
return cheonStep+baekStep+shipStep+ilStep
while(num>0):
if(num < 10**4):
out += manStep(num, False)
num = 0
elif(num < (10**8)):
out += man(num)
num = num % (10**4)
elif(num < (10**12)):
out += eok(num)
num = num % (10**8)
elif(num < (10**16)):
out += jo(num)
num = num % (10**12)
elif(num < (10**20)):
out += gyeong(num)
num = num % (10**16)
elif(num < (10**24)):
out += hae(num)
num = num % (10**20)
elif(num < (10**28)):
out += ja(num)
num = num % (10**24)
elif(num < (10**32)):
out += yang(num)
num = num % (10**28)
elif(num < (10**36)):
out += gu(num)
num = num % (10**32)
elif(num < (10**40)):
out += gan(num)
num = num % (10**36)
elif(num < (10**44)):
out += jeong(num)
num = num % (10**40)
elif(num < (10**48)):
out += jae(num)
num = num % (10**44)
else:
out += geuk(num)
num = num % (10**48)
if(num!=0):
out+=" "
print(out) | sino-koreanNums.py | import sys
if(len(sys.argv)!=2):
print("usage: python "+sys.argv[0]+" <integer greater than 0 and less than 10^52>")
sys.exit()
num=int(sys.argv[1])
if(num<=0 or num>=(10**52)):
print("usage: python "+sys.argv[0]+" <integer greater than 0 and less than 10^52>")
sys.exit()
out=""
def il(number):
if(number == 1):
return "일"
elif(number == 2):
return "이"
elif(number == 3):
return "삼"
elif(number == 4):
return "사"
elif(number == 5):
return "오"
elif(number == 6):
return "육"
elif(number == 7):
return "칠"
elif(number == 8):
return "팔"
elif(number == 9):
return "구"
else:
return ""
def ship(number):
if(number==10):
return "십"
return il(number//10)+"십"
def baek(number):
if(number==100):
return "백"
return il(number//100)+"백"
def cheon(number):
if(number==1000):
return "천"
return il(number//1000)+"천"
def man(number):
print(number)
if(number//(10**4)==1):
return "만"
return manStep(number//(10**4), True)+"만"
def eok(number):
if(number == (10**8)):
return "억"
return manStep(number//(10**8), True)+"억"
def jo(number):
if(number == (10**12)):
return "조"
return manStep(number//(10**12), True)+"조"
def gyeong(number):
if(number == (10**16)):
return "경"
return manStep(number//(10**16), True)+"경"
def hae(number):
if(number == (10**20)):
return "해"
return manStep(number//(10**20), True)+"해"
def ja(number):
if(number == (10**24)):
return "자"
return manStep(number//(10**24), True)+"자"
def yang(number):
if(number == (10**28)):
return "양"
return manStep(number//(10**28), True)+"양"
def gu(number):
if(number == (10**32)):
return "구"
return manStep(number//(10**32), True)+"구"
def gan(number):
if(number == (10**36)):
return "간"
return manStep(number//(10**36), True)+"간"
def jeong(number):
if(number == (10**40)):
return "정"
return manStep(number//(10**40), True)+"정"
def jae(number):
if(number == (10**44)):
return "재"
return manStep(number//(10**44), True)+"재"
def geuk(number):
if(number == (10**48)):
return "극"
return manStep(number//(10**48), True)+"극"
def manStep(number, prefix):
if(number==0):
return ""
cheonStep=number//1000
baekStep=number%1000
shipStep=baekStep%100
ilStep=shipStep%10
baekStep=baekStep//100
shipStep=shipStep//10
space=""
if(ilStep==0 or (prefix and ilStep==1)):
ilStep=""
else:
ilStep=il(ilStep)
space=" "
if(shipStep == 0):
shipStep = ""
elif(shipStep == 1):
shipStep = "십"+space
else:
shipStep = il(shipStep)+"십"+space
space = " "
if(baekStep == 0):
baekStep = ""
elif(baekStep == 1):
baekStep = "백"+space
else:
baekStep = il(baekStep)+"백"+space
space = " "
if(cheonStep == 0):
cheonStep = ""
elif(cheonStep == 1):
cheonStep = "천"+space
else:
cheonStep = il(cheonStep)+"천"+space
return cheonStep+baekStep+shipStep+ilStep
while(num>0):
if(num < 10**4):
out += manStep(num, False)
num = 0
elif(num < (10**8)):
out += man(num)
num = num % (10**4)
elif(num < (10**12)):
out += eok(num)
num = num % (10**8)
elif(num < (10**16)):
out += jo(num)
num = num % (10**12)
elif(num < (10**20)):
out += gyeong(num)
num = num % (10**16)
elif(num < (10**24)):
out += hae(num)
num = num % (10**20)
elif(num < (10**28)):
out += ja(num)
num = num % (10**24)
elif(num < (10**32)):
out += yang(num)
num = num % (10**28)
elif(num < (10**36)):
out += gu(num)
num = num % (10**32)
elif(num < (10**40)):
out += gan(num)
num = num % (10**36)
elif(num < (10**44)):
out += jeong(num)
num = num % (10**40)
elif(num < (10**48)):
out += jae(num)
num = num % (10**44)
else:
out += geuk(num)
num = num % (10**48)
if(num!=0):
out+=" "
print(out) | 0.025945 | 0.149128 |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pdb
import requests as req
import sqlalchemy as sa
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy.dialects import postgresql as psql
import psycopg2.extensions as psql_ext
from PyLagoon.config import LagoonConfig
from PyLagoon.source import Source
class PGMeta:
"""Given a list of Sources, builds the classes necessary
to the querying EDSL.
Can be indexed by Sources or source view names."""
def __init__(self, sources):
self.__view_names = [s.view_name for s in sources]
self.__md = sa.MetaData()
for s in sources:
self.__add_source_to_md(s)
self.__base = automap_base(metadata=self.__md)
self.__base.prepare()
@property
def sql_tables(self):
"""Gives the sql schema of the registered table"""
return self.__md.tables
def __getitem__(self, key):
if isinstance(key, Source):
key = key.view_name
return self.__base.classes[key]
def __sql_column_from_json_column(self, col):
typ = (
col["type"].replace(" ", "_").replace("DOCUMENT", "TEXT")
) # "DOUBLE PRECISION" -> "DOUBLE_PRECISION"
subtyp = None
sql_typ = None
if isinstance(typ, list):
typ = typ[0]
if len(typ) > 1:
subtyp = typ[1]
if hasattr(psql, typ):
sql_typ = getattr(psql, typ)
elif hasattr(sa, typ):
sql_typ = getattr(sa, typ)
if not sql_typ:
raise Exception("Type not supported by sqlalchemy/postgresql: " + typ)
else:
return sa.Column(col["inView"], sql_typ)
def __add_source_to_md(self, source):
sa.Table(
source.view_name,
self.__md,
sa.Column("ix", sa.Integer, primary_key=True),
*(self.__sql_column_from_json_column(c) for c in source.columns.values()),
schema=source.schema
)
def query(self, *sources):
"""Starts a query on the given sources, which can
be strings (view names), instances of Source or classes generated
by sqlalchemy, as returned by self[...].
If no source is given, starts a query on every
source know by this PGMeta.
Returns a sqlalchemy.orm.query.Query"""
if len(sources) == 0:
sources = self.__view_names
return Session().query(
*((self[s] if isinstance(s, str) or isinstance(s, Source) else s) for s in sources)
)
def build_sql_query(query):
"""Takes a sqlalchemy.orm.query.Query and returns
a string representing the final query to be addressed
to the lagoon-server"""
d = psql.dialect()
q = query.statement.compile(dialect=d)
# The following is not ideal, as q.params and str(q) should
# normally be passed separately to the PostgreSQL database:
ps = {}
for k, v in q.params.items():
ps[k] = psql_ext.adapt(v).getquoted().decode(d.encoding)
return str(q) % ps | clients/PyLagoon/PyLagoon/postgresql.py |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pdb
import requests as req
import sqlalchemy as sa
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy.dialects import postgresql as psql
import psycopg2.extensions as psql_ext
from PyLagoon.config import LagoonConfig
from PyLagoon.source import Source
class PGMeta:
"""Given a list of Sources, builds the classes necessary
to the querying EDSL.
Can be indexed by Sources or source view names."""
def __init__(self, sources):
self.__view_names = [s.view_name for s in sources]
self.__md = sa.MetaData()
for s in sources:
self.__add_source_to_md(s)
self.__base = automap_base(metadata=self.__md)
self.__base.prepare()
@property
def sql_tables(self):
"""Gives the sql schema of the registered table"""
return self.__md.tables
def __getitem__(self, key):
if isinstance(key, Source):
key = key.view_name
return self.__base.classes[key]
def __sql_column_from_json_column(self, col):
typ = (
col["type"].replace(" ", "_").replace("DOCUMENT", "TEXT")
) # "DOUBLE PRECISION" -> "DOUBLE_PRECISION"
subtyp = None
sql_typ = None
if isinstance(typ, list):
typ = typ[0]
if len(typ) > 1:
subtyp = typ[1]
if hasattr(psql, typ):
sql_typ = getattr(psql, typ)
elif hasattr(sa, typ):
sql_typ = getattr(sa, typ)
if not sql_typ:
raise Exception("Type not supported by sqlalchemy/postgresql: " + typ)
else:
return sa.Column(col["inView"], sql_typ)
def __add_source_to_md(self, source):
sa.Table(
source.view_name,
self.__md,
sa.Column("ix", sa.Integer, primary_key=True),
*(self.__sql_column_from_json_column(c) for c in source.columns.values()),
schema=source.schema
)
def query(self, *sources):
"""Starts a query on the given sources, which can
be strings (view names), instances of Source or classes generated
by sqlalchemy, as returned by self[...].
If no source is given, starts a query on every
source know by this PGMeta.
Returns a sqlalchemy.orm.query.Query"""
if len(sources) == 0:
sources = self.__view_names
return Session().query(
*((self[s] if isinstance(s, str) or isinstance(s, Source) else s) for s in sources)
)
def build_sql_query(query):
"""Takes a sqlalchemy.orm.query.Query and returns
a string representing the final query to be addressed
to the lagoon-server"""
d = psql.dialect()
q = query.statement.compile(dialect=d)
# The following is not ideal, as q.params and str(q) should
# normally be passed separately to the PostgreSQL database:
ps = {}
for k, v in q.params.items():
ps[k] = psql_ext.adapt(v).getquoted().decode(d.encoding)
return str(q) % ps | 0.874305 | 0.25972 |
class GeometryData:
""" Class which holds the geometry data of a ObjId
"""
def __init__(self, subdetid = 0, discriminator = ()):
self.subdetid = subdetid
self.discriminator = discriminator
# ObjId names from Alignment/CommonAlignment/interface/StructureType.h
data = {-1: GeometryData(), # notfound
0: GeometryData(), # invalid
1: GeometryData(), # AlignableDetUnit
2: GeometryData(), # AlignableDet
3: GeometryData(1), # TPBModule
4: GeometryData(1, ("Half", "Layer", "Rod")), # TPBLadder
5: GeometryData(1, ("Half", "Layer")), # TPBLayer
6: GeometryData(1, ("Half",)), # TPBHalfBarrel
7: GeometryData(1), # TPBBarrel
8: GeometryData(2), # TPEModule
9: GeometryData(2, ("Side", "Half", "Layer", "Blade", "Panel")), # TPEPanel
10: GeometryData(2, ("Side", "Half", "Layer", "Blade")), # TPEBlade
11: GeometryData(2, ("Side", "Half", "Layer")), # TPEHalfDisk
12: GeometryData(2, ("Side", "Half")), # TPEHalfCylinder
13: GeometryData(2, ("Side",)), # TPEEndcap
14: GeometryData(3), # TIBModule
15: GeometryData(3), # TIBString
16: GeometryData(3, ("Side", "Layer", "Half", "OuterInner")), # TIBSurface
17: GeometryData(3, ("Side", "Layer", "Half")), # TIBHalfShell
18: GeometryData(3, ("Side", "Layer")), # TIBLayer
19: GeometryData(3, ("Side",)), # TIBHalfBarrel
20: GeometryData(3), # TIBBarrel
21: GeometryData(4), # TIDModule
22: GeometryData(4, ("Side", "Layer", "Ring", "OuterInner")), # TIDSide
23: GeometryData(4, ("Side", "Layer", "Ring")), # TIDRing
24: GeometryData(4, ("Side", "Layer")), # TIDDisk
25: GeometryData(4, ("Side",)), # TIDEndcap
26: GeometryData(5), # TOBModule
27: GeometryData(5, ("Side", "Layer", "Rod")), # TOBRod
28: GeometryData(5, ("Side", "Layer")), # TOBLayer
29: GeometryData(5, ("Side",)), # TOBHalfBarrel
30: GeometryData(5), # TOBBarrel
31: GeometryData(6), # TECModule
32: GeometryData(6, ("Side", "Layer", "OuterInner", "Petal", "Ring")), # TECRing
33: GeometryData(6, ("Side", "Layer", "OuterInner", "Petal")), # TECPetal
34: GeometryData(6, ("Side", "Layer", "OuterInner")), # TECSide
35: GeometryData(6, ("Side", "Layer")), # TECDisk
36: GeometryData(6, ("Side",)), # TECEndcap
37: GeometryData(), # Pixel
38: GeometryData(), # Strip
39: GeometryData(), # Tracker
100: GeometryData(), # AlignableDTBarrel
101: GeometryData(), # AlignableDTWheel
102: GeometryData(), # AlignableDTStation
103: GeometryData(), # AlignableDTChamber
104: GeometryData(), # AlignableDTSuperLayer
105: GeometryData(), # AlignableDTLayer
106: GeometryData(), # AlignableCSCEndcap
107: GeometryData(), # AlignableCSCStation
108: GeometryData(), # AlignableCSCRing
109: GeometryData(), # AlignableCSCChamber
110: GeometryData(), # AlignableCSCLayer
111: GeometryData(), # AlignableMuon
112: GeometryData(), # Detector
1000: GeometryData(), # Extras
1001: GeometryData(), # BeamSpot
} | Alignment/MillePedeAlignmentAlgorithm/python/mpsvalidate/geometrydata.py |
class GeometryData:
""" Class which holds the geometry data of a ObjId
"""
def __init__(self, subdetid = 0, discriminator = ()):
self.subdetid = subdetid
self.discriminator = discriminator
# ObjId names from Alignment/CommonAlignment/interface/StructureType.h
data = {-1: GeometryData(), # notfound
0: GeometryData(), # invalid
1: GeometryData(), # AlignableDetUnit
2: GeometryData(), # AlignableDet
3: GeometryData(1), # TPBModule
4: GeometryData(1, ("Half", "Layer", "Rod")), # TPBLadder
5: GeometryData(1, ("Half", "Layer")), # TPBLayer
6: GeometryData(1, ("Half",)), # TPBHalfBarrel
7: GeometryData(1), # TPBBarrel
8: GeometryData(2), # TPEModule
9: GeometryData(2, ("Side", "Half", "Layer", "Blade", "Panel")), # TPEPanel
10: GeometryData(2, ("Side", "Half", "Layer", "Blade")), # TPEBlade
11: GeometryData(2, ("Side", "Half", "Layer")), # TPEHalfDisk
12: GeometryData(2, ("Side", "Half")), # TPEHalfCylinder
13: GeometryData(2, ("Side",)), # TPEEndcap
14: GeometryData(3), # TIBModule
15: GeometryData(3), # TIBString
16: GeometryData(3, ("Side", "Layer", "Half", "OuterInner")), # TIBSurface
17: GeometryData(3, ("Side", "Layer", "Half")), # TIBHalfShell
18: GeometryData(3, ("Side", "Layer")), # TIBLayer
19: GeometryData(3, ("Side",)), # TIBHalfBarrel
20: GeometryData(3), # TIBBarrel
21: GeometryData(4), # TIDModule
22: GeometryData(4, ("Side", "Layer", "Ring", "OuterInner")), # TIDSide
23: GeometryData(4, ("Side", "Layer", "Ring")), # TIDRing
24: GeometryData(4, ("Side", "Layer")), # TIDDisk
25: GeometryData(4, ("Side",)), # TIDEndcap
26: GeometryData(5), # TOBModule
27: GeometryData(5, ("Side", "Layer", "Rod")), # TOBRod
28: GeometryData(5, ("Side", "Layer")), # TOBLayer
29: GeometryData(5, ("Side",)), # TOBHalfBarrel
30: GeometryData(5), # TOBBarrel
31: GeometryData(6), # TECModule
32: GeometryData(6, ("Side", "Layer", "OuterInner", "Petal", "Ring")), # TECRing
33: GeometryData(6, ("Side", "Layer", "OuterInner", "Petal")), # TECPetal
34: GeometryData(6, ("Side", "Layer", "OuterInner")), # TECSide
35: GeometryData(6, ("Side", "Layer")), # TECDisk
36: GeometryData(6, ("Side",)), # TECEndcap
37: GeometryData(), # Pixel
38: GeometryData(), # Strip
39: GeometryData(), # Tracker
100: GeometryData(), # AlignableDTBarrel
101: GeometryData(), # AlignableDTWheel
102: GeometryData(), # AlignableDTStation
103: GeometryData(), # AlignableDTChamber
104: GeometryData(), # AlignableDTSuperLayer
105: GeometryData(), # AlignableDTLayer
106: GeometryData(), # AlignableCSCEndcap
107: GeometryData(), # AlignableCSCStation
108: GeometryData(), # AlignableCSCRing
109: GeometryData(), # AlignableCSCChamber
110: GeometryData(), # AlignableCSCLayer
111: GeometryData(), # AlignableMuon
112: GeometryData(), # Detector
1000: GeometryData(), # Extras
1001: GeometryData(), # BeamSpot
} | 0.781205 | 0.47859 |
from django.http import HttpResponse, JsonResponse
from polls.models import Recommendation
from django.template import loader
from django.views.decorators.csrf import csrf_exempt
from django.core import serializers
import json
from django.db import connection
from django.conf import settings
from django.views.generic import View
import logging
import os
class FrontendAppView(View):
"""
Serves the complied frontend entry point
"""
def get(self, request):
try:
with open(os.path.join(settings.REACT_APP_DIR, 'build', 'index.html')) as f:
return HttpResponse(f.read())
except IOError:
print('hi')
logging.exception('Production build of app not found')
return HttpResponse(
status = 501,
)
# Route to to pull the three highest rated recommendations from the database and send to client
@csrf_exempt
def index(request):
# get the top three recommendations based on rating
getRecommendations = Recommendation.objects.order_by('-rating')[0:3]
print(getRecommendations)
if len(getRecommendations) == 0:
return HttpResponse('There are no recommendations')
elif len(getRecommendations) < 3:
return HttpResponse('There are not enough recommendations')
else:
recommendations_serialized = serializers.serialize('json', getRecommendations)
# send it over to react to render components
return HttpResponse(recommendations_serialized)
# Receives the new ratings from the client and updates the database with it
@csrf_exempt
def rating(request):
response = json.loads(request.body)
recommendations = response['recommendations']
for recommendation in recommendations:
currRating = recommendation['rating']
currId = recommendation['id']
responseMsg = update_rating(currRating, currId)
return HttpResponse(responseMsg)
# Calls UPDATE SQL query given a rating and id
def update_rating(currRating, currId):
# validate inputs
badInputs = 'Inputs did not match the type'
executed = 'ratings were changed'
if type(currRating) != int or type(currId) != int:
return badInputs
with connection.cursor() as cursor:
cursor.execute('UPDATE polls_recommendation SET rating = %s WHERE id = %s;', [currRating, currId])
return executed | webapp/polls/views.py | from django.http import HttpResponse, JsonResponse
from polls.models import Recommendation
from django.template import loader
from django.views.decorators.csrf import csrf_exempt
from django.core import serializers
import json
from django.db import connection
from django.conf import settings
from django.views.generic import View
import logging
import os
class FrontendAppView(View):
"""
Serves the complied frontend entry point
"""
def get(self, request):
try:
with open(os.path.join(settings.REACT_APP_DIR, 'build', 'index.html')) as f:
return HttpResponse(f.read())
except IOError:
print('hi')
logging.exception('Production build of app not found')
return HttpResponse(
status = 501,
)
# Route to to pull the three highest rated recommendations from the database and send to client
@csrf_exempt
def index(request):
# get the top three recommendations based on rating
getRecommendations = Recommendation.objects.order_by('-rating')[0:3]
print(getRecommendations)
if len(getRecommendations) == 0:
return HttpResponse('There are no recommendations')
elif len(getRecommendations) < 3:
return HttpResponse('There are not enough recommendations')
else:
recommendations_serialized = serializers.serialize('json', getRecommendations)
# send it over to react to render components
return HttpResponse(recommendations_serialized)
# Receives the new ratings from the client and updates the database with it
@csrf_exempt
def rating(request):
response = json.loads(request.body)
recommendations = response['recommendations']
for recommendation in recommendations:
currRating = recommendation['rating']
currId = recommendation['id']
responseMsg = update_rating(currRating, currId)
return HttpResponse(responseMsg)
# Calls UPDATE SQL query given a rating and id
def update_rating(currRating, currId):
# validate inputs
badInputs = 'Inputs did not match the type'
executed = 'ratings were changed'
if type(currRating) != int or type(currId) != int:
return badInputs
with connection.cursor() as cursor:
cursor.execute('UPDATE polls_recommendation SET rating = %s WHERE id = %s;', [currRating, currId])
return executed | 0.462959 | 0.086131 |
import tensorflow as tf
class ShuffleNetV2:
def __init__(self, complexity=2, num_classes=10):
self.output_classes = num_classes
self.complexity = complexity
if self.complexity == 0.5:
self.out_filters = [48, 96, 192]
elif self.complexity == 1:
self.out_filters = [116, 232, 464]
elif self.complexity == 1.5:
self.out_filters = [176, 352, 704]
elif self.complexity == 2:
self.out_filters = [244, 488, 976]
else:
raise ValueError('[ShuffleNetV2] complexity is invalid, try 0.5, 1, 1.5, 2')
@staticmethod
def split_unit(unit_input, split_ratio=0.5):
input_chn = int(unit_input.shape[-1])
chn_split_x = input_chn * split_ratio
chn_split_y = input_chn * (1 - split_ratio)
return unit_input[:, :, :, 0:int(chn_split_x)], unit_input[:, :, :, int(chn_split_y):input_chn]
@staticmethod
def shuffle_unit(unit_input, groups=2):
img_num, img_height, img_width, img_chn = unit_input.shape
x = tf.reshape(unit_input, [-1, img_height, img_width, groups, img_chn//groups])
x = tf.transpose(x, perm=[0, 1, 2, 4, 3])
unit = tf.reshape(x, [-1, img_height, img_width, img_chn])
return unit
def bottleneck(self, block_input, out_filters, chn_split_ratio=0.5, down_sample=False, scope='blk'):
with tf.variable_scope(scope):
if down_sample:
blk_strides = 2
mid_filters = out_filters // 2
x_right = block_input
x_left = tf.keras.layers.DepthwiseConv2D(kernel_size=3, strides=2,
padding='same', use_bias=False)(block_input)
x_left = tf.layers.batch_normalization(x_left, training=True)
x_left = tf.keras.layers.Conv2D(filters=mid_filters, kernel_size=1,
strides=1, use_bias=False)(x_left)
x_left = tf.layers.batch_normalization(x_left, training=True)
x_left = tf.keras.activations.relu(x_left)
else:
blk_strides = 1
mid_filters = int(int(block_input.shape[-1]) * chn_split_ratio)
x_left, x_right = self.split_unit(block_input)
x_right = tf.keras.layers.Conv2D(filters=mid_filters, kernel_size=1, strides=1,
padding='same', use_bias=False)(x_right)
x_right = tf.layers.batch_normalization(x_right, training=True)
x_right = tf.keras.activations.relu(x_right)
x_right = tf.keras.layers.DepthwiseConv2D(kernel_size=3, strides=blk_strides,
padding='same', use_bias=False)(x_right)
x_right = tf.layers.batch_normalization(x_right, training=True)
x_right = tf.keras.layers.Conv2D(filters=mid_filters, kernel_size=1, strides=1,
padding='same', use_bias=False)(x_right)
x_right = tf.layers.batch_normalization(x_right, training=True)
x_right = tf.keras.activations.relu(x_right)
x = tf.concat([x_left, x_right], axis=3)
block = self.shuffle_unit(x)
return block
def fc_layer(self, layer_input, scope='fc'):
with tf.variable_scope(scope):
layer = tf.keras.layers.Flatten()(layer_input)
layer = tf.keras.layers.Dense(units=self.output_classes)(layer)
return layer
def build(self, model_input):
with tf.variable_scope('stage_1'):
x = tf.keras.layers.Conv2D(filters=24, kernel_size=3, strides=2, use_bias=False)(model_input)
x = tf.layers.batch_normalization(x, training=True)
x = tf.keras.activations.relu(x)
with tf.variable_scope('stage_2'):
x = self.bottleneck(x, out_filters=self.out_filters[0], chn_split_ratio=0.5,
down_sample=True, scope='blk_0')
for i in range(1, 4):
x = self.bottleneck(x, out_filters=self.out_filters[0], chn_split_ratio=0.5,
down_sample=False, scope='blk_'+str(i))
with tf.variable_scope('stage_3'):
x = self.bottleneck(x, out_filters=self.out_filters[1], chn_split_ratio=0.5,
down_sample=True, scope='blk_0')
for i in range(1, 8):
x = self.bottleneck(x, out_filters=self.out_filters[1], chn_split_ratio=0.5,
down_sample=False, scope='blk_'+str(i))
with tf.variable_scope('stage_4'):
x = self.bottleneck(x, out_filters=self.out_filters[2], chn_split_ratio=0.5,
down_sample=True, scope='blk_0')
for i in range(1, 4):
x = self.bottleneck(x, out_filters=self.out_filters[2], chn_split_ratio=0.5,
down_sample=False, scope='blk_'+str(i))
x = tf.keras.layers.Conv2D(filters=1024, kernel_size=1, strides=1)(x)
x = tf.keras.layers.GlobalAveragePooling2D()(x)
model = self.fc_layer(x, scope='fc')
return model | models/shufflenet_v2.py | import tensorflow as tf
class ShuffleNetV2:
def __init__(self, complexity=2, num_classes=10):
self.output_classes = num_classes
self.complexity = complexity
if self.complexity == 0.5:
self.out_filters = [48, 96, 192]
elif self.complexity == 1:
self.out_filters = [116, 232, 464]
elif self.complexity == 1.5:
self.out_filters = [176, 352, 704]
elif self.complexity == 2:
self.out_filters = [244, 488, 976]
else:
raise ValueError('[ShuffleNetV2] complexity is invalid, try 0.5, 1, 1.5, 2')
@staticmethod
def split_unit(unit_input, split_ratio=0.5):
input_chn = int(unit_input.shape[-1])
chn_split_x = input_chn * split_ratio
chn_split_y = input_chn * (1 - split_ratio)
return unit_input[:, :, :, 0:int(chn_split_x)], unit_input[:, :, :, int(chn_split_y):input_chn]
@staticmethod
def shuffle_unit(unit_input, groups=2):
img_num, img_height, img_width, img_chn = unit_input.shape
x = tf.reshape(unit_input, [-1, img_height, img_width, groups, img_chn//groups])
x = tf.transpose(x, perm=[0, 1, 2, 4, 3])
unit = tf.reshape(x, [-1, img_height, img_width, img_chn])
return unit
def bottleneck(self, block_input, out_filters, chn_split_ratio=0.5, down_sample=False, scope='blk'):
with tf.variable_scope(scope):
if down_sample:
blk_strides = 2
mid_filters = out_filters // 2
x_right = block_input
x_left = tf.keras.layers.DepthwiseConv2D(kernel_size=3, strides=2,
padding='same', use_bias=False)(block_input)
x_left = tf.layers.batch_normalization(x_left, training=True)
x_left = tf.keras.layers.Conv2D(filters=mid_filters, kernel_size=1,
strides=1, use_bias=False)(x_left)
x_left = tf.layers.batch_normalization(x_left, training=True)
x_left = tf.keras.activations.relu(x_left)
else:
blk_strides = 1
mid_filters = int(int(block_input.shape[-1]) * chn_split_ratio)
x_left, x_right = self.split_unit(block_input)
x_right = tf.keras.layers.Conv2D(filters=mid_filters, kernel_size=1, strides=1,
padding='same', use_bias=False)(x_right)
x_right = tf.layers.batch_normalization(x_right, training=True)
x_right = tf.keras.activations.relu(x_right)
x_right = tf.keras.layers.DepthwiseConv2D(kernel_size=3, strides=blk_strides,
padding='same', use_bias=False)(x_right)
x_right = tf.layers.batch_normalization(x_right, training=True)
x_right = tf.keras.layers.Conv2D(filters=mid_filters, kernel_size=1, strides=1,
padding='same', use_bias=False)(x_right)
x_right = tf.layers.batch_normalization(x_right, training=True)
x_right = tf.keras.activations.relu(x_right)
x = tf.concat([x_left, x_right], axis=3)
block = self.shuffle_unit(x)
return block
def fc_layer(self, layer_input, scope='fc'):
with tf.variable_scope(scope):
layer = tf.keras.layers.Flatten()(layer_input)
layer = tf.keras.layers.Dense(units=self.output_classes)(layer)
return layer
def build(self, model_input):
with tf.variable_scope('stage_1'):
x = tf.keras.layers.Conv2D(filters=24, kernel_size=3, strides=2, use_bias=False)(model_input)
x = tf.layers.batch_normalization(x, training=True)
x = tf.keras.activations.relu(x)
with tf.variable_scope('stage_2'):
x = self.bottleneck(x, out_filters=self.out_filters[0], chn_split_ratio=0.5,
down_sample=True, scope='blk_0')
for i in range(1, 4):
x = self.bottleneck(x, out_filters=self.out_filters[0], chn_split_ratio=0.5,
down_sample=False, scope='blk_'+str(i))
with tf.variable_scope('stage_3'):
x = self.bottleneck(x, out_filters=self.out_filters[1], chn_split_ratio=0.5,
down_sample=True, scope='blk_0')
for i in range(1, 8):
x = self.bottleneck(x, out_filters=self.out_filters[1], chn_split_ratio=0.5,
down_sample=False, scope='blk_'+str(i))
with tf.variable_scope('stage_4'):
x = self.bottleneck(x, out_filters=self.out_filters[2], chn_split_ratio=0.5,
down_sample=True, scope='blk_0')
for i in range(1, 4):
x = self.bottleneck(x, out_filters=self.out_filters[2], chn_split_ratio=0.5,
down_sample=False, scope='blk_'+str(i))
x = tf.keras.layers.Conv2D(filters=1024, kernel_size=1, strides=1)(x)
x = tf.keras.layers.GlobalAveragePooling2D()(x)
model = self.fc_layer(x, scope='fc')
return model | 0.915498 | 0.472744 |
import io
import pytest
import sys
import sudoku.grid as grid
class TestGrid:
    """Unit tests for sudoku.grid.Grid."""

    def _unsolvable_grid(self):
        # Shared fixture: a puzzle that sudoku.grid cannot solve.
        board = grid.Grid()
        for row in (
            [7, 0, 9, 4, 0, 2, 3, 8, 0],
            [6, 0, 3, 0, 0, 0, 0, 5, 0],
            [0, 8, 0, 0, 0, 5, 0, 0, 0],
            [0, 0, 4, 2, 1, 8, 0, 9, 0],
            [0, 0, 0, 6, 0, 4, 0, 0, 0],
            [0, 7, 0, 5, 3, 9, 4, 0, 0],
            [0, 0, 0, 1, 0, 0, 0, 4, 0],
            [0, 9, 0, 0, 0, 0, 5, 0, 3],
            [0, 4, 5, 9, 0, 7, 1, 0, 8],
        ):
            board.add_row(row)
        return board

    def test_grid(self):
        board = grid.Grid()
        assert board.grid_type == "grid"

    def test_grid_add_row(self):
        board = grid.Grid()
        row = [7, 0, 9, 4, 0, 2, 3, 8, 0]
        for _ in range(2):
            board.add_row(row)
        assert board.rows == [row, row]

    def test_grid_add_row_exceptions(self):
        board = grid.Grid()
        for bad_type in (None, "foo", 1, {"a": 4}, {3, 4, 5}, (3, 4, 5)):
            with pytest.raises(TypeError):
                board.add_row(bad_type)
        for bad_row in ([1], [1, 2, 3, 4, 5, 6, 7, 8, 8], [1, 2, 3, 4, 5, 6, 7, 10, 8]):
            with pytest.raises(ValueError):
                board.add_row(bad_row)
        with pytest.raises(RuntimeError):
            # modify the row count internally to quickly verify you can't
            # add more than 9 rows
            board.rows = [[] for _ in range(9)]
            board.add_row([1, 2, 3, 4, 5, 6, 7, 8, 9])

    def test_grid_show(self):
        saved_stdout = sys.stdout
        sys.stdout = captured = io.StringIO()
        board = grid.Grid()
        board.add_row([1, 2, 3, 4, 5, 6, 7, 8, 9])
        board.show()
        sys.stdout = saved_stdout
        assert captured.getvalue() == "[1, 2, 3, 4, 5, 6, 7, 8, 9]\n"

    def test_grid_solve(self):
        board = grid.Grid()
        puzzle = [
            [0, 5, 4, 9, 6, 0, 0, 3, 8],
            [0, 0, 0, 0, 0, 0, 4, 0, 0],
            [7, 0, 2, 3, 5, 4, 0, 6, 9],
            [0, 7, 0, 0, 9, 3, 0, 0, 0],
            [4, 0, 0, 0, 0, 0, 0, 0, 2],
            [0, 0, 0, 6, 2, 0, 0, 4, 0],
            [6, 4, 0, 1, 8, 9, 3, 0, 5],
            [0, 0, 3, 0, 0, 0, 0, 0, 0],
            [8, 2, 0, 0, 3, 5, 6, 9, 0],
        ]
        for row in puzzle:
            board.add_row(row)
        board.solve()
        expected = [
            [1, 5, 4, 9, 6, 7, 2, 3, 8],
            [3, 6, 9, 8, 1, 2, 4, 5, 7],
            [7, 8, 2, 3, 5, 4, 1, 6, 9],
            [2, 7, 8, 4, 9, 3, 5, 1, 6],
            [4, 3, 6, 5, 7, 1, 9, 8, 2],
            [9, 1, 5, 6, 2, 8, 7, 4, 3],
            [6, 4, 7, 1, 8, 9, 3, 2, 5],
            [5, 9, 3, 2, 4, 6, 8, 7, 1],
            [8, 2, 1, 7, 3, 5, 6, 9, 4],
        ]
        for index, row in enumerate(expected):
            assert board.rows[index] == row

    def test_grid_solve_unsolvable(self):
        board = self._unsolvable_grid()
        with pytest.raises(Exception):
            board.solve()

    def test_grid_solved(self):
        board = self._unsolvable_grid()
        assert board.solved() is False
def test_validate_row():
tests = [None, "foo", 1, {"a": 4}, {3, 4, 5}, (3, 4, 5)]
for test in tests:
with pytest.raises(TypeError):
grid.validate_row(test)
tests = [[1], [1, 2, 3, 4, 5, 6, 7, 8, 8], [1, 2, 3, 4, 5, 6, 7, 10, 8]]
for test in tests:
with pytest.raises(ValueError):
grid.validate_row(test) | tests/test_grid.py | import io
import pytest
import sys
import sudoku.grid as grid
class TestGrid:
def test_grid(self):
g = grid.Grid()
assert g.grid_type == "grid"
def test_grid_add_row(self):
g = grid.Grid()
row = [7, 0, 9, 4, 0, 2, 3, 8, 0]
g.add_row(row)
g.add_row(row)
assert g.rows == [row, row]
def test_grid_add_row_exceptions(self):
g = grid.Grid()
tests = [None, "foo", 1, {"a": 4}, {3, 4, 5}, (3, 4, 5)]
for test in tests:
with pytest.raises(TypeError):
g.add_row(test)
tests = [[1], [1, 2, 3, 4, 5, 6, 7, 8, 8], [1, 2, 3, 4, 5, 6, 7, 10, 8]]
for test in tests:
with pytest.raises(ValueError):
g.add_row(test)
with pytest.raises(RuntimeError):
# modify the row count internally to quickly verify you can't
# add more than 9 rows
g.rows = [[], [], [], [], [], [], [], [], []]
g.add_row([1, 2, 3, 4, 5, 6, 7, 8, 9])
def test_grid_show(self):
_sysout = sys.stdout
sys.stdout = result = io.StringIO()
g = grid.Grid()
g.add_row([1, 2, 3, 4, 5, 6, 7, 8, 9])
g.show()
sys.stdout = _sysout
assert result.getvalue() == "[1, 2, 3, 4, 5, 6, 7, 8, 9]\n"
def test_grid_solve(self):
g = grid.Grid()
g.add_row([0, 5, 4, 9, 6, 0, 0, 3, 8])
g.add_row([0, 0, 0, 0, 0, 0, 4, 0, 0])
g.add_row([7, 0, 2, 3, 5, 4, 0, 6, 9])
g.add_row([0, 7, 0, 0, 9, 3, 0, 0, 0])
g.add_row([4, 0, 0, 0, 0, 0, 0, 0, 2])
g.add_row([0, 0, 0, 6, 2, 0, 0, 4, 0])
g.add_row([6, 4, 0, 1, 8, 9, 3, 0, 5])
g.add_row([0, 0, 3, 0, 0, 0, 0, 0, 0])
g.add_row([8, 2, 0, 0, 3, 5, 6, 9, 0])
g.solve()
assert g.rows[0] == [1, 5, 4, 9, 6, 7, 2, 3, 8]
assert g.rows[1] == [3, 6, 9, 8, 1, 2, 4, 5, 7]
assert g.rows[2] == [7, 8, 2, 3, 5, 4, 1, 6, 9]
assert g.rows[3] == [2, 7, 8, 4, 9, 3, 5, 1, 6]
assert g.rows[4] == [4, 3, 6, 5, 7, 1, 9, 8, 2]
assert g.rows[5] == [9, 1, 5, 6, 2, 8, 7, 4, 3]
assert g.rows[6] == [6, 4, 7, 1, 8, 9, 3, 2, 5]
assert g.rows[7] == [5, 9, 3, 2, 4, 6, 8, 7, 1]
assert g.rows[8] == [8, 2, 1, 7, 3, 5, 6, 9, 4]
def test_grid_solve_unsolvable(self):
g = grid.Grid()
g.add_row([7, 0, 9, 4, 0, 2, 3, 8, 0])
g.add_row([6, 0, 3, 0, 0, 0, 0, 5, 0])
g.add_row([0, 8, 0, 0, 0, 5, 0, 0, 0])
g.add_row([0, 0, 4, 2, 1, 8, 0, 9, 0])
g.add_row([0, 0, 0, 6, 0, 4, 0, 0, 0])
g.add_row([0, 7, 0, 5, 3, 9, 4, 0, 0])
g.add_row([0, 0, 0, 1, 0, 0, 0, 4, 0])
g.add_row([0, 9, 0, 0, 0, 0, 5, 0, 3])
g.add_row([0, 4, 5, 9, 0, 7, 1, 0, 8])
with pytest.raises(Exception):
g.solve()
def test_grid_solved(self):
g = grid.Grid()
g.add_row([7, 0, 9, 4, 0, 2, 3, 8, 0])
g.add_row([6, 0, 3, 0, 0, 0, 0, 5, 0])
g.add_row([0, 8, 0, 0, 0, 5, 0, 0, 0])
g.add_row([0, 0, 4, 2, 1, 8, 0, 9, 0])
g.add_row([0, 0, 0, 6, 0, 4, 0, 0, 0])
g.add_row([0, 7, 0, 5, 3, 9, 4, 0, 0])
g.add_row([0, 0, 0, 1, 0, 0, 0, 4, 0])
g.add_row([0, 9, 0, 0, 0, 0, 5, 0, 3])
g.add_row([0, 4, 5, 9, 0, 7, 1, 0, 8])
assert g.solved() is False
def test_validate_row():
tests = [None, "foo", 1, {"a": 4}, {3, 4, 5}, (3, 4, 5)]
for test in tests:
with pytest.raises(TypeError):
grid.validate_row(test)
tests = [[1], [1, 2, 3, 4, 5, 6, 7, 8, 8], [1, 2, 3, 4, 5, 6, 7, 10, 8]]
for test in tests:
with pytest.raises(ValueError):
grid.validate_row(test) | 0.422147 | 0.786131 |
from __future__ import print_function
import argparse
import math
import os
import struct
import sys
from csiphash import siphash24
from cskipdict import SkipDict
class Element(object):
    """A sampled stream value together with its observed occurrence count."""

    # keep instances lightweight - the sketch may hold many of these
    __slots__ = ('value', 'count')

    def __init__(self, value):
        # a new element has been seen exactly once
        self.count = 1
        self.value = value
class Recordinality(object):
    """Recordinality cardinality-estimation sketch.

    Keeps the ``size`` records with the largest hashes and estimates the
    stream's cardinality from how many times that record set was modified.
    """

    def __init__(self, size, hash_key=None, store_values=True):
        """
        Args:
            size: number of records (k) to keep; larger k lowers the error.
            hash_key: optional 16-byte SipHash key; a random key is drawn
                when None so independent runs decorrelate.
            store_values: keep the original values (enables ``sample`` and
                per-value counting) instead of hashes only.
        """
        if hash_key is None:
            hash_key = os.urandom(16)
        # 64-bit signed SipHash of a value, under the chosen key
        self.hash = lambda val: struct.unpack('q', siphash24(hash_key, val))[0]
        self.k_records = SkipDict()
        self.size = size
        self.modifications = 0
        self.store_values = store_values

    def add(self, value):
        """Feed one stream value into the sketch."""
        value_hash = self.hash(value)  # renamed: don't shadow builtin hash()
        if value_hash in self.k_records:
            element = self.k_records[value_hash]
            if self.store_values and element.value == value:
                element.count += 1
        elif len(self.k_records) < self.size:
            # record set not full yet: always admit
            self.k_records[value_hash] = Element(value if self.store_values else None)
            self.modifications += 1
        else:
            # full: admit only if the new hash beats the current minimum
            min_key, _ = self.k_records.minimum()
            if min_key < value_hash:
                del self.k_records[min_key]
                self.k_records[value_hash] = Element(value if self.store_values else None)
                self.modifications += 1

    def cardinality(self):
        """Estimate the number of distinct values seen so far."""
        # We have an exact cardinality up to self.size
        if self.modifications <= self.size:
            return self.modifications
        exponent = self.modifications - self.size + 1  # renamed: don't shadow builtin pow()
        estimate = (self.size * math.pow(1 + (1.0 / self.size), exponent)) - 1
        return int(estimate)

    def error(self):
        """Return the expected standard error of cardinality()."""
        cardinality = self.cardinality()
        return math.sqrt(math.pow(cardinality / (self.size * math.e), 1.0 / self.size) - 1)

    @property
    def sample(self):
        """Yield (value, count) pairs for the stored k-record sample.

        Raises:
            AttributeError: if the sketch was built with store_values=False.
        """
        if not self.store_values:
            raise AttributeError("This Recordinality is not configured to store values for sampling")
        for _key, element in self.k_records.iteritems():
            yield (element.value, element.count)
def hash_key_argument(arg):
    """argparse type converter for the SipHash key.

    Accepts either 16 raw ASCII characters (returned as-is) or 32 hex
    digits (decoded to the 16-byte key).

    Raises:
        TypeError: if ``arg`` has neither accepted length (argparse
            reports this as an invalid argument value).
    """
    import binascii
    if len(arg) == 32:
        # binascii.unhexlify works on both Python 2 and 3;
        # str.decode('hex') (the original) is Python-2-only.
        return binascii.unhexlify(arg)
    elif len(arg) == 16:
        return arg
    # bugfix: the option is -k/--hash-key (-h is argparse's help flag)
    raise TypeError("-k/--hash-key must be either 16 ASCII chars or 32 hex digits")
# Command-line interface shared by main(); the sketch size is the only
# positional argument.
PARSER = argparse.ArgumentParser()
PARSER.add_argument('size', type=int,
                    help="The size of the Recordinality sketch")
# The key is validated/decoded by hash_key_argument; None means a random key.
PARSER.add_argument('-k', '--hash-key', type=hash_key_argument, default=None,
                    help="A key to use for the SipHash function (as 16 ASCII chars or 32 hex digits)")
PARSER.add_argument('-s', '--sample', action='store_true', default=False,
                    help="Capture a k-sized random sample from the stream, printing it afterwards")
def main():
    """Read one value per line from stdin and print the cardinality estimate."""
    options = PARSER.parse_args()
    sketch = Recordinality(
        size=options.size,
        hash_key=options.hash_key,
        store_values=options.sample,
    )
    for raw_line in sys.stdin:
        sketch.add(raw_line.rstrip('\r\n').encode('utf-8'))
    print(sketch.cardinality())
    if not options.sample:
        return
    for value, count in sketch.sample:
        print('{}\t{}'.format(value, count))
if __name__ == '__main__':
main() | recordinality.py |
from __future__ import print_function
import argparse
import math
import os
import struct
import sys
from csiphash import siphash24
from cskipdict import SkipDict
class Element(object):
__slots__ = ('value', 'count')
def __init__(self, value):
self.value = value
self.count = 1
class Recordinality(object):
def __init__(self, size, hash_key=None, store_values=True):
if hash_key is None:
hash_key = os.urandom(16)
self.hash = lambda val: struct.unpack('q', siphash24(hash_key, val))[0]
self.k_records = SkipDict()
self.size = size
self.modifications = 0
self.store_values = store_values
def add(self, value):
hash = self.hash(value)
if hash in self.k_records:
element = self.k_records[hash]
if self.store_values and element.value == value:
element.count += 1
elif len(self.k_records) < self.size:
self.k_records[hash] = Element(value if self.store_values else None)
self.modifications += 1
else:
min_key, min_val = self.k_records.minimum()
if min_key < hash:
del self.k_records[min_key]
self.k_records[hash] = Element(value if self.store_values else None)
self.modifications += 1
def cardinality(self):
# We have an exact cardinality up to self.size
if self.modifications <= self.size:
return self.modifications
pow = self.modifications - self.size + 1
estimate = (self.size * math.pow(1 + (1.0 / self.size), pow)) - 1
return int(estimate)
def error(self):
cardinality = self.cardinality()
return math.sqrt(math.pow(cardinality / (self.size * math.e), 1.0 / self.size) - 1)
@property
def sample(self):
if not self.store_values:
raise AttributeError("This Recordinality is not configured to store values for sampling")
for key, elem in self.k_records.iteritems():
yield (elem.value, elem.count)
def hash_key_argument(arg):
if len(arg) == 32:
return arg.decode('hex')
elif len(arg) == 16:
return arg
raise TypeError("-h/--hash-key must be either 16 ASCII chars or 32 hex digits")
PARSER = argparse.ArgumentParser()
PARSER.add_argument('size', type=int,
help="The size of the Recordinality sketch")
PARSER.add_argument('-k', '--hash-key', type=hash_key_argument, default=None,
help="A key to use for the SipHash function (as 16 ASCII chars or 32 hex digits)")
PARSER.add_argument('-s', '--sample', action='store_true', default=False,
help="Capture a k-sized random sample from the stream, printing it afterwards")
def main():
args = PARSER.parse_args()
sketch = Recordinality(size=args.size, hash_key=args.hash_key, store_values=args.sample)
for line in sys.stdin:
sketch.add(line.rstrip('\r\n').encode('utf-8'))
print(sketch.cardinality())
if args.sample:
for value, count in sketch.sample:
print('{}\t{}'.format(value, count))
if __name__ == '__main__':
main() | 0.564339 | 0.176672 |
import numpy as np
import soundfile as sf
from metprint import LogType, Logger, FHFormatter
from stegstash.lsb import LSB
exts = ["wav"]
def extNotLossless(fileName):
    """Log an error that fileName does not have a lossless extension."""
    allowed = ", \"".join(exts)
    message = ("File extension is not lossless: " + fileName + "! Must be " +
               "one of \"" + allowed + "\"")
    Logger(FHFormatter()).logPrint(message, LogType.ERROR)
def encode(openPath, writePath, chars, soundMapSeed, password=""):
    """Hide data in a sound file using seeded LSB steganography.

    Args:
        openPath (string): path to the original sound file to open
        writePath (string): path to write the stego-sound file
        chars (string|bytes|<file>): data to encode
        soundMapSeed (string): seed to generate the lsb map
        password (str, optional): password to encrypt the data with. Defaults to "".
    """
    samples, samplerate, shape = openSound(openPath)
    lsb = LSB(samples, data=chars)
    stego = lsb.encode(soundMapSeed, password)
    writeSound(writePath, stego, samplerate, shape)
def decode(openPath, soundMapSeed, password="", zeroTerm=True, file=None):
    """Extract data hidden in a sound file with seeded LSB steganography.

    Args:
        openPath (string): path to the stego-sound file to decode
        soundMapSeed (string): seed to generate the lsb map
        password (str, optional): password to decrypt the data with. Defaults to "".
        zeroTerm (boolean, optional): stop decoding on \x00 (NUL). Defaults to True.
        file (<file>, optional): file pointer. Defaults to None.

    Returns:
        bytes: data from the sound file
    """
    samples, _samplerate, _shape = openSound(openPath)
    lsb = LSB(samples)
    return lsb.decode(soundMapSeed, password, zeroTerm, file)
def simpleEncode(openPath, writePath, chars):
    """Hide data in a sound file using sequential (unseeded) LSB steganography.

    Args:
        openPath (string): path to the original sound file to open
        writePath (string): path to write the stego-sound file
        chars (string|bytes|<file>): data to encode
    """
    samples, samplerate, shape = openSound(openPath)
    lsb = LSB(samples, data=chars)
    stego = lsb.simpleEncode()
    writeSound(writePath, stego, samplerate, shape)
def simpleDecode(openPath, zeroTerm=True, file=None):
    """Extract sequentially LSB-encoded data from a sound file.

    Args:
        openPath (string): path to the stego-sound file to decode
        zeroTerm (boolean, optional): stop decoding on \x00 (NUL). Defaults to True.
        file (<file>, optional): file pointer. Defaults to None.

    Returns:
        bytes: data from the sound file
    """
    samples, _samplerate, _shape = openSound(openPath)
    lsb = LSB(samples)
    return lsb.simpleDecode(zeroTerm, file)
def openSound(path):
    """Open a sound file as a flat numpy array of unsigned 16-bit samples.

    (bugfix: the original had two consecutive string literals - only the
    first was the docstring, the second was a dead statement.)

    Args:
        path (string): path to the sound file to open

    Returns:
        numpy.array, int, Tuple(int): A 1D numpy array containing sound data,
        sample rate, shape

    Raises:
        ValueError: if the file extension is not a lossless format
    """
    fileExt = path.split(".")[-1].lower()
    if fileExt not in exts:
        extNotLossless(path)
        raise ValueError("File extension is not lossless: " + path)
    data, samplerate = sf.read(path, always_2d=True, dtype='int16')
    # shift signed int16 samples into the uint16 range so the LSB maths
    # operates on non-negative values
    return (data.flatten() + 2**15).astype(np.uint16), samplerate, data.shape
def writeSound(path, sound, samplerate, shape):
"""Write a 1D numpy array to a sound file
Args:
path (string): path to the sound file to save
sound (numpy.array): 1D numpy array containing sound data
samplerate int: sample rate
shape (Tuple(int)): shape
"""
fileExt = path.split(".")[-1].lower()
if fileExt not in exts:
extNotLossless(path)
raise ValueError
sound = (sound - 2**15).astype(np.int16)
sf.write(path, sound.reshape(shape), samplerate) | stegstash/soundlsb.py | import numpy as np
import soundfile as sf
from metprint import LogType, Logger, FHFormatter
from stegstash.lsb import LSB
exts = ["wav"]
def extNotLossless(fileName):
""" Output the file extension not lossless error """
Logger(FHFormatter()).logPrint(
"File extension is not lossless: " + fileName + "! Must be " + "one of \"" +
", \"".join(exts) + "\"", LogType.ERROR)
def encode(openPath, writePath, chars, soundMapSeed, password=""):
"""encode a sound file with data using lsb steganography
Args:
openPath (string): path to the original sound file to open
writePath (string): path to write the stego-sound file
data (string|bytes|<file>): data to encode
soundMapSeed (string): seed to generate the lsb map
password (str, optional): password to encrypt the data with. Defaults to "".
"""
data, samplerate, shape = openSound(openPath)
encodeLsb = LSB(data, data=chars)
data = encodeLsb.encode(soundMapSeed, password)
writeSound(writePath, data, samplerate, shape)
def decode(openPath, soundMapSeed, password="", zeroTerm=True, file=None):
"""decode data from a sound file using lsb steganography
Args:
openPath (string): path to the stego-sound file to decode
soundMapSeed (string): seed to generate the lsb map
password (str, optional): password to encrypt the data with. Defaults to "".
zeroTerm (boolean, optional): stop decoding on \x00 (NUL). Defaults to True.
file (<file>, optional): file pointer. Defaults to None.
Returns:
bytes: data from the sound file
"""
data, _samplerate, _shape = openSound(openPath)
decodeLsb = LSB(data)
return decodeLsb.decode(soundMapSeed, password, zeroTerm, file)
def simpleEncode(openPath, writePath, chars):
"""encode a sound file with data using lsb steganography
Args:
openPath (string): path to the original sound file to open
writePath (string): path to write the stego-sound file
data (string|bytes|<file>): data to encode
"""
data, samplerate, shape = openSound(openPath)
encodeLsb = LSB(data, data=chars)
data = encodeLsb.simpleEncode()
writeSound(writePath, data, samplerate, shape)
def simpleDecode(openPath, zeroTerm=True, file=None):
"""decode data from a sound file using lsb steganography
Args:
openPath (string): path to the stego-sound file to decode
zeroTerm (boolean, optional): stop decoding on \x00 (NUL). Defaults to True.
file (<file>, optional): file pointer. Defaults to None.
Returns:
bytes: data from the sound file
"""
data, _samplerate, _shape = openSound(openPath)
decodeLsb = LSB(data)
return decodeLsb.simpleDecode(zeroTerm, file)
def openSound(path):
""" open a sound file """
"""Open a sound file as a numpy array
Args:
path (string): path to the sound file to open
Returns:
numpy.array, int, Tuple(int): A 1D numpy array containing sound data,
sample rate, shape
"""
fileExt = path.split(".")[-1].lower()
if fileExt not in exts:
extNotLossless(path)
raise ValueError
data, samplerate = sf.read(path, always_2d=True, dtype='int16')
return (data.flatten() + 2**15).astype(np.uint16), samplerate, data.shape
def writeSound(path, sound, samplerate, shape):
"""Write a 1D numpy array to a sound file
Args:
path (string): path to the sound file to save
sound (numpy.array): 1D numpy array containing sound data
samplerate int: sample rate
shape (Tuple(int)): shape
"""
fileExt = path.split(".")[-1].lower()
if fileExt not in exts:
extNotLossless(path)
raise ValueError
sound = (sound - 2**15).astype(np.int16)
sf.write(path, sound.reshape(shape), samplerate) | 0.817101 | 0.42173 |
import logging
import crochet
import requests
import fido
from yelp_bytes import to_bytes
from bravado_core.response import IncomingResponse
from bravado.http_client import HttpClient
from bravado.http_future import FutureAdapter
from bravado.http_future import HttpFuture
log = logging.getLogger(__name__)
class FidoResponseAdapter(IncomingResponse):
    """Adapter giving a :class:`fido.fido.Response` the uniform
    :class:`IncomingResponse` interface.

    :type fido_response: :class:`fido.fido.Response`
    """
    def __init__(self, fido_response):
        self._fido_response = fido_response

    @property
    def headers(self):
        return self._fido_response.headers

    @property
    def reason(self):
        return self._fido_response.reason

    @property
    def status_code(self):
        return self._fido_response.code

    @property
    def text(self):
        return self._fido_response.body

    def json(self, **_):
        # TODO: pass the kwargs downstream
        return self._fido_response.json()
class FidoClient(HttpClient):
    """Fido (Asynchronous) HTTP client implementation.
    """

    def request(self, request_params, operation=None, response_callbacks=None,
                also_return_response=False):
        """Sets up the request params as per Twisted Agent needs.
        Sets up crochet and triggers the API request in background

        :param request_params: request parameters for the http request.
        :type request_params: dict
        :param operation: operation that this http request is for. Defaults
            to None - in which case, we're obviously just retrieving a Swagger
            Spec.
        :type operation: :class:`bravado_core.operation.Operation`
        :param response_callbacks: List of callables to post-process the
            incoming response. Expects args incoming_response and operation.
        :param also_return_response: Consult the constructor documentation for
            :class:`bravado.http_future.HttpFuture`.

        :rtype: :class: `bravado_core.http_future.HttpFuture`
        """
        fetch_kwargs = self.prepare_request_for_twisted(request_params)
        eventual_result = fido.fetch(**fetch_kwargs)
        return HttpFuture(
            FidoFutureAdapter(eventual_result),
            FidoResponseAdapter,
            operation,
            response_callbacks,
            also_return_response,
        )

    @staticmethod
    def prepare_request_for_twisted(request_params):
        """
        Uses the python package 'requests' to prepare the data as per twisted
        needs. requests.PreparedRequest.prepare is able to compute the body and
        the headers for the http call based on the input request_params. This
        contains any query parameters, files, body and headers to include.

        :return: dictionary in the form
            {
                'body': string,  # (can represent any content-type i.e. json,
                    file, multipart..),
                'headers': dictionary,  # headers->values
                'method': string,  # can be 'GET', 'POST' etc.
                'url': string,
                'timeout': float,  # optional
                'connect_timeout': float,  # optional
            }
        """
        prepared = requests.PreparedRequest()
        prepared.prepare(
            method=request_params.get('method'),
            url=request_params.get('url'),
            headers=request_params.get('headers'),
            params=request_params.get('params'),
            data=request_params.get('data'),
            files=request_params.get('files'),
        )

        # content-length was computed by 'requests' based on the current body
        # but body will be processed by fido using twisted FileBodyProducer
        # causing content-length to lose meaning and break the client.
        prepared.headers.pop('Content-Length', None)

        if prepared.body is None:
            body = None
        else:
            body = to_bytes(prepared.body)

        request_for_twisted = {
            # converting to string for `requests` method is necessary when
            # using requests < 2.8.1 due to a bug while handling unicode values
            # See changelog 2.8.1 at https://pypi.python.org/pypi/requests
            'method': str(prepared.method or 'GET'),
            'body': body,
            'headers': prepared.headers,
            'url': prepared.url,
        }

        for fetch_kwarg in ('connect_timeout', 'timeout'):
            if fetch_kwarg in request_params:
                request_for_twisted[fetch_kwarg] = request_params[fetch_kwarg]
        return request_for_twisted
class FidoFutureAdapter(FutureAdapter):
"""
This is just a wrapper for an EventualResult object from crochet.
It implements the 'result' method which is needed by our HttpFuture to
retrieve results.
"""
def __init__(self, eventual_result):
self._eventual_result = eventual_result
def result(self, timeout=None):
try:
return self._eventual_result.wait(timeout=timeout)
except crochet.TimeoutError:
self._eventual_result.cancel()
raise | bravado/fido_client.py | import logging
import crochet
import requests
import fido
from yelp_bytes import to_bytes
from bravado_core.response import IncomingResponse
from bravado.http_client import HttpClient
from bravado.http_future import FutureAdapter
from bravado.http_future import HttpFuture
log = logging.getLogger(__name__)
class FidoResponseAdapter(IncomingResponse):
"""Wraps a fido.fido.Response object to provide a uniform interface
to the response innards.
:type fido_response: :class:`fido.fido.Response`
"""
def __init__(self, fido_response):
self._delegate = fido_response
@property
def status_code(self):
return self._delegate.code
@property
def text(self):
return self._delegate.body
@property
def reason(self):
return self._delegate.reason
@property
def headers(self):
return self._delegate.headers
def json(self, **_):
# TODO: pass the kwargs downstream
return self._delegate.json()
class FidoClient(HttpClient):
"""Fido (Asynchronous) HTTP client implementation.
"""
def request(self, request_params, operation=None, response_callbacks=None,
also_return_response=False):
"""Sets up the request params as per Twisted Agent needs.
Sets up crochet and triggers the API request in background
:param request_params: request parameters for the http request.
:type request_params: dict
:param operation: operation that this http request is for. Defaults
to None - in which case, we're obviously just retrieving a Swagger
Spec.
:type operation: :class:`bravado_core.operation.Operation`
:param response_callbacks: List of callables to post-process the
incoming response. Expects args incoming_response and operation.
:param also_return_response: Consult the constructor documentation for
:class:`bravado.http_future.HttpFuture`.
:rtype: :class: `bravado_core.http_future.HttpFuture`
"""
request_for_twisted = self.prepare_request_for_twisted(request_params)
future_adapter = FidoFutureAdapter(fido.fetch(**request_for_twisted))
return HttpFuture(future_adapter,
FidoResponseAdapter,
operation,
response_callbacks,
also_return_response)
@staticmethod
def prepare_request_for_twisted(request_params):
"""
Uses the python package 'requests' to prepare the data as per twisted
needs. requests.PreparedRequest.prepare is able to compute the body and
the headers for the http call based on the input request_params. This
contains any query parameters, files, body and headers to include.
:return: dictionary in the form
{
'body': string, # (can represent any content-type i.e. json,
file, multipart..),
'headers': dictionary, # headers->values
'method': string, # can be 'GET', 'POST' etc.
'url': string,
'timeout': float, # optional
'connect_timeout': float, # optional
}
"""
prepared_request = requests.PreparedRequest()
prepared_request.prepare(
headers=request_params.get('headers'),
data=request_params.get('data'),
params=request_params.get('params'),
files=request_params.get('files'),
url=request_params.get('url'),
method=request_params.get('method')
)
# content-length was computed by 'requests' based on the current body
# but body will be processed by fido using twisted FileBodyProducer
# causing content-length to lose meaning and break the client.
prepared_request.headers.pop('Content-Length', None)
request_for_twisted = {
# converting to string for `requests` method is necessary when
# using requests < 2.8.1 due to a bug while handling unicode values
# See changelog 2.8.1 at https://pypi.python.org/pypi/requests
'method': str(prepared_request.method or 'GET'),
'body': (
to_bytes(prepared_request.body)
if prepared_request.body is not None else None
),
'headers': prepared_request.headers,
'url': prepared_request.url,
}
for fetch_kwarg in ('connect_timeout', 'timeout'):
if fetch_kwarg in request_params:
request_for_twisted[fetch_kwarg] = request_params[fetch_kwarg]
return request_for_twisted
class FidoFutureAdapter(FutureAdapter):
"""
This is just a wrapper for an EventualResult object from crochet.
It implements the 'result' method which is needed by our HttpFuture to
retrieve results.
"""
def __init__(self, eventual_result):
self._eventual_result = eventual_result
def result(self, timeout=None):
try:
return self._eventual_result.wait(timeout=timeout)
except crochet.TimeoutError:
self._eventual_result.cancel()
raise | 0.560974 | 0.120103 |
from scipy.stats import shapiro
import scipy.stats as stats
import numpy as np
import pandas as pd
import yaml
from utils import *
# A/B Testing Function - Quick Solution
def perform_ab_test(dataframe, group, target, group_a, group_b):
    """Run an A/B hypothesis test on ``target`` between two groups.

    Picks the test automatically:
      * both groups normal (Shapiro-Wilk) -> independent t-test, with
        Welch's correction when Levene's test rejects equal variances;
      * otherwise -> Mann-Whitney U test.

    Args:
        dataframe (pd.DataFrame): data with the group and target columns.
        group (str): name of the column holding group labels.
        target (str): name of the numeric column to compare.
        group_a: label of the first group in the ``group`` column.
        group_b: label of the second group in the ``group`` column.

    Returns:
        pd.DataFrame: a one-row summary with the test type, hypothesis
        decision at alpha=0.05, p-value and a plain-language comment
        (plus a "Homogeneity" column for the parametric case).
    """
    # Split A/B
    groupA = dataframe[dataframe[group] == group_a][target]
    groupB = dataframe[dataframe[group] == group_b][target]

    # Normality assumption (Shapiro-Wilk).
    # H0: distribution is normal; True below means H0 was rejected.
    not_normal_a = shapiro(groupA)[1] < 0.05
    not_normal_b = shapiro(groupB)[1] < 0.05
    parametric = not not_normal_a and not not_normal_b

    heterogeneous = None
    if parametric:
        # Homogeneity of variances (Levene). H0: variances are equal;
        # True means the groups are heterogeneous.
        heterogeneous = stats.levene(groupA, groupB)[1] < 0.05
        # H0: M1 == M2 - Welch's t-test when variances differ
        ttest = stats.ttest_ind(groupA, groupB, equal_var=not heterogeneous)[1]
    else:
        # Non-parametric fallback. H0: M1 == M2
        ttest = stats.mannwhitneyu(groupA, groupB)[1]

    reject = ttest < 0.05
    temp = pd.DataFrame({
        "AB Hypothesis": ["Reject H0" if reject else "Fail to Reject H0"],
        "p-value": [ttest],
    })
    temp["Test Type"] = "Parametric" if parametric else "Non-Parametric"
    temp["Comment"] = ("A/B groups are not similar!" if reject
                       else "A/B groups are similar!")

    # Columns
    if parametric:
        temp["Homogeneity"] = "No" if heterogeneous else "Yes"
        temp = temp[["Test Type", "Homogeneity", "AB Hypothesis", "p-value", "Comment"]]
    else:
        temp = temp[["Test Type", "AB Hypothesis", "p-value", "Comment"]]

    # Print Hypothesis
    print("# A/B Testing Hypothesis for {}".format(target))
    print("H0: A == B")
    print("H1: A != B", "\n")
    return temp
if __name__ == '__main__':
config = load_config('config.yaml')
dataframe = pd.read_csv(config['data_path'])
group = config['group']
target = config['target']
group_a = config['group_a']
group_b = config['group_b']
print(perform_ab_test(
dataframe = dataframe,
group = group, target = target,
group_a = group_a, group_b = group_b
)
) | src/perform_abtest.py | from scipy.stats import shapiro
import scipy.stats as stats
import numpy as np
import pandas as pd
import yaml
from utils import *
# A/B Testing Function - Quick Solution
def perform_ab_test(dataframe, group, target, group_a, group_b):
# Split A/B
groupA = dataframe[dataframe[group] == group_a][target]
groupB = dataframe[dataframe[group] == group_b][target]
# Assumption: Normality
ntA = shapiro(groupA)[1] < 0.05
ntB = shapiro(groupB)[1] < 0.05
# H0: Distribution is Normal! - False
# H1: Distribution is not Normal! - True
if (ntA == False) & (ntB == False): # "H0: Normal Distribution"
# Parametric Test
# Assumption: Homogeneity of variances
leveneTest = stats.levene(groupA, groupB)[1] < 0.05
# H0: Homogeneity: False
# H1: Heterogeneous: True
if leveneTest == False:
# Homogeneity
ttest = stats.ttest_ind(groupA, groupB, equal_var=True)[1]
# H0: M1 == M2 - False
# H1: M1 != M2 - True
else:
# Heterogeneous
ttest = stats.ttest_ind(groupA, groupB, equal_var=False)[1]
# H0: M1 == M2 - False
# H1: M1 != M2 - True
else:
# Non-Parametric Test
ttest = stats.mannwhitneyu(groupA, groupB)[1]
# H0: M1 == M2 - False
# H1: M1 != M2 - True
# Result
temp = pd.DataFrame({
"AB Hypothesis":[ttest < 0.05],
"p-value":[ttest]
})
temp["Test Type"] = np.where((ntA == False) & (ntB == False), "Parametric", "Non-Parametric")
temp["AB Hypothesis"] = np.where(temp["AB Hypothesis"] == False, "Fail to Reject H0", "Reject H0")
temp["Comment"] = np.where(temp["AB Hypothesis"] == "Fail to Reject H0", "A/B groups are similar!", "A/B groups are not similar!")
# Columns
if (ntA == False) & (ntB == False):
temp["Homogeneity"] = np.where(leveneTest == False, "Yes", "No")
temp = temp[["Test Type", "Homogeneity","AB Hypothesis", "p-value", "Comment"]]
else:
temp = temp[["Test Type","AB Hypothesis", "p-value", "Comment"]]
# Print Hypothesis
print("# A/B Testing Hypothesis for {}".format(target))
print("H0: A == B")
print("H1: A != B", "\n")
return temp
if __name__ == '__main__':
config = load_config('config.yaml')
dataframe = pd.read_csv(config['data_path'])
group = config['group']
target = config['target']
group_a = config['group_a']
group_b = config['group_b']
print(perform_ab_test(
dataframe = dataframe,
group = group, target = target,
group_a = group_a, group_b = group_b
)
) | 0.408041 | 0.407805 |
import argparse
import os
import sys
import requests
# constants
API_URL = 'https://api.github.com'
class PullRequest:
    """Pull Request class

    Wraps the GitHub REST v3 pull-request endpoints needed to sync a head
    branch (``head_owner:head``) into a base branch of ``base_owner/repo``.
    Separate tokens are held for the head and base sides since they may be
    different repositories.
    """
    def __init__(self,
                 head_owner, head, head_token,
                 base_owner, repo, base, base_token):
        # head side: where the commits come from
        self.head_owner = head_owner
        self.head = head
        # base side: the repository/branch the PR targets
        self.base_owner = base_owner
        self.base_repo = repo
        self.base = base
        # the pulls endpoint always lives on the base repository
        self.pulls_url = f'{API_URL}/repos/{self.base_owner}/{self.base_repo}/pulls'
        self._head_auth_headers = {
            'Accept': 'application/vnd.github.v3+json',
            'Authorization': f"token {head_token}"
        }
        self._base_auth_headers = {
            'Accept': 'application/vnd.github.v3+json',
            'Authorization': f"token {base_token}"
        }

    def get_open(self):
        """get open pull request if existed

        Returns the JSON list of matching open PRs (possibly empty) on
        HTTP 200, None on HTTP 304, and raises on any other status.
        """
        params = {
            'state': 'open',
            'head': f"{self.head_owner}:{self.head}",
            'base': self.base,
        }
        print(params)
        r = requests.get(self.pulls_url, headers=self._base_auth_headers, params=params)
        if r.status_code == 200:
            return r.json()
        if r.status_code == 304:
            return None
        # FAILURE
        print('FAILURE - list PR')
        print(f'status code: {r.status_code}')
        # bugfix: this is the list call, not the create call
        raise Exception(f"Failed to list PR: {r.json()}")

    def create(self, params):
        """create a pull request

        Returns:
            (number, sha, done): PR number and head sha as strings; done is
            True when GitHub reports no commits between head and base
            (HTTP 422), in which case number and sha are empty strings.
        """
        # the token here must have write access to head owner/repo
        r = requests.post(self.pulls_url, headers=self._head_auth_headers, json=params)
        if r.status_code == 201:
            print('SUCCESS - create PR')
            pull = r.json()
            number = str(pull['number'])
            sha = str(pull['head']['sha'])
            return number, sha, False
        if r.status_code == 422:  # early-terminate if no commits between HEAD and BASE
            print('SUCCESS - No commits')
            print(r.json())
            return '', '', True
        # FAILURE
        print('FAILURE - create PR')
        print(f'status code: {r.status_code}')
        raise Exception(f"Failed to create PR: {r.json()}")

    def merge(self, number, params):
        """merge a pull request; returns the raw requests.Response"""
        # NOTE(review): the original comment claimed the *base* token is
        # needed here, but the request is sent with the head auth headers.
        # Confirm which token actually has merge rights before changing.
        url = f'{self.pulls_url}/{number}/merge'
        return requests.put(url, headers=self._head_auth_headers, json=params)

    def auto_merge(self, number, sha):
        """merge a auto-merge pull request

        Comments on the PR with the outcome; raises if the merge fails.
        """
        params = {
            'sha': sha,
            'merge_method': 'merge'
        }
        r = self.merge(number, params)
        if r.status_code == 200:
            self.comment(number, '**SUCCESS** - auto-merge')
            print('SUCCESS - auto-merge')
            return
        else:
            print('FAILURE - auto-merge')
            # post manual-resolution instructions on the PR before raising
            self.comment(number=number, content=f"""**FAILURE** - Unable to auto-merge. Manual operation is required.
```
{r.json()}
```
Please use the following steps to fix the merge conflicts manually:
```
# Assume upstream is {self.base_owner}/{self.base_repo} remote
git fetch upstream {self.head} {self.base}
git checkout -b fix-auto-merge-conflict-{number} upstream/{self.base}
git merge upstream/{self.head}
# Fix any merge conflicts caused by this merge
git commit -am "Merge {self.head} into {self.base}"
git push <personal fork> fix-auto-merge-conflict-{number}
# Open a PR targets {self.base_owner}/{self.base_repo} {self.base}
```
**IMPORTANT:** Before merging this PR, be sure to change the merging strategy to `Create a merge commit` (repo admin only).
Once this PR is merged, the auto-merge PR should automatically be closed since it contains the same commit hashes
""")
            print(f'status code: {r.status_code}')
            raise Exception(f"Failed to auto-merge PR: {r.json()}")

    def comment(self, number, content):
        """comment in a pull request

        Best-effort: failures are logged but not raised.
        """
        url = f'{API_URL}/repos/{self.base_owner}/{self.base_repo}/issues/{number}/comments'
        params = {
            'body': content
        }
        r = requests.post(url, headers=self._base_auth_headers, json=params)
        if r.status_code == 201:
            print('SUCCESS - create comment')
        else:
            print('FAILURE - create comment')
            print(f'status code: {r.status_code}')
            print(r.json())
class EnvDefault(argparse.Action):
    """argparse action that falls back to an environment variable.

    When no explicit default is supplied and the named environment
    variable is set, its value becomes the option's default; a satisfied
    default also clears ``required`` so the flag may be omitted.
    """

    def __init__(self, env, default=None, required=True, **kwargs):
        """
        :param env: name of the environment variable to consult.
        :param default: explicit default; wins over the environment.
        :param required: whether the option must be passed when no
            default could be resolved.
        """
        # Compare against None (not truthiness) so falsy-but-valid
        # explicit defaults such as '' or 0 are not silently overridden
        # by the environment.
        if default is None and env:
            if env in os.environ:
                default = os.environ[env]
        # A resolved default means the flag no longer has to be passed.
        if required and default is not None:
            required = False
        super().__init__(default=default, required=required, **kwargs)

    def __call__(self, parser, namespace, values, option_string=None):
        # Explicitly-passed values always win: store them on the namespace.
        setattr(namespace, self.dest, values)
# Assume upstream is {self.base_owner}/{self.base_repo} remote
git fetch upstream {self.head} {self.base}
git checkout -b fix-auto-merge-conflict-{number} upstream/{self.base}
git merge upstream/{self.head}
# Fix any merge conflicts caused by this merge
git commit -am "Merge {self.head} into {self.base}"
git push <personal fork> fix-auto-merge-conflict-{number}
# Open a PR targets {self.base_owner}/{self.base_repo} {self.base} | 0.488771 | 0.17441 |
from datetime import datetime
import unittest
import mox
from stacktach.models import RawData, GlanceRawData, GenericRawData
from stacktach.models import ImageDeletes, InstanceExists, ImageExists
from tests.unit.utils import IMAGE_UUID_1
from stacktach import datetime_to_decimal as dt
from tests.unit import StacktachBaseTestCase
class ModelsTestCase(StacktachBaseTestCase):
    """Checks that each raw-data model reports its own class name."""

    # assertEquals is a deprecated alias of assertEqual (removed in
    # Python 3.12); use assertEqual throughout.
    def test_get_name_for_rawdata(self):
        self.assertEqual(RawData.get_name(), 'RawData')

    def test_get_name_for_glancerawdata(self):
        self.assertEqual(GlanceRawData.get_name(), 'GlanceRawData')

    def test_get_name_for_genericrawdata(self):
        self.assertEqual(GenericRawData.get_name(), 'GenericRawData')
class ImageDeletesTestCase(unittest.TestCase):
    """Tests for ImageDeletes.find, driven by mox record/replay."""

    def setUp(self):
        self.mox = mox.Mox()

    def tearDown(self):
        self.mox.UnsetStubs()

    # assertEquals is a deprecated alias of assertEqual (removed in
    # Python 3.12); use assertEqual throughout.
    def test_find_delete_should_return_delete_issued_before_given_time(self):
        delete = self.mox.CreateMockAnything()
        deleted_max = datetime.utcnow()
        self.mox.StubOutWithMock(ImageDeletes.objects, 'filter')
        ImageDeletes.objects.filter(
            uuid=IMAGE_UUID_1,
            deleted_at__lte=dt.dt_to_decimal(deleted_max)).AndReturn(delete)
        self.mox.ReplayAll()
        self.assertEqual(ImageDeletes.find(
            IMAGE_UUID_1, deleted_max), delete)
        self.mox.VerifyAll()

    def test_find_delete_should_return_delete_with_the_given_uuid(self):
        # With no cutoff time, find() must filter by uuid only.
        delete = self.mox.CreateMockAnything()
        self.mox.StubOutWithMock(ImageDeletes.objects, 'filter')
        ImageDeletes.objects.filter(uuid=IMAGE_UUID_1).AndReturn(delete)
        self.mox.ReplayAll()
        self.assertEqual(ImageDeletes.find(IMAGE_UUID_1, None), delete)
        self.mox.VerifyAll()
class ImageExistsTestCase(unittest.TestCase):
    """Tests for ImageExists grouping and mark-as-sent helpers (mox-based)."""

    def setUp(self):
        self.mox = mox.Mox()

    def tearDown(self):
        self.mox.UnsetStubs()

    def test_group_exists_with_date_status_in_audit_period_by_owner_rawid(self):
        # Rows sharing the same "<owner>-<raw_id>" key must be bucketed
        # together by find_and_group_by_owner_and_raw_id.
        end_max = datetime.utcnow()
        status = 'pending'
        exist1 = self.mox.CreateMockAnything()
        exist1.owner = "owner1"
        exist1.raw_id = "1"
        exist2 = self.mox.CreateMockAnything()
        exist2.owner = "owner2"
        exist2.raw_id = "2"
        exist3 = self.mox.CreateMockAnything()
        exist3.owner = "owner1"
        exist3.raw_id = "1"
        exist4 = self.mox.CreateMockAnything()
        exist4.owner = "owner1"
        exist4.raw_id = "3"
        ordered_results = [exist1, exist3, exist4, exist2]
        unordered_results = self.mox.CreateMockAnything()
        related_results = self.mox.CreateMockAnything()
        self.mox.StubOutWithMock(ImageExists.objects, 'select_related')
        ImageExists.objects.select_related().AndReturn(related_results)
        related_results.filter(
            audit_period_ending__lte=dt.dt_to_decimal(end_max),
            status=status).AndReturn(unordered_results)
        unordered_results.order_by('owner').AndReturn(ordered_results)
        self.mox.ReplayAll()
        results = ImageExists.find_and_group_by_owner_and_raw_id(end_max,
                                                                 status)
        self.mox.VerifyAll()
        self.assertEqual(results, {'owner1-1': [exist1, exist3],
                                   'owner1-3': [exist4],
                                   'owner2-2': [exist2]})

    def test_mark_exists_as_sent_unverified(self):
        # Happy path: every pending exist gets send_status '201' and the
        # (absent, not-pending) result tuple stays empty.
        message_ids = ["0708cb0b-6169-4d7c-9f58-3cf3d5bf694b",
                       "9156b83e-f684-4ec3-8f94-7e41902f27aa"]
        exist1 = self.mox.CreateMockAnything()
        exist1.status = "pending"
        exist1.save()
        exist2 = self.mox.CreateMockAnything()
        exist2.status = "pending"
        exist2.save()
        exist3 = self.mox.CreateMockAnything()
        exist3.status = "pending"
        exist3.save()
        self.mox.StubOutWithMock(ImageExists.objects, 'filter')
        ImageExists.objects.filter(message_id=message_ids[0]).AndReturn(
            [exist1, exist2])
        ImageExists.objects.filter(message_id=message_ids[1]).AndReturn(
            [exist3])
        self.mox.ReplayAll()
        results = ImageExists.mark_exists_as_sent_unverified(message_ids)
        self.assertEqual(results, ([], []))
        self.assertEqual(exist1.send_status, '201')
        self.assertEqual(exist2.send_status, '201')
        self.assertEqual(exist3.send_status, '201')
        self.mox.VerifyAll()

    def test_mark_exists_as_sent_unverified_return_absent_exists(self):
        # A message id with no matching rows is reported in the first
        # element of the returned tuple.
        message_ids = ["0708cb0b-6169-4d7c-9f58-3cf3d5bf694b",
                       "9156b83e-f684-4ec3-8f94-7e41902f27aa"]
        exist1 = self.mox.CreateMockAnything()
        exist1.status = "pending"
        exist1.save()
        exist2 = self.mox.CreateMockAnything()
        exist2.status = "pending"
        exist2.save()
        self.mox.StubOutWithMock(ImageExists.objects, 'filter')
        ImageExists.objects.filter(message_id=message_ids[0]).AndReturn(
            [exist1, exist2])
        ImageExists.objects.filter(message_id=message_ids[1]).AndReturn([])
        self.mox.ReplayAll()
        results = ImageExists.mark_exists_as_sent_unverified(message_ids)
        self.assertEqual(results, (['9156b83e-f684-4ec3-8f94-7e41902f27aa'],
                                   []))
        self.assertEqual(exist1.send_status, '201')
        self.assertEqual(exist2.send_status, '201')
        self.mox.VerifyAll()

    def test_mark_exists_as_sent_unverified_and_return_exist_not_pending(self):
        # Rows whose status is not "pending" are reported in the second
        # element of the returned tuple and are not saved.
        message_ids = ["0708cb0b-6169-4d7c-9f58-3cf3d5bf694b",
                       "9156b83e-f684-4ec3-8f94-7e41902f27aa"]
        exist1 = self.mox.CreateMockAnything()
        exist1.status = "pending"
        exist1.save()
        exist2 = self.mox.CreateMockAnything()
        exist2.status = "verified"
        exist3 = self.mox.CreateMockAnything()
        exist3.status = "pending"
        exist3.save()
        self.mox.StubOutWithMock(ImageExists.objects, 'filter')
        ImageExists.objects.filter(message_id=message_ids[0]).AndReturn(
            [exist1, exist2])
        ImageExists.objects.filter(message_id=message_ids[1]).AndReturn(
            [exist3])
        self.mox.ReplayAll()
        results = ImageExists.mark_exists_as_sent_unverified(message_ids)
        self.assertEqual(results, ([],
                                   ["0708cb0b-6169-4d7c-9f58-3cf3d5bf694b"]))
        self.assertEqual(exist1.send_status, '201')
        self.assertEqual(exist3.send_status, '201')
        self.mox.VerifyAll()
class InstanceExistsTestCase(unittest.TestCase):
    """Tests for InstanceExists.find and mark_exists_as_sent_unverified."""

    def setUp(self):
        self.mox = mox.Mox()

    def tearDown(self):
        self.mox.UnsetStubs()

    def test_find_should_return_records_with_date_and_status_in_audit_period(self):
        # find() filters by audit-period end and status, ordered by id.
        end_max = datetime.utcnow()
        status = 'pending'
        unordered_results = self.mox.CreateMockAnything()
        expected_results = [1, 2]
        related_results = self.mox.CreateMockAnything()
        self.mox.StubOutWithMock(InstanceExists.objects, 'select_related')
        InstanceExists.objects.select_related().AndReturn(related_results)
        related_results.filter(audit_period_ending__lte=dt.dt_to_decimal(
            end_max), status=status).AndReturn(unordered_results)
        unordered_results.order_by('id').AndReturn(expected_results)
        self.mox.ReplayAll()
        results = InstanceExists.find(end_max, status)
        self.mox.VerifyAll()
        self.assertEqual(results, [1, 2])

    def test_mark_exists_as_sent_unverified(self):
        # Happy path: every pending exist gets send_status '201'.
        message_ids = ["0708cb0b-6169-4d7c-9f58-3cf3d5bf694b",
                       "9156b83e-f684-4ec3-8f94-7e41902f27aa"]
        exist1 = self.mox.CreateMockAnything()
        exist1.status = "pending"
        exist1.save()
        exist2 = self.mox.CreateMockAnything()
        exist2.status = "pending"
        exist2.save()
        self.mox.StubOutWithMock(InstanceExists.objects, 'get')
        InstanceExists.objects.get(message_id=message_ids[0]).AndReturn(exist1)
        InstanceExists.objects.get(message_id=message_ids[1]).AndReturn(exist2)
        self.mox.ReplayAll()
        results = InstanceExists.mark_exists_as_sent_unverified(message_ids)
        self.assertEqual(results, ([], []))
        self.assertEqual(exist1.send_status, '201')
        self.assertEqual(exist2.send_status, '201')
        self.mox.VerifyAll()

    def test_mark_exists_as_sent_unverified_return_absent_exists(self):
        # A lookup failure (get raises) reports the id as absent in the
        # first element of the returned tuple.
        message_ids = ["0708cb0b-6169-4d7c-9f58-3cf3d5bf694b",
                       "9156b83e-f684-4ec3-8f94-7e41902f27aa"]
        exist1 = self.mox.CreateMockAnything()
        exist1.status = "pending"
        exist1.save()
        self.mox.StubOutWithMock(InstanceExists.objects, 'get')
        InstanceExists.objects.get(message_id=message_ids[0]).AndReturn(exist1)
        InstanceExists.objects.get(message_id=message_ids[1]).AndRaise(
            Exception)
        self.mox.ReplayAll()
        results = InstanceExists.mark_exists_as_sent_unverified(message_ids)
        self.assertEqual(results, (['9156b83e-f684-4ec3-8f94-7e41902f27aa'],
                                   []))
        self.assertEqual(exist1.send_status, '201')
        self.mox.VerifyAll()

    def test_mark_exists_as_sent_unverified_and_return_exist_not_pending(self):
        # A record whose status is not "pending" is reported in the
        # second element of the returned tuple and is not saved.
        message_ids = ["0708cb0b-6169-4d7c-9f58-3cf3d5bf694b",
                       "9156b83e-f684-4ec3-8f94-7e41902f27aa"]
        exist1 = self.mox.CreateMockAnything()
        exist1.status = "pending"
        exist1.save()
        exist2 = self.mox.CreateMockAnything()
        exist2.status = "verified"
        self.mox.StubOutWithMock(InstanceExists.objects, 'get')
        InstanceExists.objects.get(message_id=message_ids[0]).AndReturn(exist1)
        InstanceExists.objects.get(message_id=message_ids[1]).AndReturn(exist2)
        self.mox.ReplayAll()
        results = InstanceExists.mark_exists_as_sent_unverified(message_ids)
        self.assertEqual(results, ([],
                                   ["9156b83e-f684-4ec3-8f94-7e41902f27aa"]))
        self.assertEqual(exist1.send_status, '201')
        self.mox.VerifyAll()
import unittest
import mox
from stacktach.models import RawData, GlanceRawData, GenericRawData
from stacktach.models import ImageDeletes, InstanceExists, ImageExists
from tests.unit.utils import IMAGE_UUID_1
from stacktach import datetime_to_decimal as dt
from tests.unit import StacktachBaseTestCase
class ModelsTestCase(StacktachBaseTestCase):
def test_get_name_for_rawdata(self):
self.assertEquals(RawData.get_name(), 'RawData')
def test_get_name_for_glancerawdata(self):
self.assertEquals(GlanceRawData.get_name(), 'GlanceRawData')
def test_get_name_for_genericrawdata(self):
self.assertEquals(GenericRawData.get_name(), 'GenericRawData')
class ImageDeletesTestCase(unittest.TestCase):
def setUp(self):
self.mox = mox.Mox()
def tearDown(self):
self.mox.UnsetStubs()
def test_find_delete_should_return_delete_issued_before_given_time(self):
delete = self.mox.CreateMockAnything()
deleted_max = datetime.utcnow()
self.mox.StubOutWithMock(ImageDeletes.objects, 'filter')
ImageDeletes.objects.filter(
uuid=IMAGE_UUID_1,
deleted_at__lte=dt.dt_to_decimal(deleted_max)).AndReturn(delete)
self.mox.ReplayAll()
self.assertEquals(ImageDeletes.find(
IMAGE_UUID_1, deleted_max), delete)
self.mox.VerifyAll()
def test_find_delete_should_return_delete_with_the_given_uuid(self):
delete = self.mox.CreateMockAnything()
self.mox.StubOutWithMock(ImageDeletes.objects, 'filter')
ImageDeletes.objects.filter(uuid=IMAGE_UUID_1).AndReturn(delete)
self.mox.ReplayAll()
self.assertEquals(ImageDeletes.find(IMAGE_UUID_1, None), delete)
self.mox.VerifyAll()
class ImageExistsTestCase(unittest.TestCase):
def setUp(self):
self.mox = mox.Mox()
def tearDown(self):
self.mox.UnsetStubs()
def test_group_exists_with_date_status_in_audit_period_by_owner_rawid(self):
end_max = datetime.utcnow()
status = 'pending'
exist1 = self.mox.CreateMockAnything()
exist1.owner = "owner1"
exist1.raw_id = "1"
exist2 = self.mox.CreateMockAnything()
exist2.owner = "owner2"
exist2.raw_id = "2"
exist3 = self.mox.CreateMockAnything()
exist3.owner = "owner1"
exist3.raw_id = "1"
exist4 = self.mox.CreateMockAnything()
exist4.owner = "owner1"
exist4.raw_id = "3"
ordered_results = [exist1, exist3, exist4, exist2]
unordered_results = self.mox.CreateMockAnything()
related_results = self.mox.CreateMockAnything()
self.mox.StubOutWithMock(ImageExists.objects, 'select_related')
ImageExists.objects.select_related().AndReturn(related_results)
related_results.filter(
audit_period_ending__lte=dt.dt_to_decimal(end_max),
status=status).AndReturn(unordered_results)
unordered_results.order_by('owner').AndReturn(ordered_results)
self.mox.ReplayAll()
results = ImageExists.find_and_group_by_owner_and_raw_id(end_max,
status)
self.mox.VerifyAll()
self.assertEqual(results, {'owner1-1': [exist1, exist3],
'owner1-3': [exist4],
'owner2-2': [exist2]})
def test_mark_exists_as_sent_unverified(self):
message_ids = ["0708cb0b-6169-4d7c-9f58-3cf3d5bf694b",
"9156b83e-f684-4ec3-8f94-7e41902f27aa"]
exist1 = self.mox.CreateMockAnything()
exist1.status = "pending"
exist1.save()
exist2 = self.mox.CreateMockAnything()
exist2.status = "pending"
exist2.save()
exist3 = self.mox.CreateMockAnything()
exist3.status = "pending"
exist3.save()
self.mox.StubOutWithMock(ImageExists.objects, 'filter')
ImageExists.objects.filter(message_id=message_ids[0]).AndReturn(
[exist1, exist2])
ImageExists.objects.filter(message_id=message_ids[1]).AndReturn(
[exist3])
self.mox.ReplayAll()
results = ImageExists.mark_exists_as_sent_unverified(message_ids)
self.assertEqual(results, ([], []))
self.assertEqual(exist1.send_status, '201')
self.assertEqual(exist2.send_status, '201')
self.assertEqual(exist3.send_status, '201')
self.mox.VerifyAll()
def test_mark_exists_as_sent_unverified_return_absent_exists(self):
message_ids = ["0708cb0b-6169-4d7c-9f58-3cf3d5bf694b",
"9156b83e-f684-4ec3-8f94-7e41902f27aa"]
exist1 = self.mox.CreateMockAnything()
exist1.status = "pending"
exist1.save()
exist2 = self.mox.CreateMockAnything()
exist2.status = "pending"
exist2.save()
self.mox.StubOutWithMock(ImageExists.objects, 'filter')
ImageExists.objects.filter(message_id=message_ids[0]).AndReturn(
[exist1, exist2])
ImageExists.objects.filter(message_id=message_ids[1]).AndReturn([])
self.mox.ReplayAll()
results = ImageExists.mark_exists_as_sent_unverified(message_ids)
self.assertEqual(results, (['9156b83e-f684-4ec3-8f94-7e41902f27aa'],
[]))
self.assertEqual(exist1.send_status, '201')
self.assertEqual(exist2.send_status, '201')
self.mox.VerifyAll()
def test_mark_exists_as_sent_unverified_and_return_exist_not_pending(self):
message_ids = ["0708cb0b-6169-4d7c-9f58-3cf3d5bf694b",
"9156b83e-f684-4ec3-8f94-7e41902f27aa"]
exist1 = self.mox.CreateMockAnything()
exist1.status = "pending"
exist1.save()
exist2 = self.mox.CreateMockAnything()
exist2.status = "verified"
exist3 = self.mox.CreateMockAnything()
exist3.status = "pending"
exist3.save()
self.mox.StubOutWithMock(ImageExists.objects, 'filter')
ImageExists.objects.filter(message_id=message_ids[0]).AndReturn(
[exist1, exist2])
ImageExists.objects.filter(message_id=message_ids[1]).AndReturn(
[exist3])
self.mox.ReplayAll()
results = ImageExists.mark_exists_as_sent_unverified(message_ids)
self.assertEqual(results, ([],
["0708cb0b-6169-4d7c-9f58-3cf3d5bf694b"]))
self.assertEqual(exist1.send_status, '201')
self.assertEqual(exist3.send_status, '201')
self.mox.VerifyAll()
class InstanceExistsTestCase(unittest.TestCase):
def setUp(self):
self.mox = mox.Mox()
def tearDown(self):
self.mox.UnsetStubs()
def test_find_should_return_records_with_date_and_status_in_audit_period(self):
end_max = datetime.utcnow()
status = 'pending'
unordered_results = self.mox.CreateMockAnything()
expected_results = [1, 2]
related_results = self.mox.CreateMockAnything()
self.mox.StubOutWithMock(InstanceExists.objects, 'select_related')
InstanceExists.objects.select_related().AndReturn(related_results)
related_results.filter(audit_period_ending__lte=dt.dt_to_decimal(
end_max), status=status).AndReturn(unordered_results)
unordered_results.order_by('id').AndReturn(expected_results)
self.mox.ReplayAll()
results = InstanceExists.find(end_max, status)
self.mox.VerifyAll()
self.assertEqual(results, [1, 2])
def test_mark_exists_as_sent_unverified(self):
message_ids = ["0708cb0b-6169-4d7c-9f58-3cf3d5bf694b",
"9156b83e-f684-4ec3-8f94-7e41902f27aa"]
exist1 = self.mox.CreateMockAnything()
exist1.status = "pending"
exist1.save()
exist2 = self.mox.CreateMockAnything()
exist2.status = "pending"
exist2.save()
self.mox.StubOutWithMock(InstanceExists.objects, 'get')
InstanceExists.objects.get(message_id=message_ids[0]).AndReturn(exist1)
InstanceExists.objects.get(message_id=message_ids[1]).AndReturn(exist2)
self.mox.ReplayAll()
results = InstanceExists.mark_exists_as_sent_unverified(message_ids)
self.assertEqual(results, ([], []))
self.assertEqual(exist1.send_status, '201')
self.assertEqual(exist2.send_status, '201')
self.mox.VerifyAll()
def test_mark_exists_as_sent_unverified_return_absent_exists(self):
message_ids = ["0708cb0b-6169-4d7c-9f58-3cf3d5bf694b",
"9156b83e-f684-4ec3-8f94-7e41902f27aa"]
exist1 = self.mox.CreateMockAnything()
exist1.status = "pending"
exist1.save()
self.mox.StubOutWithMock(InstanceExists.objects, 'get')
InstanceExists.objects.get(message_id=message_ids[0]).AndReturn(exist1)
InstanceExists.objects.get(message_id=message_ids[1]).AndRaise(
Exception)
self.mox.ReplayAll()
results = InstanceExists.mark_exists_as_sent_unverified(message_ids)
self.assertEqual(results, (['9156b83e-f684-4ec3-8f94-7e41902f27aa'],
[]))
self.assertEqual(exist1.send_status, '201')
self.mox.VerifyAll()
def test_mark_exists_as_sent_unverified_and_return_exist_not_pending(self):
message_ids = ["0708cb0b-6169-4d7c-9f58-3cf3d5bf694b",
"9156b83e-f684-4ec3-8f94-7e41902f27aa"]
exist1 = self.mox.CreateMockAnything()
exist1.status = "pending"
exist1.save()
exist2 = self.mox.CreateMockAnything()
exist2.status = "verified"
self.mox.StubOutWithMock(InstanceExists.objects, 'get')
InstanceExists.objects.get(message_id=message_ids[0]).AndReturn(exist1)
InstanceExists.objects.get(message_id=message_ids[1]).AndReturn(exist2)
self.mox.ReplayAll()
results = InstanceExists.mark_exists_as_sent_unverified(message_ids)
self.assertEqual(results, ([],
["9156b83e-f684-4ec3-8f94-7e41902f27aa"]))
self.assertEqual(exist1.send_status, '201')
self.mox.VerifyAll() | 0.658418 | 0.207777 |
# Things that won't be done
# - Statics won't be name mangled
# - __LINE__ and __FILE__ won't work well
import sys
import pathlib
import shutil
# note: all files have to be in same directory
def add_file(imports, file_name, out_file, header_guard):
    """Recursively inline *file_name* into *out_file*.

    Local ``#include "..."`` directives are replaced by the contents of
    the referenced file (resolved inside the flattened ``tmp_dir``);
    system includes are emitted once and remembered. Lines that look like
    the header guard (``#... <header_guard> ... _H``) are dropped.

    :param imports: list of already-emitted file names / system headers;
        mutated in place and doubles as the recursion "visited" set.
    :param file_name: path of the file to inline.
    :param out_file: open, writable file object receiving the output.
    :param header_guard: prefix of the include-guard macros to strip.
    """
    if file_name in imports: return
    # .incl files may be inlined multiple times, so they are not recorded
    if not file_name.endswith(".incl"): imports.append(file_name)
    if not pathlib.Path(file_name).is_file(): return
    with open(file_name) as f:
        for line in f:
            if line.strip().startswith("#include"):
                to_import = line.split("#include")[1].strip()
                if to_import.startswith('"'):
                    # local include: resolve by bare file name in tmp_dir
                    fname = to_import.strip('"')
                    if '/' in fname: fname = fname.split('/')[-1]
                    fname = 'tmp_dir/' + fname
                    add_file(imports, fname, out_file, header_guard)
                elif to_import not in imports:
                    # system include: emit once and remember it
                    imports.append(to_import)
                    out_file.write(line)
            elif not line.startswith("#") or header_guard not in line or "_H" not in line:
                # pass through everything except include-guard lines
                out_file.write(line)
def filter_multiple_newlines(out_name, file_name):
    """Copy *out_name* to *file_name*, collapsing runs of blank lines.

    Consecutive empty (whitespace-only) lines are reduced to a single
    blank line; every other line is copied through unchanged.
    """
    last_was_blank = False
    with open(out_name, "r") as src, open(file_name, "w") as dst:
        for raw in src:
            is_blank = not raw.strip()
            # drop a blank line only when the previous line was blank too
            if not (is_blank and last_was_blank):
                dst.write(raw)
            last_was_blank = is_blank
def files_in_folder(folder):
    """Yield (as strings) every .c/.h/.incl file under *folder*, recursively."""
    wanted = ('.c', '.h', '.incl')
    return (str(entry)
            for entry in pathlib.Path(folder).rglob("*")
            if entry.is_file() and entry.suffix in wanted)
def main():
    """Amalgamate the given source folders into a single C file or header.

    argv: ``<out_name> <header_guard_start> <paths...>``. All inputs are
    copied flat into ``tmp_dir`` first, so every file name must be unique
    across the given paths.
    """
    # Minimum argv: script name, output file, header guard, one path.
    # (Fixed: the previous check `< 5` wrongly rejected a single-path
    # invocation even though one path is a valid use of <paths>.)
    if len(sys.argv) < 4:
        print("Usage is: python3 ssc.py <out_name> "
              "<header_guard_start> <paths>")
        return
    shutil.rmtree('tmp_dir', ignore_errors=True)
    pathlib.Path('tmp_dir').mkdir(parents=False, exist_ok=False)
    out_name = sys.argv[1]
    out_is_header = out_name.endswith(".h")
    header_guard = sys.argv[2]
    paths = sys.argv[3:]
    imports = []
    with open('tmp_file', "w") as out:
        out.write("/* AUTO GENERATED */\n")
        if out_is_header: out.write("#ifndef " + header_guard + "_H\n#define " + header_guard + "_H\n\n")
        # flatten all inputs into tmp_dir so includes resolve by file name
        for f in [f for path in paths for f in files_in_folder(path)]:
            shutil.copy(f, "tmp_dir")
        # headers first so declarations precede the .c implementations
        all_files = sorted(files_in_folder('tmp_dir'), key=lambda x: x[-2:] != ".h")
        for f in all_files:
            if not f.endswith('.incl'): add_file(imports, f, out, header_guard)
        if out_is_header: out.write("\n#endif\n")
    shutil.rmtree('tmp_dir', ignore_errors=True)
    filter_multiple_newlines('tmp_file', out_name)
    pathlib.Path('tmp_file').unlink()


if __name__ == "__main__":
    main()
# Things that won't be done
# - Statics won't be name mangled
# - __LINE__ and __FILE__ won't work well
import sys
import pathlib
import shutil
# note: all files have to be in same directory
def add_file(imports, file_name, out_file, header_guard):
if file_name in imports: return
if not file_name.endswith(".incl"): imports.append(file_name)
if not pathlib.Path(file_name).is_file(): return
with open(file_name) as f:
for line in f:
if line.strip().startswith("#include"):
to_import = line.split("#include")[1].strip()
if to_import.startswith('"'):
# local incl
fname = to_import.strip('"')
if '/' in fname: fname = fname.split('/')[-1]
fname = 'tmp_dir/' + fname
add_file(imports, fname, out_file, header_guard)
elif to_import not in imports:
# system incl
imports.append(to_import)
out_file.write(line)
elif not line.startswith("#") or header_guard not in line or "_H" not in line:
out_file.write(line)
def filter_multiple_newlines(out_name, file_name):
prev_empty = False
with open(out_name, "r") as in_file:
with open(file_name, "w") as out:
for line in in_file:
stripped_line = line.strip()
if stripped_line:
prev_empty = False
out.write(line)
elif not prev_empty:
out.write(line)
prev_empty = not stripped_line
def files_in_folder(folder):
return (str(f) for f in pathlib.Path(folder).rglob("*") \
if f.is_file() and f.suffix in ['.c', '.h', '.incl'])
def main():
if len(sys.argv) < 5:
print("Usage is: python3 ssc.py <out_name> "
"<header_guard_start> <paths>")
return
shutil.rmtree('tmp_dir', ignore_errors=True)
pathlib.Path('tmp_dir').mkdir(parents=False, exist_ok=False)
out_name = sys.argv[1]
out_is_header = out_name.endswith(".h")
header_guard = sys.argv[2]
paths = sys.argv[3:]
imports = []
with open('tmp_file', "w") as out:
out.write("/* AUTO GENERATED */\n")
if out_is_header: out.write("#ifndef " + header_guard + "_H\n#define " + header_guard + "_H\n\n")
for f in [f for path in paths for f in files_in_folder(path)]:
shutil.copy(f, "tmp_dir")
all_files = sorted(files_in_folder('tmp_dir'), key = lambda x: x[-2:] != ".h")
for f in all_files:
if not f.endswith('.incl'): add_file(imports, f, out, header_guard)
if out_is_header: out.write("\n#endif\n")
shutil.rmtree('tmp_dir', ignore_errors=True)
filter_multiple_newlines('tmp_file', out_name)
pathlib.Path('tmp_file').unlink()
if __name__ == "__main__":
main() | 0.141845 | 0.05634 |
import csv
import logging
from abc import ABC, abstractmethod
from typing import Any, Dict, List, Tuple, Union
import pandas as pd
from pulp import LpMaximize, LpProblem, LpVariable, lpSum, PULP_CBC_CMD
from dfs import constraints
from dfs import data_frame_utils, pulp_utils
from dfs import file_utils
from dfs.exceptions import InvalidDataFrameException, UnsolvableLineupException, InvalidConstraintException
from dfs.nfl.positions import RB, WR, TE, FLEX, normalize_position
from dfs.slate import GameSlate
logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())
class OptimizedLineup:
    """
    A class that represents an optimized fantasy football lineup for a given site.
    """

    def __init__(self, optimizer: 'LineupOptimizer', site: str):
        """
        :param optimizer: The optimizer used to generate this lineup; its LP
            must already be solved (each row's 'LpVariable' has a varValue).
        :param site: The fantasy site that the lineup has been generated for.
        """
        self.site = site
        # keep only players whose LP decision variable was selected
        players = optimizer.data[optimizer.data.apply(lambda x: x['LpVariable'].varValue == 1, axis=1)]
        self.points = round(players[optimizer.points_col].sum(), 2)
        self.salary = players[optimizer.salary_col].sum()
        self.salary_cap = optimizer.salary_cap()
        # map the optimizer's configurable column names to canonical keys
        col_mapping = {
            optimizer.id_col: 'id',
            optimizer.name_col: 'name',
            optimizer.position_col: 'position',
            optimizer.team_col: 'team',
            optimizer.opponent_col: 'opponent',
            optimizer.points_col: 'points',
            optimizer.salary_col: 'salary',
            optimizer.datetime_col: 'datetime',
            optimizer.is_home_col: 'is_home'
        }
        players_dict = players.to_dict('records')
        position_to_count = dict()
        for p in players_dict:
            # re-key each record to the canonical names
            for k, v in col_mapping.items():
                if k is not None:  # id_col may be None
                    p[v] = p.pop(k)
            # drop any extra dataframe columns not in the mapping
            keys_to_delete = []
            for k in p.keys():
                if k not in col_mapping.values():
                    keys_to_delete.append(k)
            for k in keys_to_delete:
                del p[k]
            # tally how many players fill each position
            if p['position'] in position_to_count:
                position_to_count[p['position']] = position_to_count[p['position']] + 1
            else:
                position_to_count[p['position']] = 1
        self.players = [LineupPlayer(p) for p in players_dict]
        # The position at its constraint maximum is the one that supplied
        # the FLEX player; mark the latest-starting such player as FLEX.
        # NOTE(review): assumes each of RB/WR/TE appears at least once in
        # the lineup — position_to_count[position] would raise KeyError
        # otherwise. Confirm the position minimums guarantee this.
        for position in (RB, WR, TE):
            _, maximum = optimizer.position_constraints()[position]
            if position_to_count[position] == maximum:
                logger.info(f"Flex position for this lineup is filled by {position}")
                players_for_position = list(sorted(filter(lambda player: player.position == position, self.players),
                                                   key=lambda x: x.datetime))
                players_for_position[-1].lineup_position = FLEX
                break
        # order players in conventional roster order (QB, RB, ... FLEX, ...)
        self.players = sorted(self.players, key=lambda x: optimizer.player_order_dict()[x.lineup_position])

    def write_to_file(self, file_path: str) -> None:
        """
        Writes the optimized lineup to a CSV file (appending if it exists).

        :param file_path: the path to the file which will be created if it does not exist.
        :return: None
        :raises: ValueError if file_path is None or points to a non-CSV file.
        """
        if file_path is None:
            raise ValueError('File path cannot be none')
        extension = file_utils.get_extension(file_path)
        if extension != 'csv':
            raise ValueError(f"Only CSV output is supported, found: {extension}")
        file_exists = file_utils.file_exists(file_path)
        # columns come from LineupPlayer.__dir__, which is restricted on purpose
        with open(file_path, mode='a') as f:
            writer = csv.DictWriter(f, fieldnames=dir(self.players[0]))
            if not file_exists:
                writer.writeheader()
            writer.writerows([{k: player.__getattribute__(k) for k in dir(player)} for player in self.players])

    def to_dict(self) -> Dict[str, Any]:
        """
        Convert this optimized lineup into a dict.

        :return: A dict representing this lineup.
        """
        return {
            'site': self.site,
            'points': self.points,
            'salary': self.salary,
            'salary_cap': self.salary_cap,
            'players': [p.to_dict() for p in self.players]
        }

    def __repr__(self):
        return f"dfs.optimize.OptimizedLineup({self.to_dict()})"

    def __str__(self):
        players_string = '\n'.join([str(p) for p in self.players])
        return (f"Optimized {self.site} Lineup \n"
                f"{self.points} points @ {self.salary} salary \n" +
                players_string)
class LineupPlayer:
    """
    A single player slotted into an optimized lineup.
    """

    def __init__(self, player_dict: dict):
        """
        :param player_dict: row from the optimizer dataframe, already
            re-keyed to the canonical column names.
        """
        for attr in ('name', 'position', 'team', 'opponent',
                     'points', 'salary', 'datetime', 'is_home'):
            setattr(self, attr, player_dict[attr])
        # starts equal to the true position; the optimizer may later
        # reassign it to FLEX
        self.lineup_position = player_dict['position']

    def to_dict(self) -> Dict[str, Any]:
        """
        Return a plain-dict view of this player (datetime stringified).

        :return: A dict representing this player.
        """
        return {
            'name': self.name,
            'position': self.position,
            'lineup_position': self.lineup_position,
            'team': self.team,
            'opponent': self.opponent,
            'points': self.points,
            'salary': self.salary,
            'datetime': str(self.datetime),
            'is_home': self.is_home
        }

    def __dir__(self):
        # restricted on purpose: these names drive the CSV columns
        # written by OptimizedLineup.write_to_file
        return ['name', 'position', 'team', 'opponent', 'points', 'salary', 'datetime']

    def __repr__(self):
        return f"dfs.optimize.LineupPlayer({self.to_dict()})"

    def __str__(self):
        return f"{self.lineup_position} {self.name} - {self.team} - {self.points} @ {self.salary}"
class LineupOptimizer(ABC):
"""
A pandas data frame-based fantasy football lineup optimizer.
This class is used to generate optimal fantasy football lineups for various sites when provided
a data frame containing player, position, salary and points information.
"""
    def __init__(self,
                 data_source: Union[pd.DataFrame, str],
                 name_col: str = 'name',
                 position_col: str = 'position',
                 year_col: str = 'year',
                 week_col: str = 'week',
                 points_col: str = 'points',
                 salary_col: str = 'salary',
                 team_col: str = 'team',
                 opponent_col: str = 'opponent',
                 datetime_col: str = 'datetime',
                 is_home_col: str = 'is_home',
                 id_col: str = None):
        """
        :param data_source: A dataframe or file path (csv/xlsx) containing fantasy data.
        :param name_col: The player name column. Default is 'name'.
        :param position_col: The player position column. Default is 'position'.
        :param year_col: The year column. Default is 'year'.
        :param week_col: The week column. Default is 'week'.
        :param points_col: The fantasy points column. Default is 'points'.
        :param salary_col: The player salary column. Default is 'salary'.
        :param team_col: The player team column. Default is 'team'.
        :param opponent_col: The player opponent column. Default is 'opponent'.
        :param datetime_col: The datetime column. Default is 'datetime'.
        :param is_home_col: The column to indicate whether or not the player is at home. Default is 'is_home'.
        :param id_col: Optional ID column name; must be unique per row when given.
        :raises ValueError: if the data source is of the wrong type, missing,
            or not a csv/xlsx file.
        :raises InvalidDataFrameException: if required columns are missing or
            the ID column is not unique.
        """
        if type(data_source) is pd.DataFrame:
            self._data = data_source.copy()  # don't impact original dataframe
        elif type(data_source) is str:
            if not file_utils.file_exists(file=data_source):
                raise ValueError('The data source file does not exist!')
            extension = file_utils.get_extension(file_path=data_source)
            if extension == 'csv':
                self._data = pd.read_csv(filepath_or_buffer=data_source)
            elif extension == 'xlsx':
                self._data = pd.read_excel(io=data_source, engine='openpyxl')
            else:
                raise ValueError('Invalid data source file path! csv and xlsx are supported.')
        else:
            raise ValueError('Invalid data source type!')
        # every configured column must be present in the frame
        if not all(c in self._data.columns for c in [name_col,
                                                     position_col,
                                                     year_col,
                                                     week_col,
                                                     points_col,
                                                     salary_col,
                                                     team_col,
                                                     opponent_col,
                                                     is_home_col,
                                                     datetime_col]):
            raise InvalidDataFrameException('DataFrame does not contain necessary columns')
        if id_col is not None:
            # the optional ID column must uniquely identify each row
            if len(self._data[id_col].unique()) != len(self._data):
                raise InvalidDataFrameException('Provided ID column must be unique for each row')
        self._name_col = name_col
        self._year_col = year_col
        self._week_col = week_col
        self._position_col = position_col
        self._points_col = points_col
        self._salary_col = salary_col
        self._team_col = team_col
        self._opponent_col = opponent_col
        self._datetime_col = datetime_col
        self._is_home_col = is_home_col
        self._id_col = id_col
        self._constraints = []
        # normalize position labels, then drop rows with missing data or a
        # non-positive salary — such rows cannot take part in optimization
        self._data[self._position_col] = self._data[self._position_col].apply(lambda x: normalize_position(x))
        self._data.dropna(inplace=True)
        self._data = self._data[self._data[self._salary_col] > 0]
    # --- Read-only accessors for the underlying dataframe and configured column names ---
    @property
    def data(self) -> pd.DataFrame:
        # The (already normalized/filtered) player dataframe.
        return self._data
    @property
    def id_col(self) -> str:
        # May be None when no ID column was configured.
        return self._id_col
    @property
    def name_col(self) -> str:
        return self._name_col
    @property
    def position_col(self) -> str:
        return self._position_col
    @property
    def year_col(self) -> str:
        return self._year_col
    @property
    def week_col(self) -> str:
        return self._week_col
    @property
    def points_col(self) -> str:
        return self._points_col
    @property
    def salary_col(self) -> str:
        return self._salary_col
    @property
    def team_col(self) -> str:
        return self._team_col
    @property
    def opponent_col(self) -> str:
        return self._opponent_col
    @property
    def datetime_col(self) -> str:
        return self._datetime_col
    @property
    def is_home_col(self) -> str:
        return self._is_home_col
    # --- Site-specific configuration, supplied by per-site subclasses ---
    @abstractmethod
    def num_players(self) -> int:
        """
        Returns the total number of players that are to be included in an optimized lineup for the given site.
        :return: The total number of players to be included in the lineup.
        """
        raise NotImplementedError
    @abstractmethod
    def salary_cap(self) -> int:
        """
        Returns the salary cap - or max available salary - to use for this given site's lineup optimization.
        :return: The site's salary cap.
        """
        raise NotImplementedError
    @abstractmethod
    def site_name(self) -> str:
        """
        Returns the name of the fantasy site that this lineup optimizer is being used for. Ex. DraftKings, FanDuel, etc.
        :return: The name of the fantasy site.
        """
        raise NotImplementedError
    @abstractmethod
    def position_constraints(self) -> Dict[str, Tuple[int, int]]:
        """
        Returns a dict that maps position to a tuple containing min and max numbers of this position that may be
        included in the optimized lineup. Ex. { RB : (2, 3) ... }
        :return: A dict mapping position name to min/max count.
        """
        raise NotImplementedError
    @abstractmethod
    def player_order_dict(self) -> Dict[str, int]:
        """
        Return a mapping of position to lineup order that is used to order players in an optimized lineup.
        :return: the mapping of position to optimized lineup order.
        """
        raise NotImplementedError
def set_only_include_teams(self, teams: List[str]) -> None:
"""
Sets the teams that are to be considered for the lineup optimization.
:param teams: The list of teams to consider.
:return: None
:raises: ValueError if teams to include is none or empty.
"""
if teams is None or len(teams) == 0:
raise ValueError('Included teams must not be none or empty')
self._add_constraint(constraints.OnlyIncludeTeamsConstraint(teams=teams,
team_column=self._team_col))
def set_exclude_teams(self, teams: List[str]) -> None:
"""
Sets the list of teams whose players are to be excluded from lineup optimization.
:param teams: The list of teams to exclude.
:return: None
:raises: ValueError if teams to exclude is none or empty.
"""
if teams is None or len(teams) == 0:
raise ValueError('Teams to exclude must not be none or empty')
for team in teams:
self.set_max_players_from_team(n=0, team=team)
    def set_must_include_team(self, team: str):
        """
        Specifies that a lineup must include a player from a given team.
        :param team: The team that the lineup must include.
        :return: None
        :raises: ValueError if team is none or not found in data frame.
        """
        # "Must include team" == at least one player from that team.
        self.set_min_players_from_team(n=1, team=team)
def set_must_include_player(self, **kwargs) -> None:
"""
Specifies that a lineup must include a player identified by either name or id.
Either name or id must be provided in kwargs.
:return: None
:raises: ValueError if the player is None or not found in the dataframe
"""
if all([it not in kwargs for it in ['id', 'name']]):
raise ValueError('Must provide id or name')
if 'id' in kwargs and self._id_col is None:
raise ValueError('ID column not specified')
key, col = (kwargs['id'], self._id_col) if 'id' in kwargs else (kwargs['name'], self.name_col)
if key is None or key not in self.data[col].unique():
raise ValueError(f"{key} not found in data frame's {col} column")
self._add_constraint(constraints.IncludePlayerConstraint(player=key,
name_col=col))
    def set_exclude_player(self, **kwargs) -> None:
        """
        Specifies that a lineup must exclude a player identified by either name or id.
        Either name or id must be provided in kwargs.
        :return: None
        :raises: ValueError if player is None or not found in dataframe
        """
        if all([it not in kwargs for it in ['id', 'name']]):
            raise ValueError('Must provide id or name')
        if 'id' in kwargs and self._id_col is None:
            raise ValueError('ID column not specified')
        # Prefer the id lookup when both id and name are supplied.
        key, col = (kwargs['id'], self._id_col) if 'id' in kwargs else (kwargs['name'], self.name_col)
        if key is None or key not in self.data[col].unique():
            raise ValueError(f"{key} not found in data frame's {col} column")
        self._add_constraint(constraints.ExcludePlayerConstraint(player=key,
                                                                 name_col=col))
def set_num_players_from_team(self, n: int, team: str):
"""
Sets the number of players from a team that an optimized lineup must include.
:param n: the number of players
:param team: the team name
:return: None
:raises: ValueError if number of players or team are invalid
"""
if n is None or n > self.num_players():
raise ValueError('Invalid number of players')
if team is None or team not in self.data[self._team_col].unique():
raise ValueError('Invalid team name')
self._add_constraint(constraints.MaxPlayersFromTeamConstraint(maximum=n,
team=team,
team_col=self._team_col))
try:
self._add_constraint(constraints.MinPlayersFromTeamConstraint(minimum=n,
team=team,
team_col=self._team_col))
except InvalidConstraintException:
self._constraints.pop() # remove max players constraint if this one fails
raise
def set_max_players_from_team(self, n: int, team: str) -> None:
"""
Sets the maximum number of players that can be included in an optimized lineup from a particular team.
:param n: the maximum number of players that can be included from a particular team
:param team: the name of the team
:return: None
:raises: ValueError if maximum or team are invalid
"""
if n is None or n < 0:
raise ValueError('Invalid maximum players')
if team is None or team not in self.data[self._team_col].unique():
raise ValueError('Invalid team name')
self._add_constraint(constraints.MaxPlayersFromTeamConstraint(maximum=n,
team=team,
team_col=self._team_col))
def set_min_players_from_team(self, n: int, team: str) -> None:
"""
Sets the minimum number of players from a given team that must be included in an optimized lineup.
:param n: the minimum number of players from the specified team that must be included
:param team: the name of the team
:return: None
:raises: ValueError if minimum or team are invalid
"""
if n is None or n > self.num_players():
raise ValueError('Invalid minimum number of players')
if team is None or team not in self.data[self._team_col].unique():
raise ValueError('Invalid team name')
if n == 0:
return
self._add_constraint(constraints.MinPlayersFromTeamConstraint(minimum=n,
team=team,
team_col=self._team_col))
def set_max_salary(self, n: int) -> None:
"""
Sets the maximum salary that can be used in an optimized lineup.
:param n: the max salary.
:return: None
:raises: ValueError if maximum is invalid
"""
if n is None or n <= 0:
raise ValueError('Invalid maximum')
self._add_constraint(constraints.MaxSalaryCapConstraint(salary=n,
salary_col=self._salary_col))
def set_min_salary(self, n: int) -> None:
"""
Sets the minimum salary that can be used in an optimized lineup.
:param n: the minimum salary
:return: None
:raises: ValueError if minimum is invalid
"""
if n is None or n > self.salary_cap():
raise ValueError('Invalid minimum')
self._add_constraint(constraints.MinSalaryCapConstraint(salary=n,
salary_col=self._salary_col))
def set_game_slate(self, slate: GameSlate) -> None:
"""
Set the game slate to determine which games are to be included in an optimized lineup.
:param slate: The game slate to include.
:return: None
"""
logger.warning(f"Setting game slate to {slate.name}")
self._add_constraint(constraints.GameSlateConstraint(slate=slate,
datetime_col=self._datetime_col,
week_col=self.week_col,
num_players=self.num_players()))
def _add_constraint(self, constraint: constraints.LineupConstraint) -> None:
"""
Internal method used to add a constraint by first checking if it is valid.
:param constraint: The constraint to add.
:return: None
:raises: InvalidConstraintException if the constraint is not valid
"""
is_valid, message = constraint.is_valid(self._constraints)
if is_valid:
self._constraints.append(constraint)
else:
raise InvalidConstraintException(f"Invalid constraint: {message}")
    def clear_constraints(self) -> None:
        """
        Clears the current lineup optimizer constraints.
        :return: None
        """
        # Rebinds (rather than mutates) the list, so prior references are unaffected.
        self._constraints = []
def optimize_lineup(self) -> OptimizedLineup:
"""
Generates and returns an optimized lineup for a given fantasy football site.
The lineup is generated using the class's data variable and is optimized under provided constraints.
:return: The optimized lineup.
:raises: ValueError, InvalidDataFrameException
"""
position_constraints = self.position_constraints()
if not data_frame_utils.col_contains_all_values(self._data, self.position_col, position_constraints.keys()):
raise InvalidDataFrameException('Data frame is missing required positions')
self._data['LpVariable'] = self._data.apply(lambda x: LpVariable(f"{x[self._position_col]}_{x.name}",
cat='Binary'), axis=1)
problem = LpProblem(f"{self.site_name()}LineupOptimization", LpMaximize)
for k, v in position_constraints.items():
players = self.data[self.data[self._position_col] == k]
problem += lpSum(players['LpVariable']) >= v[0]
problem += lpSum(players['LpVariable']) <= v[1]
problem += lpSum(self.data[self._points_col] * self.data['LpVariable'])
problem += constraints.LineupSizeConstraint(self.num_players()).apply(self._data)[0]
problem += constraints.MaxSalaryCapConstraint(self.salary_cap(), self._salary_col).apply(self._data)[0]
for constraint in self._constraints:
for c in constraint.apply(self._data): # stack-related constraints may return multiple from apply()
problem += c
problem.solve(PULP_CBC_CMD(msg=False))
if not pulp_utils.is_optimal_solution_found(problem):
raise UnsolvableLineupException('No optimal solution found under current lineup constraints')
return OptimizedLineup(self, self.site_name()) | dfs/optimize.py | import csv
import logging
from abc import ABC, abstractmethod
from typing import Any, Dict, List, Tuple, Union
import pandas as pd
from pulp import LpMaximize, LpProblem, LpVariable, lpSum, PULP_CBC_CMD
from dfs import constraints
from dfs import data_frame_utils, pulp_utils
from dfs import file_utils
from dfs.exceptions import InvalidDataFrameException, UnsolvableLineupException, InvalidConstraintException
from dfs.nfl.positions import RB, WR, TE, FLEX, normalize_position
from dfs.slate import GameSlate
# Module-level logger; NullHandler avoids "no handler" warnings for library consumers.
logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())
class OptimizedLineup:
    """
    A class that represents an optimized fantasy football lineup for a given site.
    """
    def __init__(self, optimizer: 'LineupOptimizer', site: str):
        """
        :param optimizer: The optimizer used to generate this lineup.
        :param site: The fantasy site that the lineup has been generated for.
        """
        self.site = site
        # Selected players are the rows whose LP decision variable solved to 1.
        players = optimizer.data[optimizer.data.apply(lambda x: x['LpVariable'].varValue == 1, axis=1)]
        self.points = round(players[optimizer.points_col].sum(), 2)
        self.salary = players[optimizer.salary_col].sum()
        self.salary_cap = optimizer.salary_cap()
        # Map the optimizer's configured column names onto canonical keys.
        col_mapping = {
            optimizer.id_col: 'id',
            optimizer.name_col: 'name',
            optimizer.position_col: 'position',
            optimizer.team_col: 'team',
            optimizer.opponent_col: 'opponent',
            optimizer.points_col: 'points',
            optimizer.salary_col: 'salary',
            optimizer.datetime_col: 'datetime',
            optimizer.is_home_col: 'is_home'
        }
        players_dict = players.to_dict('records')
        position_to_count = dict()
        for p in players_dict:
            # Rename mapped keys, then drop every column not in the canonical set.
            for k, v in col_mapping.items():
                if k is not None:  # id_col may be None
                    p[v] = p.pop(k)
            keys_to_delete = []
            for k in p.keys():
                if k not in col_mapping.values():
                    keys_to_delete.append(k)
            for k in keys_to_delete:
                del p[k]
            # Tally how many selected players fill each position.
            if p['position'] in position_to_count:
                position_to_count[p['position']] = position_to_count[p['position']] + 1
            else:
                position_to_count[p['position']] = 1
        self.players = [LineupPlayer(p) for p in players_dict]
        # Whichever flex-eligible position hit its positional maximum supplied the
        # FLEX slot; its latest-starting player (by datetime) is relabeled FLEX.
        # NOTE(review): position_to_count[position] raises KeyError if a position
        # has zero selected players — confirm position minimums always prevent that.
        for position in (RB, WR, TE):
            _, maximum = optimizer.position_constraints()[position]
            if position_to_count[position] == maximum:
                logger.info(f"Flex position for this lineup is filled by {position}")
                players_for_position = list(sorted(filter(lambda player: player.position == position, self.players),
                                                   key=lambda x: x.datetime))
                players_for_position[-1].lineup_position = FLEX
                break
        self.players = sorted(self.players, key=lambda x: optimizer.player_order_dict()[x.lineup_position])
    def write_to_file(self, file_path: str) -> None:
        """
        Writes the optimized lineup to a CSV file.
        :param file_path: the path to the file which will be created if it does not exist.
        :return: None
        :raises: ValueError if file_path is None or points to a non-CSV file.
        """
        if file_path is None:
            raise ValueError('File path cannot be none')
        extension = file_utils.get_extension(file_path)
        if extension != 'csv':
            raise ValueError(f"Only CSV output is supported, found: {extension}")
        file_exists = file_utils.file_exists(file_path)
        # Append mode: successive lineups accumulate in the same file; the header
        # is only written when the file is newly created.
        # NOTE(review): csv files should normally be opened with newline='' to
        # avoid blank rows on Windows — confirm and fix if targeted there.
        with open(file_path, mode='a') as f:
            # dir() sorts the names returned by LineupPlayer.__dir__.
            writer = csv.DictWriter(f, fieldnames=dir(self.players[0]))
            if not file_exists:
                writer.writeheader()
            writer.writerows([{k: player.__getattribute__(k) for k in dir(player)} for player in self.players])
    def to_dict(self) -> Dict[str, Any]:
        """
        Convert this optimized lineup into a dict.
        :return: A dict representing this lineup.
        """
        return {
            'site': self.site,
            'points': self.points,
            'salary': self.salary,
            'salary_cap': self.salary_cap,
            'players': [p.to_dict() for p in self.players]
        }
    def __repr__(self):
        return f"dfs.optimize.OptimizedLineup({self.to_dict()})"
    def __str__(self):
        players_string = '\n'.join([str(p) for p in self.players])
        return (f"Optimized {self.site} Lineup \n"
                f"{self.points} points @ {self.salary} salary \n" +
                players_string)
class LineupPlayer:
    """
    A single player slotted into an optimized lineup.
    """
    def __init__(self, player_dict: dict):
        """
        :param player_dict: the player dict corresponding to a row from the dataframe.
        """
        position = player_dict['position']
        self.name = player_dict['name']
        self.position = position
        # The lineup slot defaults to the natural position; the optimizer may
        # later reassign it (e.g. to a flex slot).
        self.lineup_position = position
        self.team = player_dict['team']
        self.opponent = player_dict['opponent']
        self.points = player_dict['points']
        self.salary = player_dict['salary']
        self.datetime = player_dict['datetime']
        self.is_home = player_dict['is_home']
    def to_dict(self) -> Dict[str, Any]:
        """
        Converts this lineup player into a dict.
        :return: A dict representing this player.
        """
        out = {
            'name': self.name,
            'position': self.position,
            'lineup_position': self.lineup_position,
            'team': self.team,
            'opponent': self.opponent,
            'points': self.points,
            'salary': self.salary,
            'datetime': str(self.datetime),
            'is_home': self.is_home
        }
        return out
    def __dir__(self):
        # Attribute names exposed for CSV output (order matters to callers of __dir__()).
        return ['name', 'position', 'team', 'opponent', 'points', 'salary', 'datetime']
    def __repr__(self):
        return f"dfs.optimize.LineupPlayer({self.to_dict()})"
    def __str__(self):
        return f"{self.lineup_position} {self.name} - {self.team} - {self.points} @ {self.salary}"
class LineupOptimizer(ABC):
"""
A pandas data frame-based fantasy football lineup optimizer.
This class is used to generate optimal fantasy football lineups for various sites when provided
a data frame containing player, position, salary and points information.
"""
    def __init__(self,
                 data_source: Union[pd.DataFrame, str],
                 name_col: str = 'name',
                 position_col: str = 'position',
                 year_col: str = 'year',
                 week_col: str = 'week',
                 points_col: str = 'points',
                 salary_col: str = 'salary',
                 team_col: str = 'team',
                 opponent_col: str = 'opponent',
                 datetime_col: str = 'datetime',
                 is_home_col: str = 'is_home',
                 id_col: str = None):
        """
        :param data_source: A dataframe or file path containing fantasy data.
        :param name_col: The player name column. Default is 'name'.
        :param position_col: The player position column. Default is 'position'.
        :param year_col: The year column. Default is 'year'.
        :param week_col: The week column. Default is 'week'.
        :param points_col: The fantasy points column. Default is 'points'.
        :param salary_col: The player salary column. Default is 'salary'.
        :param team_col: The player team column. Default is 'team'.
        :param opponent_col: The player opponent column. Default is 'opponent'.
        :param datetime_col: The datetime column. Default is 'datetime'.
        :param is_home_col: The column to indicate whether or not the player is at home. Default is 'is_home'.
        :param id_col: Optional ID column name.
        """
        # Accept either an in-memory dataframe or a csv/xlsx file path.
        # NOTE(review): type(x) is ... rejects pd.DataFrame subclasses — confirm intended.
        if type(data_source) is pd.DataFrame:
            self._data = data_source.copy()  # don't impact original dataframe
        elif type(data_source) is str:
            if not file_utils.file_exists(file=data_source):
                raise ValueError('The data source file does not exist!')
            extension = file_utils.get_extension(file_path=data_source)
            if extension == 'csv':
                self._data = pd.read_csv(filepath_or_buffer=data_source)
            elif extension == 'xlsx':
                self._data = pd.read_excel(io=data_source, engine='openpyxl')
            else:
                raise ValueError('Invalid data source file path! csv and xlsx are supported.')
        else:
            raise ValueError('Invalid data source type!')
        # Every configured column must exist in the dataframe.
        if not all(c in self._data.columns for c in [name_col,
                                                     position_col,
                                                     year_col,
                                                     week_col,
                                                     points_col,
                                                     salary_col,
                                                     team_col,
                                                     opponent_col,
                                                     is_home_col,
                                                     datetime_col]):
            raise InvalidDataFrameException('DataFrame does not contain necessary columns')
        # The ID column, when provided, must uniquely identify each row.
        if id_col is not None:
            if len(self._data[id_col].unique()) != len(self._data):
                raise InvalidDataFrameException('Provided ID column must be unique for each row')
        self._name_col = name_col
        self._year_col = year_col
        self._week_col = week_col
        self._position_col = position_col
        self._points_col = points_col
        self._salary_col = salary_col
        self._team_col = team_col
        self._opponent_col = opponent_col
        self._datetime_col = datetime_col
        self._is_home_col = is_home_col
        self._id_col = id_col
        self._constraints = []
        # Normalize positions, then drop rows with missing values or
        # non-positive salaries (unusable lineup entries).
        self._data[self._position_col] = self._data[self._position_col].apply(lambda x: normalize_position(x))
        self._data.dropna(inplace=True)
        self._data = self._data[self._data[self._salary_col] > 0]
    # --- Read-only accessors for the underlying dataframe and configured column names ---
    @property
    def data(self) -> pd.DataFrame:
        return self._data
    @property
    def id_col(self) -> str:
        # May be None when no ID column was configured.
        return self._id_col
    @property
    def name_col(self) -> str:
        return self._name_col
    @property
    def position_col(self) -> str:
        return self._position_col
    @property
    def year_col(self) -> str:
        return self._year_col
    @property
    def week_col(self) -> str:
        return self._week_col
    @property
    def points_col(self) -> str:
        return self._points_col
    @property
    def salary_col(self) -> str:
        return self._salary_col
    @property
    def team_col(self) -> str:
        return self._team_col
    @property
    def opponent_col(self) -> str:
        return self._opponent_col
    @property
    def datetime_col(self) -> str:
        return self._datetime_col
    @property
    def is_home_col(self) -> str:
        return self._is_home_col
    # --- Site-specific configuration, supplied by per-site subclasses ---
    @abstractmethod
    def num_players(self) -> int:
        """
        Returns the total number of players that are to be included in an optimized lineup for the given site.
        :return: The total number of players to be included in the lineup.
        """
        raise NotImplementedError
    @abstractmethod
    def salary_cap(self) -> int:
        """
        Returns the salary cap - or max available salary - to use for this given site's lineup optimization.
        :return: The site's salary cap.
        """
        raise NotImplementedError
    @abstractmethod
    def site_name(self) -> str:
        """
        Returns the name of the fantasy site that this lineup optimizer is being used for. Ex. DraftKings, FanDuel, etc.
        :return: The name of the fantasy site.
        """
        raise NotImplementedError
    @abstractmethod
    def position_constraints(self) -> Dict[str, Tuple[int, int]]:
        """
        Returns a dict that maps position to a tuple containing min and max numbers of this position that may be
        included in the optimized lineup. Ex. { RB : (2, 3) ... }
        :return: A dict mapping position name to min/max count.
        """
        raise NotImplementedError
    @abstractmethod
    def player_order_dict(self) -> Dict[str, int]:
        """
        Return a mapping of position to lineup order that is used to order players in an optimized lineup.
        :return: the mapping of position to optimized lineup order.
        """
        raise NotImplementedError
    def set_only_include_teams(self, teams: List[str]) -> None:
        """
        Sets the teams that are to be considered for the lineup optimization.
        :param teams: The list of teams to consider.
        :return: None
        :raises: ValueError if teams to include is none or empty.
        """
        if teams is None or len(teams) == 0:
            raise ValueError('Included teams must not be none or empty')
        self._add_constraint(constraints.OnlyIncludeTeamsConstraint(teams=teams,
                                                                    team_column=self._team_col))
    def set_exclude_teams(self, teams: List[str]) -> None:
        """
        Sets the list of teams whose players are to be excluded from lineup optimization.
        :param teams: The list of teams to exclude.
        :return: None
        :raises: ValueError if teams to exclude is none or empty.
        """
        if teams is None or len(teams) == 0:
            raise ValueError('Teams to exclude must not be none or empty')
        # Excluding a team is modeled as capping its player count at zero.
        for team in teams:
            self.set_max_players_from_team(n=0, team=team)
    def set_must_include_team(self, team: str):
        """
        Specifies that a lineup must include a player from a given team.
        :param team: The team that the lineup must include.
        :return: None
        :raises: ValueError if team is none or not found in data frame.
        """
        # "Must include team" == at least one player from that team.
        self.set_min_players_from_team(n=1, team=team)
    def set_must_include_player(self, **kwargs) -> None:
        """
        Specifies that a lineup must include a player identified by either name or id.
        Either name or id must be provided in kwargs.
        :return: None
        :raises: ValueError if the player is None or not found in the dataframe
        """
        if all([it not in kwargs for it in ['id', 'name']]):
            raise ValueError('Must provide id or name')
        if 'id' in kwargs and self._id_col is None:
            raise ValueError('ID column not specified')
        # Prefer the id lookup when both id and name are supplied.
        key, col = (kwargs['id'], self._id_col) if 'id' in kwargs else (kwargs['name'], self.name_col)
        if key is None or key not in self.data[col].unique():
            raise ValueError(f"{key} not found in data frame's {col} column")
        self._add_constraint(constraints.IncludePlayerConstraint(player=key,
                                                                 name_col=col))
    def set_exclude_player(self, **kwargs) -> None:
        """
        Specifies that a lineup must exclude a player identified by either name or id.
        :return: None
        :raises: ValueError if player is None or not found in dataframe
        """
        if all([it not in kwargs for it in ['id', 'name']]):
            raise ValueError('Must provide id or name')
        if 'id' in kwargs and self._id_col is None:
            raise ValueError('ID column not specified')
        key, col = (kwargs['id'], self._id_col) if 'id' in kwargs else (kwargs['name'], self.name_col)
        if key is None or key not in self.data[col].unique():
            raise ValueError(f"{key} not found in data frame's {col} column")
        self._add_constraint(constraints.ExcludePlayerConstraint(player=key,
                                                                 name_col=col))
    def set_num_players_from_team(self, n: int, team: str):
        """
        Sets the number of players from a team that an optimized lineup must include.
        :param n: the number of players
        :param team: the team name
        :return: None
        :raises: ValueError if number of players or team are invalid
        """
        # NOTE(review): negative n is not rejected here (cf. set_max_players_from_team) — confirm intended.
        if n is None or n > self.num_players():
            raise ValueError('Invalid number of players')
        if team is None or team not in self.data[self._team_col].unique():
            raise ValueError('Invalid team name')
        # Exact count == matching max + min constraint pair.
        self._add_constraint(constraints.MaxPlayersFromTeamConstraint(maximum=n,
                                                                      team=team,
                                                                      team_col=self._team_col))
        try:
            self._add_constraint(constraints.MinPlayersFromTeamConstraint(minimum=n,
                                                                          team=team,
                                                                          team_col=self._team_col))
        except InvalidConstraintException:
            self._constraints.pop()  # remove max players constraint if this one fails
            raise
    def set_max_players_from_team(self, n: int, team: str) -> None:
        """
        Sets the maximum number of players that can be included in an optimized lineup from a particular team.
        :param n: the maximum number of players that can be included from a particular team
        :param team: the name of the team
        :return: None
        :raises: ValueError if maximum or team are invalid
        """
        if n is None or n < 0:
            raise ValueError('Invalid maximum players')
        if team is None or team not in self.data[self._team_col].unique():
            raise ValueError('Invalid team name')
        self._add_constraint(constraints.MaxPlayersFromTeamConstraint(maximum=n,
                                                                      team=team,
                                                                      team_col=self._team_col))
    def set_min_players_from_team(self, n: int, team: str) -> None:
        """
        Sets the minimum number of players from a given team that must be included in an optimized lineup.
        :param n: the minimum number of players from the specified team that must be included
        :param team: the name of the team
        :return: None
        :raises: ValueError if minimum or team are invalid
        """
        if n is None or n > self.num_players():
            raise ValueError('Invalid minimum number of players')
        if team is None or team not in self.data[self._team_col].unique():
            raise ValueError('Invalid team name')
        if n == 0:
            # A minimum of zero is a no-op; don't register an empty constraint.
            return
        self._add_constraint(constraints.MinPlayersFromTeamConstraint(minimum=n,
                                                                      team=team,
                                                                      team_col=self._team_col))
    def set_max_salary(self, n: int) -> None:
        """
        Sets the maximum salary that can be used in an optimized lineup.
        :param n: the max salary.
        :return: None
        :raises: ValueError if maximum is invalid
        """
        if n is None or n <= 0:
            raise ValueError('Invalid maximum')
        self._add_constraint(constraints.MaxSalaryCapConstraint(salary=n,
                                                                salary_col=self._salary_col))
    def set_min_salary(self, n: int) -> None:
        """
        Sets the minimum salary that can be used in an optimized lineup.
        :param n: the minimum salary
        :return: None
        :raises: ValueError if minimum is invalid
        """
        # NOTE(review): negative n is not rejected here — confirm intended.
        if n is None or n > self.salary_cap():
            raise ValueError('Invalid minimum')
        self._add_constraint(constraints.MinSalaryCapConstraint(salary=n,
                                                                salary_col=self._salary_col))
    def set_game_slate(self, slate: GameSlate) -> None:
        """
        Set the game slate to determine which games are to be included in an optimized lineup.
        :param slate: The game slate to include.
        :return: None
        """
        logger.warning(f"Setting game slate to {slate.name}")
        self._add_constraint(constraints.GameSlateConstraint(slate=slate,
                                                             datetime_col=self._datetime_col,
                                                             week_col=self.week_col,
                                                             num_players=self.num_players()))
    def _add_constraint(self, constraint: constraints.LineupConstraint) -> None:
        """
        Internal method used to add a constraint by first checking if it is valid.
        :param constraint: The constraint to add.
        :return: None
        :raises: InvalidConstraintException if the constraint is not valid
        """
        is_valid, message = constraint.is_valid(self._constraints)
        if is_valid:
            self._constraints.append(constraint)
        else:
            raise InvalidConstraintException(f"Invalid constraint: {message}")
    def clear_constraints(self) -> None:
        """
        Clears the current lineup optimizer constraints.
        :return: None
        """
        # Rebinds (rather than mutates) the list, so prior references are unaffected.
        self._constraints = []
def optimize_lineup(self) -> OptimizedLineup:
"""
Generates and returns an optimized lineup for a given fantasy football site.
The lineup is generated using the class's data variable and is optimized under provided constraints.
:return: The optimized lineup.
:raises: ValueError, InvalidDataFrameException
"""
position_constraints = self.position_constraints()
if not data_frame_utils.col_contains_all_values(self._data, self.position_col, position_constraints.keys()):
raise InvalidDataFrameException('Data frame is missing required positions')
self._data['LpVariable'] = self._data.apply(lambda x: LpVariable(f"{x[self._position_col]}_{x.name}",
cat='Binary'), axis=1)
problem = LpProblem(f"{self.site_name()}LineupOptimization", LpMaximize)
for k, v in position_constraints.items():
players = self.data[self.data[self._position_col] == k]
problem += lpSum(players['LpVariable']) >= v[0]
problem += lpSum(players['LpVariable']) <= v[1]
problem += lpSum(self.data[self._points_col] * self.data['LpVariable'])
problem += constraints.LineupSizeConstraint(self.num_players()).apply(self._data)[0]
problem += constraints.MaxSalaryCapConstraint(self.salary_cap(), self._salary_col).apply(self._data)[0]
for constraint in self._constraints:
for c in constraint.apply(self._data): # stack-related constraints may return multiple from apply()
problem += c
problem.solve(PULP_CBC_CMD(msg=False))
if not pulp_utils.is_optimal_solution_found(problem):
raise UnsolvableLineupException('No optimal solution found under current lineup constraints')
return OptimizedLineup(self, self.site_name()) | 0.875348 | 0.198996 |
import os
import pytest
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
from ..conftest import CONFIG
from ..conftest import read_json
from ..conftest import TESTING_CONFIG_DIR
from ..conftest import UTILS_DATA_DIR
# Load the dataset fixtures and the expected-values config used by these tests.
testdata = read_json(os.path.join(UTILS_DATA_DIR, CONFIG.FILENAME_DATASETS,))
test_config = read_json(
    os.path.join(TESTING_CONFIG_DIR, "default/system/test-config_datasets.json",)
)
class TestAccess:
    # HTTP-level access checks for dataset landing pages.
    @pytest.mark.v4_20
    @pytest.mark.utils
    @pytest.mark.parametrize("test_input", testdata)
    def test_pid_url_not_logged_in(self, config, session, test_input):
        """Test all Dataset XHTML URL's as not-logged-in user."""
        # Arrange
        url = f"{config.BASE_URL}/dataset.xhtml?persistentId={test_input['pid']}"
        # Act
        resp = session.get(url)
        # Assert: page loads as HTML and no redirect away from the dataset URL occurred.
        assert resp.status_code == 200
        assert resp.headers["Content-Type"] == "text/html;charset=UTF-8"
        assert resp.url == url
        # Cleanup
    @pytest.mark.v4_20
    @pytest.mark.utils
    @pytest.mark.parametrize("test_input", testdata)
    def test_doiorg_url(self, config, session, test_input):
        """Test all doi.org URL's."""
        # Arrange
        url_start = f"https://doi.org/{test_input['pid']}"
        url_end = f"{config.BASE_URL}/dataset.xhtml?persistentId={test_input['pid']}"
        # Act
        resp = session.get(url_start)
        # Assert: doi.org resolves and redirects to the dataset landing page.
        assert resp.status_code == 200
        assert resp.headers["Content-Type"] == "text/html;charset=UTF-8"
        assert resp.url == url_end
        # Cleanup
class TestSidebar:
    # Selenium checks of the homepage sidebar facets.
    @pytest.mark.v4_20
    @pytest.mark.selenium
    @pytest.mark.utils
    @pytest.mark.parametrize(
        "test_input,expected",
        test_config["sidebar"]["facet-not-logged-in"]["input-expected"],
    )
    def test_facet_not_logged_in(self, config, homepage, test_input, expected):
        """Test all Datasets in facet as not-logged-in user."""
        # Arrange
        selenium = homepage
        # A single WebDriverWait suffices (a duplicate, dead assignment was removed).
        wait = WebDriverWait(selenium, config.MAX_WAIT_TIME)
        # Act
        selenium.get(config.BASE_URL)
        wait.until(
            EC.visibility_of_element_located((By.XPATH, "//div[@id='dv-sidebar']"))
        )
        facet_dataset = selenium.find_element(
            By.XPATH, "//span[@class='facetTypeDataset']"
        )
        # Assert
        assert facet_dataset.text == f"Datasets ({expected['num-datasets']})"
        # Cleanup
import pytest
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
from ..conftest import CONFIG
from ..conftest import read_json
from ..conftest import TESTING_CONFIG_DIR
from ..conftest import UTILS_DATA_DIR
# Load the dataset fixtures and the expected-values config used by these tests.
testdata = read_json(os.path.join(UTILS_DATA_DIR, CONFIG.FILENAME_DATASETS,))
test_config = read_json(
    os.path.join(TESTING_CONFIG_DIR, "default/system/test-config_datasets.json",)
)
class TestAccess:
    # HTTP-level access checks for dataset landing pages.
    @pytest.mark.v4_20
    @pytest.mark.utils
    @pytest.mark.parametrize("test_input", testdata)
    def test_pid_url_not_logged_in(self, config, session, test_input):
        """Test all Dataset XHTML URL's as not-logged-in user."""
        # Arrange
        url = f"{config.BASE_URL}/dataset.xhtml?persistentId={test_input['pid']}"
        # Act
        resp = session.get(url)
        # Assert: page loads as HTML and no redirect away from the dataset URL occurred.
        assert resp.status_code == 200
        assert resp.headers["Content-Type"] == "text/html;charset=UTF-8"
        assert resp.url == url
        # Cleanup
    @pytest.mark.v4_20
    @pytest.mark.utils
    @pytest.mark.parametrize("test_input", testdata)
    def test_doiorg_url(self, config, session, test_input):
        """Test all doi.org URL's."""
        # Arrange
        url_start = f"https://doi.org/{test_input['pid']}"
        url_end = f"{config.BASE_URL}/dataset.xhtml?persistentId={test_input['pid']}"
        # Act
        resp = session.get(url_start)
        # Assert: doi.org resolves and redirects to the dataset landing page.
        assert resp.status_code == 200
        assert resp.headers["Content-Type"] == "text/html;charset=UTF-8"
        assert resp.url == url_end
        # Cleanup
class TestSidebar:
    # Selenium checks of the homepage sidebar facets.
    @pytest.mark.v4_20
    @pytest.mark.selenium
    @pytest.mark.utils
    @pytest.mark.parametrize(
        "test_input,expected",
        test_config["sidebar"]["facet-not-logged-in"]["input-expected"],
    )
    def test_facet_not_logged_in(self, config, homepage, test_input, expected):
        """Test all Datasets in facet as not-logged-in user."""
        # Arrange
        selenium = homepage
        # A single WebDriverWait suffices (a duplicate, dead assignment was removed).
        wait = WebDriverWait(selenium, config.MAX_WAIT_TIME)
        # Act
        selenium.get(config.BASE_URL)
        wait.until(
            EC.visibility_of_element_located((By.XPATH, "//div[@id='dv-sidebar']"))
        )
        facet_dataset = selenium.find_element(
            By.XPATH, "//span[@class='facetTypeDataset']"
        )
        # Assert
        assert facet_dataset.text == f"Datasets ({expected['num-datasets']})"
        # Cleanup
from contextlib import contextmanager
from typing import Dict, List
from docker.models.containers import Container
from loguru import logger
from pydantic import BaseModel as Base
from .circuit import OnionCircuit
from .client import ContainerBase, ContainerOptions
from .mount import MountFile, MountPoint
# Pinned HAProxy docker image used for the load-balancer container.
HAPROXY_IMAGE = "haproxy:2.2.3"
class HAProxyOptions(Base):
    """Handles options for HAProxy docker instance.

    The model is rendered into the ``haproxy.cfg`` template via ``.dict()``
    (see ``OnionBalancer``), so field names must stay in sync with that
    template's variables.

    Attributes:
        max_connections (int): Maximum per-process number of concurrent connections.
        timeout_client (int): Maximum inactivity time on the client side.
        timeout_connect (int): Maximum time to wait for a connection attempt to a server
            to succeed.
        timeout_queue (int): Maximum time to wait in the queue for a connection slot
            to be free.
        timeout_server (int): Maximum inactivity time on the server side.
        listen_host_port (int): Frontend port to the proxy.
        backend_name (str): Name of Backend section.
        dashboard_bind_port (int): Port to open to reach the HAProxy dashboard.
        dashboard_refresh_rate (int): Refresh rate of the HAProxy dashboard page.
        onions (List[Container]): Each onion container that is connected to the whaornet.
    """

    # NOTE(review): timeout units depend on how the haproxy.cfg template
    # formats them — confirm against the template before changing defaults.
    max_connections: int = 4096
    timeout_client: int = 3600
    timeout_connect: int = 1
    timeout_queue: int = 5
    timeout_server: int = 3600
    listen_host_port: int = 8001
    backend_name: str = "onions"
    dashboard_bind_port: int = 9999
    dashboard_refresh_rate: int = 2
    onions: List[Container]

    class Config:
        """Pydantic Configuration."""

        # docker's Container is not a pydantic-aware type.
        arbitrary_types_allowed = True

    @property
    def ports(self) -> List[int]:
        """Ports which will be used to expose on the local network."""
        return [self.listen_host_port, self.dashboard_bind_port]
class Balancer(ContainerBase):
    """HAProxy load-balancer container.

    Attributes:
        haproxy_options (HAProxyOptions): HAProxy options object.
        container_options (ContainerOptions): Container options for the HA proxy instance.
    """

    haproxy_options: HAProxyOptions
    container_options: ContainerOptions = ContainerOptions(image=HAPROXY_IMAGE)

    class Config:
        """Pydantic Configuration."""

        arbitrary_types_allowed = True

    @property
    def address(self) -> str:
        """Return the socks5 address to proxy requests through."""
        frontend_port = self.haproxy_options.listen_host_port
        return f"socks5://localhost:{frontend_port}"

    @property
    def dashboard_address(self) -> str:
        """Return the full HAProxy dashboard address."""
        dashboard_port = self.haproxy_options.dashboard_bind_port
        return f"http://localhost:{dashboard_port}"

    @property
    def proxies(self) -> Dict[str, str]:
        """Return a proxy mapping suitable for a requests session."""
        # Both schemes are routed through the same socks5 frontend.
        return dict.fromkeys(("http", "https"), self.address)

    def add_mount_point(self, mount: MountFile) -> None:
        """Mount a volume into the HAProxy container.

        Args:
            mount (MountFile): File to mount between the container and local file system.
        """
        self.container_options.mounts.append(mount.mount)

    def display_settings(self) -> None:
        """Log config settings to stdout."""
        logger.debug(
            "\n==================="
            "\nOnion Load Balancer"
            "\n==================="
            "\n" + self.json(indent=4)
        )
        self.show_follow_logs_command()
@contextmanager
# pylint: disable=invalid-name
def OnionBalancer(onions: List[OnionCircuit], show_log: bool = False) -> Balancer:
    """Context manager which yields a started instance of an HAProxy docker container.

    Args:
        onions (List[OnionCircuit]): List of tor containers to load balance requests across.
        show_log (bool): If True shows the HAProxies logs on start and stop.

    Yields:
        Balancer: A started instance of a HAProxy docker container.
    """
    haproxy_options = HAProxyOptions(onions=onions)

    with MountPoint(
        template_name="haproxy.cfg",
        target_path="/usr/local/etc/haproxy/haproxy.cfg",
        template_variables=haproxy_options.dict(),
    ) as mount_point:
        # Bug fix: construct the Balancer before entering the try block so
        # the finally clause can never hit an unbound `balancer` (the
        # original raised NameError if Balancer(...) itself failed).
        balancer = Balancer(haproxy_options=haproxy_options)
        try:
            balancer.add_mount_point(mount_point)
            for port in haproxy_options.ports:
                balancer.expose_port(port)

            balancer.start(show_log=show_log)
            balancer.display_settings()

            yield balancer
        finally:
            # Always stop the container, even if the caller's block raised.
            balancer.stop(show_log=show_log)
from contextlib import contextmanager
from typing import Dict, List
from docker.models.containers import Container
from loguru import logger
from pydantic import BaseModel as Base
from .circuit import OnionCircuit
from .client import ContainerBase, ContainerOptions
from .mount import MountFile, MountPoint
# Pinned HAProxy docker image used for the load-balancer container.
HAPROXY_IMAGE = "haproxy:2.2.3"
class HAProxyOptions(Base):
    """Handles options for HAProxy docker instance.

    The model is rendered into the ``haproxy.cfg`` template via ``.dict()``
    (see ``OnionBalancer``), so field names must stay in sync with that
    template's variables.

    Attributes:
        max_connections (int): Maximum per-process number of concurrent connections.
        timeout_client (int): Maximum inactivity time on the client side.
        timeout_connect (int): Maximum time to wait for a connection attempt to a server
            to succeed.
        timeout_queue (int): Maximum time to wait in the queue for a connection slot
            to be free.
        timeout_server (int): Maximum inactivity time on the server side.
        listen_host_port (int): Frontend port to the proxy.
        backend_name (str): Name of Backend section.
        dashboard_bind_port (int): Port to open to reach the HAProxy dashboard.
        dashboard_refresh_rate (int): Refresh rate of the HAProxy dashboard page.
        onions (List[Container]): Each onion container that is connected to the whaornet.
    """

    # NOTE(review): timeout units depend on how the haproxy.cfg template
    # formats them — confirm against the template before changing defaults.
    max_connections: int = 4096
    timeout_client: int = 3600
    timeout_connect: int = 1
    timeout_queue: int = 5
    timeout_server: int = 3600
    listen_host_port: int = 8001
    backend_name: str = "onions"
    dashboard_bind_port: int = 9999
    dashboard_refresh_rate: int = 2
    onions: List[Container]

    class Config:
        """Pydantic Configuration."""

        # docker's Container is not a pydantic-aware type.
        arbitrary_types_allowed = True

    @property
    def ports(self) -> List[int]:
        """Ports which will be used to expose on the local network."""
        return [self.listen_host_port, self.dashboard_bind_port]
class Balancer(ContainerBase):
    """HAProxy load-balancer container.

    Attributes:
        haproxy_options (HAProxyOptions): HAProxy options object.
        container_options (ContainerOptions): Container options for the HA proxy instance.
    """

    haproxy_options: HAProxyOptions
    container_options: ContainerOptions = ContainerOptions(image=HAPROXY_IMAGE)

    class Config:
        """Pydantic Configuration."""

        arbitrary_types_allowed = True

    @property
    def address(self) -> str:
        """Return the socks5 address to proxy requests through."""
        frontend_port = self.haproxy_options.listen_host_port
        return f"socks5://localhost:{frontend_port}"

    @property
    def dashboard_address(self) -> str:
        """Return the full HAProxy dashboard address."""
        dashboard_port = self.haproxy_options.dashboard_bind_port
        return f"http://localhost:{dashboard_port}"

    @property
    def proxies(self) -> Dict[str, str]:
        """Return a proxy mapping suitable for a requests session."""
        # Both schemes are routed through the same socks5 frontend.
        return dict.fromkeys(("http", "https"), self.address)

    def add_mount_point(self, mount: MountFile) -> None:
        """Mount a volume into the HAProxy container.

        Args:
            mount (MountFile): File to mount between the container and local file system.
        """
        self.container_options.mounts.append(mount.mount)

    def display_settings(self) -> None:
        """Log config settings to stdout."""
        logger.debug(
            "\n==================="
            "\nOnion Load Balancer"
            "\n==================="
            "\n" + self.json(indent=4)
        )
        self.show_follow_logs_command()
@contextmanager
# pylint: disable=invalid-name
def OnionBalancer(onions: List[OnionCircuit], show_log: bool = False) -> Balancer:
    """Context manager which yields a started instance of an HAProxy docker container.

    Args:
        onions (List[OnionCircuit]): List of tor containers to load balance requests across.
        show_log (bool): If True shows the HAProxies logs on start and stop.

    Yields:
        Balancer: A started instance of a HAProxy docker container.
    """
    haproxy_options = HAProxyOptions(onions=onions)

    with MountPoint(
        template_name="haproxy.cfg",
        target_path="/usr/local/etc/haproxy/haproxy.cfg",
        template_variables=haproxy_options.dict(),
    ) as mount_point:
        # Bug fix: construct the Balancer before entering the try block so
        # the finally clause can never hit an unbound `balancer` (the
        # original raised NameError if Balancer(...) itself failed).
        balancer = Balancer(haproxy_options=haproxy_options)
        try:
            balancer.add_mount_point(mount_point)
            for port in haproxy_options.ports:
                balancer.expose_port(port)

            balancer.start(show_log=show_log)
            balancer.display_settings()

            yield balancer
        finally:
            # Always stop the container, even if the caller's block raised.
            balancer.stop(show_log=show_log)
import enum
class SubmitStatus(object):
    """State of one submission as reported by the judge.

    Raw verdicts are normalised by :meth:`set_verdict`: anything that is
    neither still being judged nor 'Accepted' becomes 'Failed', with the
    raw verdict text preserved in ``info``.
    """

    # TODO(actics): make this abstract (now it timus only support)
    _processing_verdicts = ['Compiling', 'Running', 'Waiting']
    _running_verdict = 'Running'
    _accepted_verdict = 'Accepted'
    _failed_verdict = 'Failed'
    _compilation_error_info = 'Compilation error'

    def __init__(self) -> None:
        # Every field starts as an empty string and is filled by the parser.
        for attr in ('submit_id', 'date', 'author', 'problem', 'language',
                     'verdict', 'test', 'runtime', 'memory', 'info',
                     'source_file'):
            setattr(self, attr, '')

    def set_verdict(self, verdict: str) -> None:
        """Store *verdict*, collapsing any terminal non-accepted verdict to
        the generic 'Failed' while keeping the raw text in ``info``."""
        self.verdict = verdict
        if self.in_process or self.accepted:
            return
        self.info = verdict
        self.verdict = self._failed_verdict

    @property
    def in_process(self) -> bool:
        """True while the judge is still working on the submission."""
        return self.verdict in self._processing_verdicts

    @property
    def running(self) -> bool:
        """True while the solution is executing on tests."""
        return self.verdict == self._running_verdict

    @property
    def accepted(self) -> bool:
        """True once the submission passed all tests."""
        return self.verdict == self._accepted_verdict

    @property
    def failed(self) -> bool:
        """True for any normalised terminal verdict other than 'Accepted'."""
        return self.verdict == self._failed_verdict

    @property
    def compilation_error(self) -> bool:
        """True when the recorded failure was a compilation error."""
        return self.info == self._compilation_error_info
class Problem(object):
    """Plain data holder for everything parsed from a single problem page."""

    def __init__(self):
        self.number = 0
        self.title = ''
        self.time_limit = ''
        self.memory_limit = ''
        self.text = ''              # problem statement body
        self.input = ''             # input-format section
        self.output = ''            # output-format section
        self.sample_inputs = []
        self.sample_outputs = []
        self.author = ''
        self.source = ''
        self.tags = []
        self.difficulty = 0
        self.is_accepted = None     # None until known (e.g. not logged in)
        self.discussion_count = 0
        self.submission_count = 0
        self.accepted_submission_count = 0
        self.rating_length = 0
class SortType(enum.Enum):
    """Supported sort orders for the problem list."""

    id = 'id'
    authors = 'authors'
    difficulty = 'difficulty'
class IdWithDescription(object):
    """Value pair: an opaque identifier plus its human-readable description."""

    def __init__(self, obj_id: str, description: str):
        self.id = obj_id
        self.description = description
class Language(IdWithDescription):
    """Compiler/language option (id + description)."""
    pass
class ProblemsTag(IdWithDescription):
    """Problem-list tag filter (id + description)."""
    pass
class ProblemsPage(IdWithDescription):
    """One page of the problem list (id + description)."""
    pass


# Restored: this import belongs to the module text that follows and was
# fused into the preceding line by stray dataset residue (a syntax error).
import enum
class SubmitStatus(object):
    """State of one submission as reported by the judge.

    Raw verdicts are normalised by :meth:`set_verdict`: anything that is
    neither still being judged nor 'Accepted' becomes 'Failed', with the
    raw verdict text preserved in ``info``.
    """

    # TODO(actics): make this abstract (now it timus only support)
    _processing_verdicts = ['Compiling', 'Running', 'Waiting']
    _running_verdict = 'Running'
    _accepted_verdict = 'Accepted'
    _failed_verdict = 'Failed'
    _compilation_error_info = 'Compilation error'

    def __init__(self) -> None:
        # Every field starts as an empty string and is filled by the parser.
        for attr in ('submit_id', 'date', 'author', 'problem', 'language',
                     'verdict', 'test', 'runtime', 'memory', 'info',
                     'source_file'):
            setattr(self, attr, '')

    def set_verdict(self, verdict: str) -> None:
        """Store *verdict*, collapsing any terminal non-accepted verdict to
        the generic 'Failed' while keeping the raw text in ``info``."""
        self.verdict = verdict
        if self.in_process or self.accepted:
            return
        self.info = verdict
        self.verdict = self._failed_verdict

    @property
    def in_process(self) -> bool:
        """True while the judge is still working on the submission."""
        return self.verdict in self._processing_verdicts

    @property
    def running(self) -> bool:
        """True while the solution is executing on tests."""
        return self.verdict == self._running_verdict

    @property
    def accepted(self) -> bool:
        """True once the submission passed all tests."""
        return self.verdict == self._accepted_verdict

    @property
    def failed(self) -> bool:
        """True for any normalised terminal verdict other than 'Accepted'."""
        return self.verdict == self._failed_verdict

    @property
    def compilation_error(self) -> bool:
        """True when the recorded failure was a compilation error."""
        return self.info == self._compilation_error_info
class Problem(object):
    """Plain data holder for everything parsed from a single problem page."""

    def __init__(self):
        self.number = 0
        self.title = ''
        self.time_limit = ''
        self.memory_limit = ''
        self.text = ''              # problem statement body
        self.input = ''             # input-format section
        self.output = ''            # output-format section
        self.sample_inputs = []
        self.sample_outputs = []
        self.author = ''
        self.source = ''
        self.tags = []
        self.difficulty = 0
        self.is_accepted = None     # None until known (e.g. not logged in)
        self.discussion_count = 0
        self.submission_count = 0
        self.accepted_submission_count = 0
        self.rating_length = 0
class SortType(enum.Enum):
    """Supported sort orders for the problem list."""

    id = 'id'
    authors = 'authors'
    difficulty = 'difficulty'
class IdWithDescription(object):
    """Value pair: an opaque identifier plus its human-readable description."""

    def __init__(self, obj_id: str, description: str):
        self.id = obj_id
        self.description = description
class Language(IdWithDescription):
    """Compiler/language option (id + description)."""
    pass
class ProblemsTag(IdWithDescription):
    """Problem-list tag filter (id + description)."""
    pass
class ProblemsPage(IdWithDescription):
    """One page of the problem list (id + description)."""
    # Fix: removed stray dataset residue fused onto the `pass` line, which
    # made the statement a syntax error.
    pass
from pathlib import Path
import requests
import json
import csv
class Elasticsearch:
    """Minimal REST helper for one Elasticsearch index.

    Provides a cluster health check, idempotent template/index creation,
    CSV-based document seeding and a start-up pre-condition check.
    """

    def __init__(self, index):
        # NOTE(review): host is hard-coded to the docker-compose service name
        # "elasticsearch" — confirm before reusing outside compose.
        self.cluster_health_url = "http://elasticsearch:9200/_cluster/health"
        self.index_template_url = "http://elasticsearch:9200/_index_template/template_1"
        self.index_url = f"http://elasticsearch:9200/{index}/"
        self.index_doc_count_url = f"http://elasticsearch:9200/{index}/_count"
        self.index_doc_url = f"http://elasticsearch:9200/{index}/_doc/"
        self.headers = {
            'Content-Type': 'application/json'
        }

    def es_healthcheck(self):
        """Return True when the cluster answers and its status is not red."""
        try:
            response = requests.get(self.cluster_health_url)
            if response.status_code != 200:
                return False
            status = response.json()["status"]
            if status != "red":
                print("💪 ES is {} and healthy".format(status))
                return True
            print("🤒 ES is {} and not healthy".format(status))
            return False
        except Exception as e:
            print("❌ Exception: ", e)
            return False

    def create_es_index(self):
        """Create the index template and the index unless they already exist."""
        response = requests.get(self.index_template_url)
        if response.status_code != 200:
            payload = json.dumps({
                "index_patterns": "cs.stanford",
                "template": {
                    "settings": {
                        "number_of_shards": 1
                    },
                    "mappings": {
                        "_source": {
                            "enabled": True
                        },
                        "properties": {
                            "topic": {
                                "type": "text"
                            },
                            "title": {
                                "type": "completion"
                            },
                            "url": {
                                "type": "text"
                            },
                            "labels": {
                                "type": "text"
                            },
                            "upvotes": {
                                "type": "integer"
                            }
                        }
                    }
                }
            })
            response = requests.put(
                self.index_template_url, headers=self.headers, data=payload
            )
            # Bug fix: the original printed success unconditionally, even
            # when the PUT failed.
            if response.ok:
                print("Index template creation is successful")
            else:
                print("Index template creation failed: {}".format(response.text))
        else:
            print("Index template already exists")

        response = requests.get(self.index_url)
        if response.status_code != 200:
            response = requests.put(self.index_url)
            if response.ok:
                print("Index creation is successful")
            else:
                print("Index creation failed: {}".format(response.text))
        else:
            print("Index already exists")

    def es_record_count(self):
        """Return the number of documents currently in the index."""
        response = requests.get(self.index_doc_count_url)
        return response.json()["count"]

    def add_documents(self):
        """Seed the index from tutorials.csv when it is currently empty."""
        if self.es_record_count() > 0:
            return
        tutorials_csv_file_path = "{}/tutorials.csv".format(Path(__file__).parents[1])
        # newline='' is the documented way to open files for the csv module.
        with open(tutorials_csv_file_path, newline="", encoding="utf-8") as csv_file:
            csv_reader = csv.reader(csv_file)
            fields = next(csv_reader)  # header row
            print(fields)
            for row in csv_reader:
                payload = json.dumps({
                    "topic": row[1],
                    "title": {
                        "input": row[2],
                    },
                    "url": row[3],
                    "labels": row[4],
                    "upvotes": int(row[5]),
                })
                response = requests.post(
                    self.index_doc_url, headers=self.headers, data=payload
                )
                if response.status_code in (200, 201):
                    print("Indexed document: {}".format(response.json()["_seq_no"] + 1))

    def pre_condition_check(self):
        """Return True when ES is healthy and the index holds documents."""
        if not self.es_healthcheck():
            return False
        self.create_es_index()
        self.add_documents()
        return self.es_record_count() > 0


# Restored: this import belongs to the module text that follows and was
# fused into the preceding line by stray dataset residue.
from pathlib import Path
import requests
import json
import csv
class Elasticsearch:
    """Minimal REST helper for one Elasticsearch index.

    Provides a cluster health check, idempotent template/index creation,
    CSV-based document seeding and a start-up pre-condition check.
    """

    def __init__(self, index):
        # NOTE(review): host is hard-coded to the docker-compose service name
        # "elasticsearch" — confirm before reusing outside compose.
        self.cluster_health_url = "http://elasticsearch:9200/_cluster/health"
        self.index_template_url = "http://elasticsearch:9200/_index_template/template_1"
        self.index_url = f"http://elasticsearch:9200/{index}/"
        self.index_doc_count_url = f"http://elasticsearch:9200/{index}/_count"
        self.index_doc_url = f"http://elasticsearch:9200/{index}/_doc/"
        self.headers = {
            'Content-Type': 'application/json'
        }

    def es_healthcheck(self):
        """Return True when the cluster answers and its status is not red."""
        try:
            response = requests.get(self.cluster_health_url)
            if response.status_code != 200:
                return False
            status = response.json()["status"]
            if status != "red":
                print("💪 ES is {} and healthy".format(status))
                return True
            print("🤒 ES is {} and not healthy".format(status))
            return False
        except Exception as e:
            print("❌ Exception: ", e)
            return False

    def create_es_index(self):
        """Create the index template and the index unless they already exist."""
        response = requests.get(self.index_template_url)
        if response.status_code != 200:
            payload = json.dumps({
                "index_patterns": "cs.stanford",
                "template": {
                    "settings": {
                        "number_of_shards": 1
                    },
                    "mappings": {
                        "_source": {
                            "enabled": True
                        },
                        "properties": {
                            "topic": {
                                "type": "text"
                            },
                            "title": {
                                "type": "completion"
                            },
                            "url": {
                                "type": "text"
                            },
                            "labels": {
                                "type": "text"
                            },
                            "upvotes": {
                                "type": "integer"
                            }
                        }
                    }
                }
            })
            response = requests.put(
                self.index_template_url, headers=self.headers, data=payload
            )
            # Bug fix: the original printed success unconditionally, even
            # when the PUT failed.
            if response.ok:
                print("Index template creation is successful")
            else:
                print("Index template creation failed: {}".format(response.text))
        else:
            print("Index template already exists")

        response = requests.get(self.index_url)
        if response.status_code != 200:
            response = requests.put(self.index_url)
            if response.ok:
                print("Index creation is successful")
            else:
                print("Index creation failed: {}".format(response.text))
        else:
            print("Index already exists")

    def es_record_count(self):
        """Return the number of documents currently in the index."""
        response = requests.get(self.index_doc_count_url)
        return response.json()["count"]

    def add_documents(self):
        """Seed the index from tutorials.csv when it is currently empty."""
        if self.es_record_count() > 0:
            return
        tutorials_csv_file_path = "{}/tutorials.csv".format(Path(__file__).parents[1])
        # newline='' is the documented way to open files for the csv module.
        with open(tutorials_csv_file_path, newline="", encoding="utf-8") as csv_file:
            csv_reader = csv.reader(csv_file)
            fields = next(csv_reader)  # header row
            print(fields)
            for row in csv_reader:
                payload = json.dumps({
                    "topic": row[1],
                    "title": {
                        "input": row[2],
                    },
                    "url": row[3],
                    "labels": row[4],
                    "upvotes": int(row[5]),
                })
                response = requests.post(
                    self.index_doc_url, headers=self.headers, data=payload
                )
                if response.status_code in (200, 201):
                    print("Indexed document: {}".format(response.json()["_seq_no"] + 1))

    def pre_condition_check(self):
        """Return True when ES is healthy and the index holds documents."""
        if not self.es_healthcheck():
            return False
        self.create_es_index()
        self.add_documents()
        return self.es_record_count() > 0
import os
import numpy as np
from math import sqrt
from scipy import stats
from torch_geometric.data import InMemoryDataset, DataLoader
from torch_geometric.data import Dataset
from torch_geometric import data as DATA
import torch
import pdb
class TrainDataset(Dataset):
    """Pairwise training set: each item is a pair of (SMILES, target) samples
    sharing a label, plus a per-pair `train_mixed` indicator.

    Graphs are built lazily in `get`, not cached on disk, so all of the
    torch_geometric file hooks below are stubbed out.
    """

    def __init__(self, root='./', train_x1_index=None, train_x2_index=None, train_d=None, train_t=None, y=None, onehot_train_mixed=None,smile_graph=None,transform=None,
                 pre_transform=None):
        super(TrainDataset, self).__init__(root,transform, pre_transform)
        #root is required for save preprocessed data, default is '/tmp'
        self.train_x1_index = train_x1_index     # indices of the first element of each pair
        self.train_x2_index = train_x2_index     # indices of the second element of each pair
        self.train_d = train_d                   # drugs (SMILES), indexed by the above
        self.train_t = train_t                   # targets, indexed by the above
        self.y = y                               # one label per pair
        self.onehot_train_mixed = onehot_train_mixed
        self.smile_graph = smile_graph           # SMILES -> (c_size, features, edge_index)

    @property
    def raw_file_names(self):
        pass

    @property
    def processed_file_names(self):
        pass

    def download(self):
        pass

    def _download(self):
        pass

    def _process(self):
        pass

    def process(self, xd1, xd2, xt1, xt2, y, train_mixed, smile_graph):
        """Build the two torch_geometric Data objects for one sample pair.

        Both Data objects carry the shared label `y` and the pair's
        `train_mixed` indicator.
        """
        smiles1 = xd1
        target1 = xt1
        smiles2 = xd2
        target2 = xt2
        labels = y
        # convert SMILES to molecular representation using rdkit
        c_size1, features1, edge_index1 = smile_graph[smiles1]
        c_size2, features2, edge_index2 = smile_graph[smiles2]
        # make the graph ready for PyTorch Geometrics GCN algorithms:
        GCNData1 = DATA.Data(x=torch.Tensor(features1),
                             edge_index=torch.LongTensor(edge_index1).transpose(1, 0),
                             y=torch.FloatTensor([labels]))
        GCNData1.target = torch.LongTensor([target1])
        GCNData1.train_mixed = torch.LongTensor([train_mixed])
        GCNData1.__setitem__('c_size', torch.LongTensor([c_size1]))
        GCNData2 = DATA.Data(x=torch.Tensor(features2),
                             edge_index=torch.LongTensor(edge_index2).transpose(1, 0),
                             y=torch.FloatTensor([labels]))
        GCNData2.target = torch.LongTensor([target2])
        GCNData2.train_mixed = torch.LongTensor([train_mixed])
        GCNData2.__setitem__('c_size', torch.LongTensor([c_size2]))
        return GCNData1, GCNData2

    def len(self):
        # Number of training pairs.
        return len(self.train_x1_index)

    def get(self, idx):
        """Return the idx-th pair as two Data objects built on the fly."""
        x1_index = self.train_x1_index[idx]
        x2_index = self.train_x2_index[idx]
        xd1 = self.train_d[x1_index]
        xd2 = self.train_d[x2_index]
        xt1 = self.train_t[x1_index]
        xt2 = self.train_t[x2_index]
        Y = self.y[idx]
        train_mixed = self.onehot_train_mixed[idx]
        data1, data2 = self.process(xd1, xd2, xt1, xt2, Y, train_mixed, self.smile_graph)
        return data1, data2
class TrainDataset1(Dataset):
    """Pairwise training set without the `train_mixed` indicator; otherwise
    identical in shape to `TrainDataset` (graphs built lazily in `get`)."""

    def __init__(self, root='./', train_x1_index=None, train_x2_index=None, train_d=None, train_t=None, y=None, smile_graph=None,transform=None,
                 pre_transform=None):
        super(TrainDataset1, self).__init__(root,transform, pre_transform)
        #root is required for save preprocessed data, default is '/tmp'
        self.train_x1_index = train_x1_index     # indices of the first element of each pair
        self.train_x2_index = train_x2_index     # indices of the second element of each pair
        self.train_d = train_d                   # drugs (SMILES)
        self.train_t = train_t                   # targets
        self.y = y                               # one label per pair
        self.smile_graph = smile_graph           # SMILES -> (c_size, features, edge_index)

    @property
    def raw_file_names(self):
        pass

    @property
    def processed_file_names(self):
        pass

    def download(self):
        pass

    def _download(self):
        pass

    def _process(self):
        pass

    def process(self, xd1, xd2, xt1, xt2, y, smile_graph):
        """Build the two torch_geometric Data objects for one sample pair."""
        smiles1 = xd1
        target1 = xt1
        smiles2 = xd2
        target2 = xt2
        labels = y
        # convert SMILES to molecular representation using rdkit
        c_size1, features1, edge_index1 = smile_graph[smiles1]
        c_size2, features2, edge_index2 = smile_graph[smiles2]
        # make the graph ready for PyTorch Geometrics GCN algorithms:
        GCNData1 = DATA.Data(x=torch.Tensor(features1),
                             edge_index=torch.LongTensor(edge_index1).transpose(1, 0),
                             y=torch.FloatTensor([labels]))
        GCNData1.target = torch.LongTensor([target1])
        GCNData1.__setitem__('c_size', torch.LongTensor([c_size1]))
        GCNData2 = DATA.Data(x=torch.Tensor(features2),
                             edge_index=torch.LongTensor(edge_index2).transpose(1, 0),
                             y=torch.FloatTensor([labels]))
        GCNData2.target = torch.LongTensor([target2])
        GCNData2.__setitem__('c_size', torch.LongTensor([c_size2]))
        return GCNData1, GCNData2

    def len(self):
        # Number of training pairs.
        return len(self.train_x1_index)

    def get(self, idx):
        """Return the idx-th pair as two Data objects built on the fly."""
        x1_index = self.train_x1_index[idx]
        x2_index = self.train_x2_index[idx]
        xd1 = self.train_d[x1_index]
        xd2 = self.train_d[x2_index]
        xt1 = self.train_t[x1_index]
        xt2 = self.train_t[x2_index]
        Y = self.y[idx]
        data1, data2 = self.process(xd1, xd2, xt1, xt2, Y, self.smile_graph)
        return data1, data2
class TestDataset(Dataset):
    """Test set that pre-builds every Data object once in __init__ and then
    serves *groups* of them via index lists (`test_index`)."""

    def __init__(self, root='./', xd=None, xt=None, y=None, smile_graph=None,test_index=None,transform=None,
                 pre_transform=None):
        #root is required for save preprocessed data, default is '/tmp'
        super(TestDataset, self).__init__(root,transform, pre_transform)
        self.test_index = test_index
        # Longest index group; not read inside this class — presumably used
        # by the caller for padding. TODO confirm.
        self.max_len = max([len(i) for i in self.test_index])
        self.data_list = self.process(xd, xt, y,smile_graph)

    @property
    def raw_file_names(self):
        pass

    @property
    def processed_file_names(self):
        pass

    def download(self):
        pass

    def _download(self):
        pass

    def _process(self):
        pass

    def process(self, xd, xt, y, smile_graph):
        """Convert parallel lists of SMILES/targets/labels into Data objects."""
        assert (len(xd) == len(xt) and len(xt) == len(y)), "The three lists must be the same length!"
        data_list = []
        data_len = len(xd)
        for i in range(data_len):
            smiles = xd[i]
            target = xt[i]
            labels = y[i]
            # convert SMILES to molecular representation using rdkit
            c_size, features, edge_index = smile_graph[smiles]
            # make the graph ready for PyTorch Geometrics GCN algorithms:
            GCNData = DATA.Data(x=torch.Tensor(features),
                                edge_index=torch.LongTensor(edge_index).transpose(1, 0),
                                y=torch.FloatTensor([labels]))
            GCNData.target = torch.LongTensor([target])
            GCNData.__setitem__('c_size', torch.LongTensor([c_size]))
            data_list.append(GCNData)
        return data_list

    def len(self):
        # Number of index groups, not number of samples.
        return len(self.test_index)

    def get(self, idx):
        """Return the idx-th group of pre-built Data objects."""
        return_test_index = self.test_index[idx]
        return_data = [ self.data_list[index] for index in return_test_index]
        return return_data
class TestDataset1(Dataset):
    """Grouped test set that builds Data objects lazily, one sample per index.

    Bug fixes vs. the original: `__init__` never stored `smile_graph`,
    `len()` read a nonexistent `self.test_index`, and `get()` referenced
    bare module-level names (`process`, `xd`, `xt`, `y`, `smile_graph`) —
    every call raised NameError/AttributeError. All now use the instance
    attributes.
    """

    def __init__(self, root='./', xd=None, xt=None, y=None, smile_graph=None,groupID=None,transform=None,
                 pre_transform=None):
        #root is required for save preprocessed data, default is '/tmp'
        super(TestDataset1, self).__init__(root,transform, pre_transform)
        self.xd = xd                      # drugs (SMILES)
        self.xt = xt                      # targets
        self.y = y                        # labels
        self.smile_graph = smile_graph    # fix: was dropped, but get() needs it
        self.groupID = groupID            # group id per sample

    @property
    def raw_file_names(self):
        pass

    @property
    def processed_file_names(self):
        pass

    def download(self):
        pass

    def _download(self):
        pass

    def _process(self):
        pass

    def process(self, xd, xt, y, smile_graph):
        """Convert parallel lists of SMILES/targets/labels into Data objects."""
        assert (len(xd) == len(xt) and len(xt) == len(y)), "The three lists must be the same length!"
        data_list = []
        data_len = len(xd)
        for i in range(data_len):
            smiles = xd[i]
            target = xt[i]
            labels = y[i]
            # convert SMILES to molecular representation using rdkit
            c_size, features, edge_index = smile_graph[smiles]
            # make the graph ready for PyTorch Geometrics GCN algorithms:
            GCNData = DATA.Data(x=torch.Tensor(features),
                                edge_index=torch.LongTensor(edge_index).transpose(1, 0),
                                y=torch.FloatTensor([labels]))
            GCNData.target = torch.LongTensor([target])
            GCNData.__setitem__('c_size', torch.LongTensor([c_size]))
            data_list.append(GCNData)
        return data_list

    def len(self):
        # Fix: this class has no test_index; length is the sample count.
        return len(self.xd)

    def get(self, idx):
        # Fix: call self.process with the stored attributes. process()
        # expects parallel lists, so wrap the single sample; return_data is
        # therefore a one-element list of Data objects.
        return_data = self.process([self.xd[idx]], [self.xt[idx]], [self.y[idx]], self.smile_graph)
        return_group = self.groupID[idx]
        return (return_data, return_group)
class Data_Encoder(Dataset):
    """Minimal Dataset wrapper that serves items of a pre-built sequence."""

    def __init__(self, root='./', data=None, transform=None, pre_transform=None):
        #root is required for save preprocessed data, default is '/tmp'
        super(Data_Encoder, self).__init__(root,transform, pre_transform)
        self.data = data

    @property
    def raw_file_names(self):
        pass

    @property
    def processed_file_names(self):
        pass

    def download(self):
        pass

    def _download(self):
        pass

    def _process(self):
        pass

    def len(self):
        return len(self.data)

    def get(self, idx):
        # Fix: removed stray dataset residue fused onto the return line,
        # which made the statement a syntax error.
        return_data = self.data[idx]
        return return_data


# Restored: this import belongs to the module text that follows and was
# fused into the preceding line by stray dataset residue.
import os
import numpy as np
from math import sqrt
from scipy import stats
from torch_geometric.data import InMemoryDataset, DataLoader
from torch_geometric.data import Dataset
from torch_geometric import data as DATA
import torch
import pdb
class TrainDataset(Dataset):
    """Pairwise training set: each item is a pair of (SMILES, target) samples
    sharing a label, plus a per-pair `train_mixed` indicator.

    Graphs are built lazily in `get`, not cached on disk, so all of the
    torch_geometric file hooks below are stubbed out.
    """

    def __init__(self, root='./', train_x1_index=None, train_x2_index=None, train_d=None, train_t=None, y=None, onehot_train_mixed=None,smile_graph=None,transform=None,
                 pre_transform=None):
        super(TrainDataset, self).__init__(root,transform, pre_transform)
        #root is required for save preprocessed data, default is '/tmp'
        self.train_x1_index = train_x1_index     # indices of the first element of each pair
        self.train_x2_index = train_x2_index     # indices of the second element of each pair
        self.train_d = train_d                   # drugs (SMILES), indexed by the above
        self.train_t = train_t                   # targets, indexed by the above
        self.y = y                               # one label per pair
        self.onehot_train_mixed = onehot_train_mixed
        self.smile_graph = smile_graph           # SMILES -> (c_size, features, edge_index)

    @property
    def raw_file_names(self):
        pass

    @property
    def processed_file_names(self):
        pass

    def download(self):
        pass

    def _download(self):
        pass

    def _process(self):
        pass

    def process(self, xd1, xd2, xt1, xt2, y, train_mixed, smile_graph):
        """Build the two torch_geometric Data objects for one sample pair.

        Both Data objects carry the shared label `y` and the pair's
        `train_mixed` indicator.
        """
        smiles1 = xd1
        target1 = xt1
        smiles2 = xd2
        target2 = xt2
        labels = y
        # convert SMILES to molecular representation using rdkit
        c_size1, features1, edge_index1 = smile_graph[smiles1]
        c_size2, features2, edge_index2 = smile_graph[smiles2]
        # make the graph ready for PyTorch Geometrics GCN algorithms:
        GCNData1 = DATA.Data(x=torch.Tensor(features1),
                             edge_index=torch.LongTensor(edge_index1).transpose(1, 0),
                             y=torch.FloatTensor([labels]))
        GCNData1.target = torch.LongTensor([target1])
        GCNData1.train_mixed = torch.LongTensor([train_mixed])
        GCNData1.__setitem__('c_size', torch.LongTensor([c_size1]))
        GCNData2 = DATA.Data(x=torch.Tensor(features2),
                             edge_index=torch.LongTensor(edge_index2).transpose(1, 0),
                             y=torch.FloatTensor([labels]))
        GCNData2.target = torch.LongTensor([target2])
        GCNData2.train_mixed = torch.LongTensor([train_mixed])
        GCNData2.__setitem__('c_size', torch.LongTensor([c_size2]))
        return GCNData1, GCNData2

    def len(self):
        # Number of training pairs.
        return len(self.train_x1_index)

    def get(self, idx):
        """Return the idx-th pair as two Data objects built on the fly."""
        x1_index = self.train_x1_index[idx]
        x2_index = self.train_x2_index[idx]
        xd1 = self.train_d[x1_index]
        xd2 = self.train_d[x2_index]
        xt1 = self.train_t[x1_index]
        xt2 = self.train_t[x2_index]
        Y = self.y[idx]
        train_mixed = self.onehot_train_mixed[idx]
        data1, data2 = self.process(xd1, xd2, xt1, xt2, Y, train_mixed, self.smile_graph)
        return data1, data2
class TrainDataset1(Dataset):
    """Pairwise training set without the `train_mixed` indicator; otherwise
    identical in shape to `TrainDataset` (graphs built lazily in `get`)."""

    def __init__(self, root='./', train_x1_index=None, train_x2_index=None, train_d=None, train_t=None, y=None, smile_graph=None,transform=None,
                 pre_transform=None):
        super(TrainDataset1, self).__init__(root,transform, pre_transform)
        #root is required for save preprocessed data, default is '/tmp'
        self.train_x1_index = train_x1_index     # indices of the first element of each pair
        self.train_x2_index = train_x2_index     # indices of the second element of each pair
        self.train_d = train_d                   # drugs (SMILES)
        self.train_t = train_t                   # targets
        self.y = y                               # one label per pair
        self.smile_graph = smile_graph           # SMILES -> (c_size, features, edge_index)

    @property
    def raw_file_names(self):
        pass

    @property
    def processed_file_names(self):
        pass

    def download(self):
        pass

    def _download(self):
        pass

    def _process(self):
        pass

    def process(self, xd1, xd2, xt1, xt2, y, smile_graph):
        """Build the two torch_geometric Data objects for one sample pair."""
        smiles1 = xd1
        target1 = xt1
        smiles2 = xd2
        target2 = xt2
        labels = y
        # convert SMILES to molecular representation using rdkit
        c_size1, features1, edge_index1 = smile_graph[smiles1]
        c_size2, features2, edge_index2 = smile_graph[smiles2]
        # make the graph ready for PyTorch Geometrics GCN algorithms:
        GCNData1 = DATA.Data(x=torch.Tensor(features1),
                             edge_index=torch.LongTensor(edge_index1).transpose(1, 0),
                             y=torch.FloatTensor([labels]))
        GCNData1.target = torch.LongTensor([target1])
        GCNData1.__setitem__('c_size', torch.LongTensor([c_size1]))
        GCNData2 = DATA.Data(x=torch.Tensor(features2),
                             edge_index=torch.LongTensor(edge_index2).transpose(1, 0),
                             y=torch.FloatTensor([labels]))
        GCNData2.target = torch.LongTensor([target2])
        GCNData2.__setitem__('c_size', torch.LongTensor([c_size2]))
        return GCNData1, GCNData2

    def len(self):
        # Number of training pairs.
        return len(self.train_x1_index)

    def get(self, idx):
        """Return the idx-th pair as two Data objects built on the fly."""
        x1_index = self.train_x1_index[idx]
        x2_index = self.train_x2_index[idx]
        xd1 = self.train_d[x1_index]
        xd2 = self.train_d[x2_index]
        xt1 = self.train_t[x1_index]
        xt2 = self.train_t[x2_index]
        Y = self.y[idx]
        data1, data2 = self.process(xd1, xd2, xt1, xt2, Y, self.smile_graph)
        return data1, data2
class TestDataset(Dataset):
    """Test set that pre-builds every Data object once in __init__ and then
    serves *groups* of them via index lists (`test_index`)."""

    def __init__(self, root='./', xd=None, xt=None, y=None, smile_graph=None,test_index=None,transform=None,
                 pre_transform=None):
        #root is required for save preprocessed data, default is '/tmp'
        super(TestDataset, self).__init__(root,transform, pre_transform)
        self.test_index = test_index
        # Longest index group; not read inside this class — presumably used
        # by the caller for padding. TODO confirm.
        self.max_len = max([len(i) for i in self.test_index])
        self.data_list = self.process(xd, xt, y,smile_graph)

    @property
    def raw_file_names(self):
        pass

    @property
    def processed_file_names(self):
        pass

    def download(self):
        pass

    def _download(self):
        pass

    def _process(self):
        pass

    def process(self, xd, xt, y, smile_graph):
        """Convert parallel lists of SMILES/targets/labels into Data objects."""
        assert (len(xd) == len(xt) and len(xt) == len(y)), "The three lists must be the same length!"
        data_list = []
        data_len = len(xd)
        for i in range(data_len):
            smiles = xd[i]
            target = xt[i]
            labels = y[i]
            # convert SMILES to molecular representation using rdkit
            c_size, features, edge_index = smile_graph[smiles]
            # make the graph ready for PyTorch Geometrics GCN algorithms:
            GCNData = DATA.Data(x=torch.Tensor(features),
                                edge_index=torch.LongTensor(edge_index).transpose(1, 0),
                                y=torch.FloatTensor([labels]))
            GCNData.target = torch.LongTensor([target])
            GCNData.__setitem__('c_size', torch.LongTensor([c_size]))
            data_list.append(GCNData)
        return data_list

    def len(self):
        # Number of index groups, not number of samples.
        return len(self.test_index)

    def get(self, idx):
        """Return the idx-th group of pre-built Data objects."""
        return_test_index = self.test_index[idx]
        return_data = [ self.data_list[index] for index in return_test_index]
        return return_data
class TestDataset1(Dataset):
def __init__(self, root='./', xd=None, xt=None, y=None, smile_graph=None,groupID=None,transform=None,
pre_transform=None):
#root is required for save preprocessed data, default is '/tmp'
super(TestDataset1, self).__init__(root,transform, pre_transform)
self.xd = xd
self.xt = xt
self.y = y
self.groupID = groupID
@property
def raw_file_names(self):
pass
@property
def processed_file_names(self):
pass
def download(self):
pass
def _download(self):
pass
def _process(self):
pass
def process(self, xd, xt, y, smile_graph):
assert (len(xd) == len(xt) and len(xt) == len(y)), "The three lists must be the same length!"
data_list = []
data_len = len(xd)
for i in range(data_len):
smiles = xd[i]
target = xt[i]
labels = y[i]
# convert SMILES to molecular representation using rdkit
c_size, features, edge_index = smile_graph[smiles]
# make the graph ready for PyTorch Geometrics GCN algorithms:
GCNData = DATA.Data(x=torch.Tensor(features),
edge_index=torch.LongTensor(edge_index).transpose(1, 0),
y=torch.FloatTensor([labels]))
GCNData.target = torch.LongTensor([target])
GCNData.__setitem__('c_size', torch.LongTensor([c_size]))
data_list.append(GCNData)
return data_list
def len(self):
return len(self.test_index)
def get(self, idx):
return_data = process( xd[idx], xt[idx], y[idx], smile_graph)
return_group = self.groupID[idx]
return (return_data, return_group)
class Data_Encoder(Dataset):
def __init__(self, root='./', data=None, transform=None, pre_transform=None):
#root is required for save preprocessed data, default is '/tmp'
super(Data_Encoder, self).__init__(root,transform, pre_transform)
self.data = data
@property
def raw_file_names(self):
pass
@property
def processed_file_names(self):
pass
def download(self):
pass
def _download(self):
pass
def _process(self):
pass
def len(self):
return len(self.data)
def get(self, idx):
return_data = self.data[idx]
return return_data | 0.602179 | 0.559711 |
import unittest
import inventoryanalytics.lotsizing.deterministic.constant.eoq as eoq
import numpy as np
class TestEOQ(unittest.TestCase):
def setUp(self):
instance = {"K": 3.2, "h": 0.24, "d": 2400, "v": 0.4}
self.eoq = eoq.eoq(**instance)
def tearDown(self):
pass
def test_eoq(self):
K, h, d = self.eoq.K, self.eoq.h, self.eoq.d
self.assertAlmostEqual(self.eoq.compute_eoq(),
np.sqrt(2*d*K/h), places=2) # closed-form
self.assertAlmostEqual(self.eoq.compute_eoq(), 252.98, places=2)
def test_cost(self):
Q = self.eoq.compute_eoq()
self.assertAlmostEqual(1020.72, self.eoq.cost(Q), places=2)
K, h, d, v = self.eoq.K, self.eoq.h, self.eoq.d, self.eoq.v
self.assertAlmostEqual(self.eoq.cost(Q),
np.sqrt(2*K*h*d)+v*d, places=2) # closed-form
def test_relevant_cost(self):
Q = self.eoq.compute_eoq()
K, h, d = self.eoq.K, self.eoq.h, self.eoq.d
self.assertAlmostEqual(self.eoq.relevant_cost(Q),
np.sqrt(2*K*h*d), places=2) # closed-form
def test_itr(self):
self.assertAlmostEqual(self.eoq.itr(), 18.97, places=2)
def test_sensitivity_to_Q(self):
Q = 30
Qopt = self.eoq.compute_eoq()
d, v = self.eoq.d, self.eoq.v
self.assertAlmostEquals(self.eoq.sensitivity_to_Q(Q), (self.eoq.cost(Q)-d*v)/(self.eoq.cost(Qopt)-d*v), places=2)
def test_reorder_point(self):
L = 1/12
self.assertAlmostEquals(self.eoq.reorder_point(L), 200, places=2)
def test_coverage(self):
self.assertAlmostEqual(self.eoq.coverage(), 1.26/12, places=2)
class TestEOQ_all_units_discounts(unittest.TestCase):
def setUp(self):
instance = {"K": 3.2, "h": 0.24, "d": 2400, "v": 0.4}
self.eoq = eoq.eoq(**instance)
def tearDown(self):
pass
def test_eoq(self):
instance = {"K": 8, "h": 0.3, "d": 1300, "b": [400,800], "v": [0.75,0.72,0.68]}
pb = eoq.eoq_all_units_discounts(**instance)
Q = pb.compute_eoq()
self.assertAlmostEqual(Q, 800, places=2)
self.assertAlmostEqual(pb.cost(Q), 978.6, places=2)
class TestEOQ_incremental_discounts(unittest.TestCase):
def setUp(self):
instance = {"K": 3.2, "h": 0.24, "d": 2400, "v": 0.4}
self.eoq = eoq.eoq(**instance)
def tearDown(self):
pass
def test_eoq(self):
instance = {"K": 8, "h": 0.3, "d": 1300, "b": [400,800], "v": [0.75,0.72,0.68]}
pb = eoq.eoq_incremental_discounts(**instance)
Q = pb.compute_eoq()
self.assertAlmostEqual(Q, 304.05, places=2)
self.assertAlmostEqual(pb.cost(Q), 1043.41, places=2)
class TestEOQ_planned_backorders(unittest.TestCase):
def setUp(self):
instance = {"K": 8, "h": 0.3*0.75, "d": 1300, "v": 75, "p": 5}
self.eoq = eoq.eoq_planned_backorders(**instance)
def tearDown(self):
pass
def test_eoq(self):
K, h, d, p = self.eoq.K, self.eoq.h, self.eoq.d, self.eoq.p
self.assertAlmostEqual(self.eoq.compute_eoq(),
np.sqrt(2*K*d*(h+p)/(h*p)),
places=2) # closed-form
class TestEPQ(unittest.TestCase):
def setUp(self):
instance = {"K": 8, "h": 0.3*0.75, "d": 1300, "v": 75, "p": 5}
self.epq = eoq.epq(**instance)
def tearDown(self):
pass
def test_epq(self):
K, h, d, p = self.epq.K, self.epq.h, self.epq.d, self.epq.p
rho = p/d
self.assertAlmostEqual(self.epq.compute_epq(),
np.sqrt(2*K*d/(h*(1-rho))),
places=2) # closed-form | inventoryanalytics/lotsizing/deterministic/constant/test/eoq_test.py | import unittest
import inventoryanalytics.lotsizing.deterministic.constant.eoq as eoq
import numpy as np
class TestEOQ(unittest.TestCase):
def setUp(self):
instance = {"K": 3.2, "h": 0.24, "d": 2400, "v": 0.4}
self.eoq = eoq.eoq(**instance)
def tearDown(self):
pass
def test_eoq(self):
K, h, d = self.eoq.K, self.eoq.h, self.eoq.d
self.assertAlmostEqual(self.eoq.compute_eoq(),
np.sqrt(2*d*K/h), places=2) # closed-form
self.assertAlmostEqual(self.eoq.compute_eoq(), 252.98, places=2)
def test_cost(self):
Q = self.eoq.compute_eoq()
self.assertAlmostEqual(1020.72, self.eoq.cost(Q), places=2)
K, h, d, v = self.eoq.K, self.eoq.h, self.eoq.d, self.eoq.v
self.assertAlmostEqual(self.eoq.cost(Q),
np.sqrt(2*K*h*d)+v*d, places=2) # closed-form
def test_relevant_cost(self):
Q = self.eoq.compute_eoq()
K, h, d = self.eoq.K, self.eoq.h, self.eoq.d
self.assertAlmostEqual(self.eoq.relevant_cost(Q),
np.sqrt(2*K*h*d), places=2) # closed-form
def test_itr(self):
self.assertAlmostEqual(self.eoq.itr(), 18.97, places=2)
def test_sensitivity_to_Q(self):
Q = 30
Qopt = self.eoq.compute_eoq()
d, v = self.eoq.d, self.eoq.v
self.assertAlmostEquals(self.eoq.sensitivity_to_Q(Q), (self.eoq.cost(Q)-d*v)/(self.eoq.cost(Qopt)-d*v), places=2)
def test_reorder_point(self):
L = 1/12
self.assertAlmostEquals(self.eoq.reorder_point(L), 200, places=2)
def test_coverage(self):
self.assertAlmostEqual(self.eoq.coverage(), 1.26/12, places=2)
class TestEOQ_all_units_discounts(unittest.TestCase):
def setUp(self):
instance = {"K": 3.2, "h": 0.24, "d": 2400, "v": 0.4}
self.eoq = eoq.eoq(**instance)
def tearDown(self):
pass
def test_eoq(self):
instance = {"K": 8, "h": 0.3, "d": 1300, "b": [400,800], "v": [0.75,0.72,0.68]}
pb = eoq.eoq_all_units_discounts(**instance)
Q = pb.compute_eoq()
self.assertAlmostEqual(Q, 800, places=2)
self.assertAlmostEqual(pb.cost(Q), 978.6, places=2)
class TestEOQ_incremental_discounts(unittest.TestCase):
def setUp(self):
instance = {"K": 3.2, "h": 0.24, "d": 2400, "v": 0.4}
self.eoq = eoq.eoq(**instance)
def tearDown(self):
pass
def test_eoq(self):
instance = {"K": 8, "h": 0.3, "d": 1300, "b": [400,800], "v": [0.75,0.72,0.68]}
pb = eoq.eoq_incremental_discounts(**instance)
Q = pb.compute_eoq()
self.assertAlmostEqual(Q, 304.05, places=2)
self.assertAlmostEqual(pb.cost(Q), 1043.41, places=2)
class TestEOQ_planned_backorders(unittest.TestCase):
def setUp(self):
instance = {"K": 8, "h": 0.3*0.75, "d": 1300, "v": 75, "p": 5}
self.eoq = eoq.eoq_planned_backorders(**instance)
def tearDown(self):
pass
def test_eoq(self):
K, h, d, p = self.eoq.K, self.eoq.h, self.eoq.d, self.eoq.p
self.assertAlmostEqual(self.eoq.compute_eoq(),
np.sqrt(2*K*d*(h+p)/(h*p)),
places=2) # closed-form
class TestEPQ(unittest.TestCase):
def setUp(self):
instance = {"K": 8, "h": 0.3*0.75, "d": 1300, "v": 75, "p": 5}
self.epq = eoq.epq(**instance)
def tearDown(self):
pass
def test_epq(self):
K, h, d, p = self.epq.K, self.epq.h, self.epq.d, self.epq.p
rho = p/d
self.assertAlmostEqual(self.epq.compute_epq(),
np.sqrt(2*K*d/(h*(1-rho))),
places=2) # closed-form | 0.457137 | 0.760951 |
from matchmaker.losses.lambdarank import *
from matchmaker.losses.listnet import *
from matchmaker.losses.ranknet import *
from matchmaker.losses.msmargin import *
from matchmaker.losses.teacher_kldiv_list import *
from matchmaker.losses.teacher_kldiv_pointwise import *
from matchmaker.losses.teacher_mse_pointwise import *
from matchmaker.losses.teacher_ranknetweighted import *
from matchmaker.losses.teacher_mse_ranknet import *
from matchmaker.losses.QA_StartEndCrossEntropy import *
def merge_loss(losses, log_vars):
loss = torch.zeros(1,device=log_vars.device)
weighted_losses = []
for l in range(len(losses)):
precision = torch.exp(-log_vars[l])
wl = torch.sum(precision * losses[l] + log_vars[l], -1)
loss += wl
weighted_losses.append(wl.detach())
return torch.mean(loss),weighted_losses
def get_loss(config):
use_list_loss=False
use_inbatch_list_loss=False
qa_loss=None
inbatch_loss=None
if config["loss"] == "margin-mse":
loss = MSMarginLoss()
elif config["loss"] == "MSETeacherPointwise":
loss = MSETeacherPointwise()
elif config["loss"] == "MSETeacherPointwisePassages":
loss = MSETeacherPointwisePassages()
elif config["loss"] == "MarginMSE_InterPassageLoss":
loss = MarginMSE_InterPassageLoss()
elif config["loss"] == "KLDivTeacherPointwise":
loss = KLDivTeacherPointwise()
elif config["loss"] == "RankNetTeacher":
loss = RankNetTeacher()
elif config["loss"] == "MSERanknetTeacher":
loss = MSERanknetTeacher()
elif config["loss"] == "ranknet":
loss = RankNetLoss()
elif config["loss"] == "margin":
loss = torch.nn.MarginRankingLoss(margin=1, reduction='mean')
elif config["loss"] == "mrr":
loss = SmoothMRRLoss()
use_list_loss = True
elif config["loss"] == "listnet":
loss = ListNetLoss()
use_list_loss = True
elif config["loss"] == "lambdarank":
loss = LambdaLoss("ndcgLoss2_scheme")
use_list_loss = True
else:
raise Exception("Loss not known")
if config["train_qa_spans"]:
if config["qa_loss"] == "StartEndCrossEntropy":
qa_loss = QA_StartEndCrossEntropy()
else:
raise Exception("QA-Loss not known, qa_loss must be set with train_qa_spans")
if config["in_batch_negatives"]:
if config["in_batch_neg_loss"] == "ranknet":
inbatch_loss = RankNetLoss()
elif config["in_batch_neg_loss"] == "margin-mse":
inbatch_loss = MSMarginLoss()
elif config["in_batch_neg_loss"] == "KLDivTeacherList":
inbatch_loss = KLDivTeacherList()
use_inbatch_list_loss = True
elif config["in_batch_neg_loss"] == "listnet":
inbatch_loss = ListNetLoss()
use_inbatch_list_loss = True
elif config["in_batch_neg_loss"] == "lambdarank":
inbatch_loss = LambdaLossTeacher("ndcgLoss2_scheme")
use_inbatch_list_loss = True
else:
raise Exception("In-batch-Loss not known, in_batch_neg_loss must be set with in_batch_negatives")
return loss, qa_loss, inbatch_loss, use_list_loss,use_inbatch_list_loss | matchmaker/losses/all.py | from matchmaker.losses.lambdarank import *
from matchmaker.losses.listnet import *
from matchmaker.losses.ranknet import *
from matchmaker.losses.msmargin import *
from matchmaker.losses.teacher_kldiv_list import *
from matchmaker.losses.teacher_kldiv_pointwise import *
from matchmaker.losses.teacher_mse_pointwise import *
from matchmaker.losses.teacher_ranknetweighted import *
from matchmaker.losses.teacher_mse_ranknet import *
from matchmaker.losses.QA_StartEndCrossEntropy import *
def merge_loss(losses, log_vars):
loss = torch.zeros(1,device=log_vars.device)
weighted_losses = []
for l in range(len(losses)):
precision = torch.exp(-log_vars[l])
wl = torch.sum(precision * losses[l] + log_vars[l], -1)
loss += wl
weighted_losses.append(wl.detach())
return torch.mean(loss),weighted_losses
def get_loss(config):
use_list_loss=False
use_inbatch_list_loss=False
qa_loss=None
inbatch_loss=None
if config["loss"] == "margin-mse":
loss = MSMarginLoss()
elif config["loss"] == "MSETeacherPointwise":
loss = MSETeacherPointwise()
elif config["loss"] == "MSETeacherPointwisePassages":
loss = MSETeacherPointwisePassages()
elif config["loss"] == "MarginMSE_InterPassageLoss":
loss = MarginMSE_InterPassageLoss()
elif config["loss"] == "KLDivTeacherPointwise":
loss = KLDivTeacherPointwise()
elif config["loss"] == "RankNetTeacher":
loss = RankNetTeacher()
elif config["loss"] == "MSERanknetTeacher":
loss = MSERanknetTeacher()
elif config["loss"] == "ranknet":
loss = RankNetLoss()
elif config["loss"] == "margin":
loss = torch.nn.MarginRankingLoss(margin=1, reduction='mean')
elif config["loss"] == "mrr":
loss = SmoothMRRLoss()
use_list_loss = True
elif config["loss"] == "listnet":
loss = ListNetLoss()
use_list_loss = True
elif config["loss"] == "lambdarank":
loss = LambdaLoss("ndcgLoss2_scheme")
use_list_loss = True
else:
raise Exception("Loss not known")
if config["train_qa_spans"]:
if config["qa_loss"] == "StartEndCrossEntropy":
qa_loss = QA_StartEndCrossEntropy()
else:
raise Exception("QA-Loss not known, qa_loss must be set with train_qa_spans")
if config["in_batch_negatives"]:
if config["in_batch_neg_loss"] == "ranknet":
inbatch_loss = RankNetLoss()
elif config["in_batch_neg_loss"] == "margin-mse":
inbatch_loss = MSMarginLoss()
elif config["in_batch_neg_loss"] == "KLDivTeacherList":
inbatch_loss = KLDivTeacherList()
use_inbatch_list_loss = True
elif config["in_batch_neg_loss"] == "listnet":
inbatch_loss = ListNetLoss()
use_inbatch_list_loss = True
elif config["in_batch_neg_loss"] == "lambdarank":
inbatch_loss = LambdaLossTeacher("ndcgLoss2_scheme")
use_inbatch_list_loss = True
else:
raise Exception("In-batch-Loss not known, in_batch_neg_loss must be set with in_batch_negatives")
return loss, qa_loss, inbatch_loss, use_list_loss,use_inbatch_list_loss | 0.564098 | 0.181934 |
import os
import tempfile
from copy import copy
import numpy as np
import rasterio as rio
from elapid import utils
# set the test raster data paths
directory_path, script_path = os.path.split(os.path.abspath(__file__))
data_path = os.path.join(directory_path, "data")
raster_1b = os.path.join(data_path, "test-raster-1band.tif")
raster_2b = os.path.join(data_path, "test-raster-2bands.tif")
raster_1b_offset = os.path.join(data_path, "test-raster-1band-offset.tif")
with rio.open(raster_1b, "r") as src:
raster_1b_profile = copy(src.profile)
def test_repeat_array():
n_elements = 10
x = np.zeros(n_elements)
# repeating row-wise
r1 = utils.repeat_array(x, length=1, axis=0)
r2 = utils.repeat_array(x, length=2, axis=0)
assert r1.shape == (1, n_elements)
assert r2.shape == (2, n_elements)
# repeating column-wise
c1 = utils.repeat_array(x, length=1, axis=1)
c2 = utils.repeat_array(x, length=2, axis=1)
assert c1.shape == (n_elements, 1)
assert c2.shape == (n_elements, 2)
def test_load_sample_data():
x, y = utils.load_sample_data(name="bradypus")
assert x.shape == (1116, 14), "Dataframe not verified dimensions"
assert len(x) == len(y), "Nuber of x/y rows must match"
assert y.min() == 0, "y data should only contain 0/1"
assert y.max() == 1, "y data should only contain 0/1"
# data verified from first row
first_record = np.array([76, 104, 10, 2, 121, 46, 84, 41, 54, 3, 192, 266, 337, 279])
diff = x.iloc[0].to_numpy() - first_record
assert diff.sum() == 0, "First row of bradypus data incorrectly read"
def test_save_object():
obj = np.zeros(10)
try:
with tempfile.NamedTemporaryFile() as tf:
temp_name = tf.name
utils.save_object(obj, temp_name, compress=False)
uncompressed_size = os.path.getsize(temp_name)
assert uncompressed_size > 0, "Saved file should be greater than zero bytes"
utils.save_object(obj, temp_name, compress=True)
compressed_size = os.path.getsize(temp_name)
assert compressed_size < uncompressed_size, "Compressed size should be smaller than uncompressed"
except PermissionError:
pass
def test_load_object():
n_elements = 10
obj = np.zeros(n_elements)
obj[-1] = n_elements
compress = False
with tempfile.NamedTemporaryFile() as tf:
temp_name = tf.name
utils.save_object(obj, temp_name, compress=compress)
loaded_obj = utils.load_object(temp_name, compressed=compress)
assert len(loaded_obj) == n_elements, "Loaded object doesn't match shape of saved object"
assert loaded_obj[-1] == n_elements, "Loaded object doesn't match data content of saved object"
def test_create_output_raster_profile():
raster_paths = [raster_1b, raster_2b]
nodata = -9999
windows, output_profile = utils.create_output_raster_profile(raster_paths, template_idx=0, nodata=nodata)
# window check
nwindows = len(list(windows))
assert nwindows == 1
# profile check
assert raster_1b_profile["width"] == output_profile["width"]
assert raster_1b_profile["nodata"] != output_profile["nodata"]
def test_get_raster_band_indexes():
raster_paths = [raster_1b, raster_2b]
nbands, index = utils.get_raster_band_indexes(raster_paths)
assert nbands == 3
assert index == [0, 1, 3]
def test_check_raster_alignment():
# fail on misaligned
raster_paths = [raster_1b, raster_1b_offset]
aligned = utils.check_raster_alignment(raster_paths)
assert aligned is False
# succeed on aligned
raster_paths = [raster_1b, raster_2b]
aligned = utils.check_raster_alignment(raster_paths)
assert aligned is True
def test_in_notebook():
assert utils.in_notebook() is False
def test_get_tqdm():
tqdm = utils.get_tqdm()
methods = dir(tqdm)
assert "monitor_interval" in methods
assert "tqdm_notebook" not in methods, "Returned tqdm should not be the base module"
def test_n_digits():
assert utils.n_digits(1) == 1
assert utils.n_digits(11) == 2
assert utils.n_digits(111) == 3
def test_count_raster_bands():
list_2b = [raster_1b, raster_1b]
list_3b = [raster_1b, raster_2b]
assert utils.count_raster_bands(list_2b) == 2
assert utils.count_raster_bands(list_3b) == 3
def test_make_band_labels():
n_bands = 1
labels = utils.make_band_labels(n_bands)
assert len(labels) == n_bands
n_bands = 10
labels = utils.make_band_labels(n_bands)
assert len(labels) == n_bands | tests/test_utils.py |
import os
import tempfile
from copy import copy
import numpy as np
import rasterio as rio
from elapid import utils
# set the test raster data paths
directory_path, script_path = os.path.split(os.path.abspath(__file__))
data_path = os.path.join(directory_path, "data")
raster_1b = os.path.join(data_path, "test-raster-1band.tif")
raster_2b = os.path.join(data_path, "test-raster-2bands.tif")
raster_1b_offset = os.path.join(data_path, "test-raster-1band-offset.tif")
with rio.open(raster_1b, "r") as src:
raster_1b_profile = copy(src.profile)
def test_repeat_array():
n_elements = 10
x = np.zeros(n_elements)
# repeating row-wise
r1 = utils.repeat_array(x, length=1, axis=0)
r2 = utils.repeat_array(x, length=2, axis=0)
assert r1.shape == (1, n_elements)
assert r2.shape == (2, n_elements)
# repeating column-wise
c1 = utils.repeat_array(x, length=1, axis=1)
c2 = utils.repeat_array(x, length=2, axis=1)
assert c1.shape == (n_elements, 1)
assert c2.shape == (n_elements, 2)
def test_load_sample_data():
x, y = utils.load_sample_data(name="bradypus")
assert x.shape == (1116, 14), "Dataframe not verified dimensions"
assert len(x) == len(y), "Nuber of x/y rows must match"
assert y.min() == 0, "y data should only contain 0/1"
assert y.max() == 1, "y data should only contain 0/1"
# data verified from first row
first_record = np.array([76, 104, 10, 2, 121, 46, 84, 41, 54, 3, 192, 266, 337, 279])
diff = x.iloc[0].to_numpy() - first_record
assert diff.sum() == 0, "First row of bradypus data incorrectly read"
def test_save_object():
obj = np.zeros(10)
try:
with tempfile.NamedTemporaryFile() as tf:
temp_name = tf.name
utils.save_object(obj, temp_name, compress=False)
uncompressed_size = os.path.getsize(temp_name)
assert uncompressed_size > 0, "Saved file should be greater than zero bytes"
utils.save_object(obj, temp_name, compress=True)
compressed_size = os.path.getsize(temp_name)
assert compressed_size < uncompressed_size, "Compressed size should be smaller than uncompressed"
except PermissionError:
pass
def test_load_object():
n_elements = 10
obj = np.zeros(n_elements)
obj[-1] = n_elements
compress = False
with tempfile.NamedTemporaryFile() as tf:
temp_name = tf.name
utils.save_object(obj, temp_name, compress=compress)
loaded_obj = utils.load_object(temp_name, compressed=compress)
assert len(loaded_obj) == n_elements, "Loaded object doesn't match shape of saved object"
assert loaded_obj[-1] == n_elements, "Loaded object doesn't match data content of saved object"
def test_create_output_raster_profile():
raster_paths = [raster_1b, raster_2b]
nodata = -9999
windows, output_profile = utils.create_output_raster_profile(raster_paths, template_idx=0, nodata=nodata)
# window check
nwindows = len(list(windows))
assert nwindows == 1
# profile check
assert raster_1b_profile["width"] == output_profile["width"]
assert raster_1b_profile["nodata"] != output_profile["nodata"]
def test_get_raster_band_indexes():
raster_paths = [raster_1b, raster_2b]
nbands, index = utils.get_raster_band_indexes(raster_paths)
assert nbands == 3
assert index == [0, 1, 3]
def test_check_raster_alignment():
# fail on misaligned
raster_paths = [raster_1b, raster_1b_offset]
aligned = utils.check_raster_alignment(raster_paths)
assert aligned is False
# succeed on aligned
raster_paths = [raster_1b, raster_2b]
aligned = utils.check_raster_alignment(raster_paths)
assert aligned is True
def test_in_notebook():
assert utils.in_notebook() is False
def test_get_tqdm():
tqdm = utils.get_tqdm()
methods = dir(tqdm)
assert "monitor_interval" in methods
assert "tqdm_notebook" not in methods, "Returned tqdm should not be the base module"
def test_n_digits():
assert utils.n_digits(1) == 1
assert utils.n_digits(11) == 2
assert utils.n_digits(111) == 3
def test_count_raster_bands():
list_2b = [raster_1b, raster_1b]
list_3b = [raster_1b, raster_2b]
assert utils.count_raster_bands(list_2b) == 2
assert utils.count_raster_bands(list_3b) == 3
def test_make_band_labels():
n_bands = 1
labels = utils.make_band_labels(n_bands)
assert len(labels) == n_bands
n_bands = 10
labels = utils.make_band_labels(n_bands)
assert len(labels) == n_bands | 0.530236 | 0.607285 |
import logging
import sys
from copy import deepcopy
if sys.version_info < (3, 3):
from collections import Mapping
else:
from collections.abc import Mapping
__author__ = "<NAME>"
__email__ = "<EMAIL>"
__all__ = ["get_data_lines"]
def copy(obj):
def copy(self):
"""
Copy self to a new object.
"""
return deepcopy(self)
obj.copy = copy
return obj
def get_data_lines(data, fun_key, space_per_level=2, fun_val=None):
"""
Get text representation lines for a mapping's data.
:param Mapping data: collection of data for which to get repr lines
:param function(object, prefix) -> str fun_key: function to render key
as text
:param function(object, prefix) -> str fun_val: function to render value
as text
:param int space_per_level: number of spaces per level of nesting
:return Iterable[str]: collection of lines
"""
# If no specific value-render function, use key-render function
fun_val = fun_val or fun_key
def space(lev):
return " " * lev * space_per_level
# Render a line; pass val=<obj> for a line with a value (i.e., not header)
def render(lev, key, **kwargs):
ktext = fun_key(key) + ":"
try:
val = kwargs["val"]
except KeyError:
return space(lev) + ktext
else:
return space(lev) + "{} {}".format(
ktext, "null" if val is None else fun_val(val, space(lev))
)
def go(kvs, curr_lev, acc):
try:
k, v = next(kvs)
except StopIteration:
return acc
if not isinstance(v, Mapping) or len(v) == 0:
# Add line representing single key-value or empty mapping
acc.append(render(curr_lev, k, val=v))
else:
# Add section header and section data.
acc.append(render(curr_lev, k))
acc.append("\n".join(go(iter(v.items()), curr_lev + 1, [])))
return go(kvs, curr_lev, acc)
return go(iter(data.items()), 0, [])
def get_logger(name):
"""
Return a logger equipped with a null handler.
:param str name: name for the Logger
:return logging.Logger: simple Logger instance with a NullHandler
"""
log = logging.getLogger(name)
log.addHandler(logging.NullHandler())
return log
def is_custom_map(obj):
"""
Determine whether an object is a Mapping other than dict.
:param object obj: object to examine
:return bool: whether the object is a Mapping other than dict
"""
return isinstance(obj, Mapping) and type(obj) is not dict
def safedel_message(key):
"""
Create safe deletion log message.
:param hashable key: unmapped key for which deletion/removal was tried
:return str: message to log unmapped key deletion attempt.
"""
return "No key {} to delete".format(key) | attmap/helpers.py |
import logging
import sys
from copy import deepcopy
if sys.version_info < (3, 3):
from collections import Mapping
else:
from collections.abc import Mapping
__author__ = "<NAME>"
__email__ = "<EMAIL>"
__all__ = ["get_data_lines"]
def copy(obj):
def copy(self):
"""
Copy self to a new object.
"""
return deepcopy(self)
obj.copy = copy
return obj
def get_data_lines(data, fun_key, space_per_level=2, fun_val=None):
"""
Get text representation lines for a mapping's data.
:param Mapping data: collection of data for which to get repr lines
:param function(object, prefix) -> str fun_key: function to render key
as text
:param function(object, prefix) -> str fun_val: function to render value
as text
:param int space_per_level: number of spaces per level of nesting
:return Iterable[str]: collection of lines
"""
# If no specific value-render function, use key-render function
fun_val = fun_val or fun_key
def space(lev):
return " " * lev * space_per_level
# Render a line; pass val=<obj> for a line with a value (i.e., not header)
def render(lev, key, **kwargs):
ktext = fun_key(key) + ":"
try:
val = kwargs["val"]
except KeyError:
return space(lev) + ktext
else:
return space(lev) + "{} {}".format(
ktext, "null" if val is None else fun_val(val, space(lev))
)
def go(kvs, curr_lev, acc):
try:
k, v = next(kvs)
except StopIteration:
return acc
if not isinstance(v, Mapping) or len(v) == 0:
# Add line representing single key-value or empty mapping
acc.append(render(curr_lev, k, val=v))
else:
# Add section header and section data.
acc.append(render(curr_lev, k))
acc.append("\n".join(go(iter(v.items()), curr_lev + 1, [])))
return go(kvs, curr_lev, acc)
return go(iter(data.items()), 0, [])
def get_logger(name):
"""
Return a logger equipped with a null handler.
:param str name: name for the Logger
:return logging.Logger: simple Logger instance with a NullHandler
"""
log = logging.getLogger(name)
log.addHandler(logging.NullHandler())
return log
def is_custom_map(obj):
"""
Determine whether an object is a Mapping other than dict.
:param object obj: object to examine
:return bool: whether the object is a Mapping other than dict
"""
return isinstance(obj, Mapping) and type(obj) is not dict
def safedel_message(key):
"""
Create safe deletion log message.
:param hashable key: unmapped key for which deletion/removal was tried
:return str: message to log unmapped key deletion attempt.
"""
return "No key {} to delete".format(key) | 0.514888 | 0.263173 |
from pg2avro import get_avro_schema, ColumnMapping, get_avro_row_dict
from sqlalchemy import (
Column,
BIGINT,
BOOLEAN,
CHAR,
DATE,
INTEGER,
NUMERIC,
SMALLINT,
TEXT,
VARCHAR,
TIME,
)
from sqlalchemy.dialects.postgresql import (
ARRAY,
INTERVAL,
TIMESTAMP,
ENUM,
UUID,
JSONB,
JSON,
DOUBLE_PRECISION,
)
from typing import Optional
def test_get_avro_schema_sqlalchemy():
"""
Test sqlalchemy integration.
TODO: Cover all sql/postgres types.
"""
custom_enum_type = ("value_1", "value_2")
columns = [
Column(SMALLINT, name="smallint", nullable=False),
Column(BIGINT, name="bigint", nullable=False),
Column(INTEGER, name="integer", nullable=False),
Column(NUMERIC(10, 2), name="numeric", nullable=False),
Column(NUMERIC(10, 10), name="numeric_to_double", nullable=False),
Column(NUMERIC, name="numeric_defaults", nullable=False),
Column(NUMERIC, name="numeric_nullable", nullable=True),
Column(DOUBLE_PRECISION, name="double_precision", nullable=False),
Column(BOOLEAN, name="bool", nullable=False),
Column(DATE, name="date", nullable=False),
Column(TIME, name="time", nullable=False),
Column(TIMESTAMP, name="timestamp", nullable=False),
Column(CHAR, name="char", nullable=False),
Column(TEXT, name="text", nullable=True),
Column(VARCHAR(255), primary_key=True, name="varchar", nullable=False),
Column(ARRAY(VARCHAR), name="array", nullable=False),
Column(INTERVAL, name="interval", nullable=False),
Column(ENUM(name="some_enum", *custom_enum_type), name="enum", nullable=False),
Column(UUID, name="uuid", nullable=False),
Column(JSONB, name="jsonb", nullable=False),
Column(JSON, name="json", nullable=False),
]
table_name = "test_table"
namespace = "test_namespace"
expected = {
"name": table_name,
"namespace": namespace,
"type": "record",
"fields": [
{"name": "smallint", "type": "int"},
{"name": "bigint", "type": "long"},
{"name": "integer", "type": "int"},
{
"name": "numeric",
"type": {
"logicalType": "decimal",
"type": "bytes",
"precision": 10,
"scale": 2,
},
},
{"name": "numeric_to_double", "type": "double"},
{
"name": "numeric_defaults",
"type": {
"logicalType": "decimal",
"type": "bytes",
"precision": 38,
"scale": 9,
},
},
{
"name": "numeric_nullable",
"type": [
"null",
{
"logicalType": "decimal",
"type": "bytes",
"precision": 38,
"scale": 9,
},
],
},
{"name": "double_precision", "type": "double"},
{"name": "bool", "type": "boolean"},
{"name": "date", "type": {"logicalType": "date", "type": "int"}},
{
"name": "time",
"type": {"logicalType": "timestamp-millis", "type": "int"},
},
{
"name": "timestamp",
"type": {"logicalType": "timestamp-millis", "type": "long"},
},
{"name": "char", "type": "string"},
{"name": "text", "type": ["null", "string"]},
{"name": "varchar", "type": "string"},
{"name": "array", "type": {"items": "string", "type": "array"}},
{"name": "interval", "type": "string"},
{"name": "enum", "type": "string"},
{"name": "uuid", "type": "string"},
{"name": "jsonb", "type": "string"},
{"name": "json", "type": "string"},
],
}
actual = get_avro_schema(table_name, namespace, columns)
assert expected == actual
def test_get_avro_schema_custom_mapping():
    """
    Test custom integration using mapping class.

    Columns are plain objects with non-standard attribute names; the
    ColumnMapping passed to get_avro_schema tells it how to read them.
    TODO: Cover all sql/postgres types.
    """
    # Minimal stand-in for an arbitrary column descriptor (e.g. an ORM row).
    class Col:
        def __init__(
            self,
            n: str,
            un: str,
            nul: bool,
            np: Optional[int] = None,
            ns: Optional[int] = None,
        ):
            self.n = n
            self.un = un
            self.nul = nul
            self.np = np
            self.ns = ns
    # One column per Postgres udt/type name whose Avro mapping is asserted below.
    columns = [
        Col(n="smallint", un="int2", nul=False),
        Col(n="bigint", un="int8", nul=False),
        Col(n="integer", un="int4", nul=False),
        Col(n="numeric", un="numeric", nul=False, np=3, ns=7),
        Col(n="numeric_to_double", un="numeric", nul=False, np=10, ns=10),
        Col(n="numeric_defaults", un="numeric", nul=False),
        Col(n="numeric_nullable", un="numeric", nul=True),
        Col(n="double_precision", un="float8", nul=False),
        Col(n="real", un="float4", nul=False),
        Col(n="bool", un="bool", nul=False),
        Col(n="char", un="char", nul=False),
        Col(n="bpchar", un="bpchar", nul=False),
        Col(n="varchar", un="varchar", nul=False),
        Col(n="array", un="_varchar", nul=False),
        Col(n="array_n", un="_varchar", nul=True),
        Col(n="date", un="date", nul=False),
        Col(n="time", un="time", nul=False),
        Col(n="timestamp", un="timestamp", nul=False),
        Col(n="enum", un="custom_type", nul=False),
        Col(n="uuid", un="uuid", nul=False),
        Col(n="json", un="json", nul=False),
        Col(n="jsonb", un="jsonb", nul=False),
    ]
    table_name = "test_table"
    namespace = "test_namespace"
    expected = {
        "name": table_name,
        "namespace": namespace,
        "type": "record",
        "fields": [
            {"name": "smallint", "type": "int"},
            {"name": "bigint", "type": "long"},
            {"name": "integer", "type": "int"},
            {
                "name": "numeric",
                "type": {
                    "logicalType": "decimal",
                    "type": "bytes",
                    "precision": 3,
                    "scale": 7,
                },
            },
            # numeric(10, 10) is expected to fall back to plain double.
            {"name": "numeric_to_double", "type": "double"},
            {
                "name": "numeric_defaults",
                "type": {
                    "logicalType": "decimal",
                    "type": "bytes",
                    "precision": 38,
                    "scale": 9,
                },
            },
            # A nullable column becomes a union with "null" first.
            {
                "name": "numeric_nullable",
                "type": [
                    "null",
                    {
                        "logicalType": "decimal",
                        "type": "bytes",
                        "precision": 38,
                        "scale": 9,
                    },
                ],
            },
            {"name": "double_precision", "type": "double"},
            {"name": "real", "type": "float"},
            {"name": "bool", "type": "boolean"},
            {"name": "char", "type": "string"},
            {"name": "bpchar", "type": "string"},
            {"name": "varchar", "type": "string"},
            {"name": "array", "type": {"items": "string", "type": "array"}},
            {"name": "array_n", "type": ["null", {"items": "string", "type": "array"}]},
            {"name": "date", "type": {"logicalType": "date", "type": "int"}},
            {
                "name": "time",
                "type": {"logicalType": "timestamp-millis", "type": "int"},
            },
            {
                "name": "timestamp",
                "type": {"logicalType": "timestamp-millis", "type": "long"},
            },
            {"name": "enum", "type": "string"},
            {"name": "uuid", "type": "string"},
            {"name": "json", "type": "string"},
            {"name": "jsonb", "type": "string"},
        ],
    }
    actual = get_avro_schema(
        table_name,
        namespace,
        columns,
        # Maps pg2avro's expected attribute names onto Col's short names.
        ColumnMapping(
            name="n",
            type="un",
            nullable="nul",
            numeric_precision="np",
            numeric_scale="ns",
        ),
    )
    assert expected == actual
def test_mapping_overrides():
    """
    Test mapping overrides

    Checks both the generated schema (pg_type override) and row conversion
    (python_type override); an override key matching no column must be ignored.
    """
    from pg2avro.pg2avro import Column
    table_name = "test_table"
    namespace = "test_namespace"
    columns = [
        Column(name="int_to_string", type="int"),
        Column(name="string_to_numeric", type="string"),
        Column(name="not_overriden", type="int"),
        Column(name="numeric_to_float", type="numeric"),
        Column(name="array_to_string", type="_varchar"),
        Column(name="string_to_array", type="varchar"),
    ]
    # Per-column overrides: pg_type drives the schema, python_type the row
    # value conversion.
    overrides = {
        "int_to_string": {"pg_type": "string", "python_type": str},
        "string_to_numeric": {"pg_type": "numeric", "python_type": float},
        "not_matching_override_name": {"pg_type": "int", "python_type": int},
        "numeric_to_float": {"pg_type": "float8", "python_type": float},
        "array_to_string": {"pg_type": "string", "python_type": str},
        "string_to_array": {"pg_type": "_string", "python_type": list},
    }
    expected_schema = {
        "name": table_name,
        "namespace": namespace,
        "type": "record",
        "fields": [
            {"name": "int_to_string", "type": ["null", "string"]},
            {
                "name": "string_to_numeric",
                "type": [
                    "null",
                    {
                        "type": "bytes",
                        "logicalType": "decimal",
                        "precision": 38,
                        "scale": 9,
                    },
                ],
            },
            {"name": "not_overriden", "type": ["null", "int"]},
            {"name": "numeric_to_float", "type": ["null", "double"]},
            {"name": "array_to_string", "type": ["null", "string"]},
            {
                "name": "string_to_array",
                "type": ["null", {"type": "array", "items": "string"}],
            },
        ],
    }
    schema = get_avro_schema(
        table_name, namespace, columns, mapping_overrides=overrides
    )
    assert expected_schema == schema
    # Now data
    rows_data = [
        {
            "int_to_string": 1,
            "string_to_numeric": "2.0",
            "not_overriden": 3,
            "numeric_to_float": 0.12345678910,
            "array_to_string": [1, 2, "a", "b"],
            "string_to_array": "asd",
        },
        {
            "int_to_string": None,
            "string_to_numeric": None,
            "not_overriden": None,
            "numeric_to_float": None,
            "array_to_string": None,
            "string_to_array": None,
        },
    ]
    # Each value is passed through the override's python_type; None survives
    # untouched since every field is nullable.
    expected = [
        {
            "int_to_string": "1",
            "string_to_numeric": 2.0,
            "not_overriden": 3,
            "numeric_to_float": 0.12345678910,
            "array_to_string": "[1, 2, 'a', 'b']",
            "string_to_array": ["a", "s", "d"],
        },
        {
            "int_to_string": None,
            "string_to_numeric": None,
            "not_overriden": None,
            "numeric_to_float": None,
            "array_to_string": None,
            "string_to_array": None,
        },
    ]
    actual = [get_avro_row_dict(r, schema, overrides) for r in rows_data]
assert expected == actual | tests/test_schema_types.py | from pg2avro import get_avro_schema, ColumnMapping, get_avro_row_dict
from sqlalchemy import (
Column,
BIGINT,
BOOLEAN,
CHAR,
DATE,
INTEGER,
NUMERIC,
SMALLINT,
TEXT,
VARCHAR,
TIME,
)
from sqlalchemy.dialects.postgresql import (
ARRAY,
INTERVAL,
TIMESTAMP,
ENUM,
UUID,
JSONB,
JSON,
DOUBLE_PRECISION,
)
from typing import Optional
def test_get_avro_schema_sqlalchemy():
    """
    Test sqlalchemy integration.

    Builds one sqlalchemy Column per type and asserts the exact Avro record
    schema produced by get_avro_schema.
    TODO: Cover all sql/postgres types.
    """
    custom_enum_type = ("value_1", "value_2")
    columns = [
        Column(SMALLINT, name="smallint", nullable=False),
        Column(BIGINT, name="bigint", nullable=False),
        Column(INTEGER, name="integer", nullable=False),
        Column(NUMERIC(10, 2), name="numeric", nullable=False),
        Column(NUMERIC(10, 10), name="numeric_to_double", nullable=False),
        Column(NUMERIC, name="numeric_defaults", nullable=False),
        Column(NUMERIC, name="numeric_nullable", nullable=True),
        Column(DOUBLE_PRECISION, name="double_precision", nullable=False),
        Column(BOOLEAN, name="bool", nullable=False),
        Column(DATE, name="date", nullable=False),
        Column(TIME, name="time", nullable=False),
        Column(TIMESTAMP, name="timestamp", nullable=False),
        Column(CHAR, name="char", nullable=False),
        Column(TEXT, name="text", nullable=True),
        Column(VARCHAR(255), primary_key=True, name="varchar", nullable=False),
        Column(ARRAY(VARCHAR), name="array", nullable=False),
        Column(INTERVAL, name="interval", nullable=False),
        Column(ENUM(name="some_enum", *custom_enum_type), name="enum", nullable=False),
        Column(UUID, name="uuid", nullable=False),
        Column(JSONB, name="jsonb", nullable=False),
        Column(JSON, name="json", nullable=False),
    ]
    table_name = "test_table"
    namespace = "test_namespace"
    expected = {
        "name": table_name,
        "namespace": namespace,
        "type": "record",
        "fields": [
            {"name": "smallint", "type": "int"},
            {"name": "bigint", "type": "long"},
            {"name": "integer", "type": "int"},
            {
                "name": "numeric",
                "type": {
                    "logicalType": "decimal",
                    "type": "bytes",
                    "precision": 10,
                    "scale": 2,
                },
            },
            # NUMERIC(10, 10) is expected to fall back to plain double.
            {"name": "numeric_to_double", "type": "double"},
            {
                "name": "numeric_defaults",
                "type": {
                    "logicalType": "decimal",
                    "type": "bytes",
                    "precision": 38,
                    "scale": 9,
                },
            },
            # A nullable column becomes a union with "null" first.
            {
                "name": "numeric_nullable",
                "type": [
                    "null",
                    {
                        "logicalType": "decimal",
                        "type": "bytes",
                        "precision": 38,
                        "scale": 9,
                    },
                ],
            },
            {"name": "double_precision", "type": "double"},
            {"name": "bool", "type": "boolean"},
            {"name": "date", "type": {"logicalType": "date", "type": "int"}},
            {
                "name": "time",
                "type": {"logicalType": "timestamp-millis", "type": "int"},
            },
            {
                "name": "timestamp",
                "type": {"logicalType": "timestamp-millis", "type": "long"},
            },
            {"name": "char", "type": "string"},
            {"name": "text", "type": ["null", "string"]},
            {"name": "varchar", "type": "string"},
            {"name": "array", "type": {"items": "string", "type": "array"}},
            {"name": "interval", "type": "string"},
            {"name": "enum", "type": "string"},
            {"name": "uuid", "type": "string"},
            {"name": "jsonb", "type": "string"},
            {"name": "json", "type": "string"},
        ],
    }
    actual = get_avro_schema(table_name, namespace, columns)
    assert expected == actual
def test_get_avro_schema_custom_mapping():
"""
Test custom integration using mapping class.
TODO: Cover all sql/postgres types.
"""
class Col:
def __init__(
self,
n: str,
un: str,
nul: bool,
np: Optional[int] = None,
ns: Optional[int] = None,
):
self.n = n
self.un = un
self.nul = nul
self.np = np
self.ns = ns
columns = [
Col(n="smallint", un="int2", nul=False),
Col(n="bigint", un="int8", nul=False),
Col(n="integer", un="int4", nul=False),
Col(n="numeric", un="numeric", nul=False, np=3, ns=7),
Col(n="numeric_to_double", un="numeric", nul=False, np=10, ns=10),
Col(n="numeric_defaults", un="numeric", nul=False),
Col(n="numeric_nullable", un="numeric", nul=True),
Col(n="double_precision", un="float8", nul=False),
Col(n="real", un="float4", nul=False),
Col(n="bool", un="bool", nul=False),
Col(n="char", un="char", nul=False),
Col(n="bpchar", un="bpchar", nul=False),
Col(n="varchar", un="varchar", nul=False),
Col(n="array", un="_varchar", nul=False),
Col(n="array_n", un="_varchar", nul=True),
Col(n="date", un="date", nul=False),
Col(n="time", un="time", nul=False),
Col(n="timestamp", un="timestamp", nul=False),
Col(n="enum", un="custom_type", nul=False),
Col(n="uuid", un="uuid", nul=False),
Col(n="json", un="json", nul=False),
Col(n="jsonb", un="jsonb", nul=False),
]
table_name = "test_table"
namespace = "test_namespace"
expected = {
"name": table_name,
"namespace": namespace,
"type": "record",
"fields": [
{"name": "smallint", "type": "int"},
{"name": "bigint", "type": "long"},
{"name": "integer", "type": "int"},
{
"name": "numeric",
"type": {
"logicalType": "decimal",
"type": "bytes",
"precision": 3,
"scale": 7,
},
},
{"name": "numeric_to_double", "type": "double"},
{
"name": "numeric_defaults",
"type": {
"logicalType": "decimal",
"type": "bytes",
"precision": 38,
"scale": 9,
},
},
{
"name": "numeric_nullable",
"type": [
"null",
{
"logicalType": "decimal",
"type": "bytes",
"precision": 38,
"scale": 9,
},
],
},
{"name": "double_precision", "type": "double"},
{"name": "real", "type": "float"},
{"name": "bool", "type": "boolean"},
{"name": "char", "type": "string"},
{"name": "bpchar", "type": "string"},
{"name": "varchar", "type": "string"},
{"name": "array", "type": {"items": "string", "type": "array"}},
{"name": "array_n", "type": ["null", {"items": "string", "type": "array"}]},
{"name": "date", "type": {"logicalType": "date", "type": "int"}},
{
"name": "time",
"type": {"logicalType": "timestamp-millis", "type": "int"},
},
{
"name": "timestamp",
"type": {"logicalType": "timestamp-millis", "type": "long"},
},
{"name": "enum", "type": "string"},
{"name": "uuid", "type": "string"},
{"name": "json", "type": "string"},
{"name": "jsonb", "type": "string"},
],
}
actual = get_avro_schema(
table_name,
namespace,
columns,
ColumnMapping(
name="n",
type="un",
nullable="nul",
numeric_precision="np",
numeric_scale="ns",
),
)
assert expected == actual
def test_mapping_overrides():
"""
Test mapping overrides
"""
from pg2avro.pg2avro import Column
table_name = "test_table"
namespace = "test_namespace"
columns = [
Column(name="int_to_string", type="int"),
Column(name="string_to_numeric", type="string"),
Column(name="not_overriden", type="int"),
Column(name="numeric_to_float", type="numeric"),
Column(name="array_to_string", type="_varchar"),
Column(name="string_to_array", type="varchar"),
]
overrides = {
"int_to_string": {"pg_type": "string", "python_type": str},
"string_to_numeric": {"pg_type": "numeric", "python_type": float},
"not_matching_override_name": {"pg_type": "int", "python_type": int},
"numeric_to_float": {"pg_type": "float8", "python_type": float},
"array_to_string": {"pg_type": "string", "python_type": str},
"string_to_array": {"pg_type": "_string", "python_type": list},
}
expected_schema = {
"name": table_name,
"namespace": namespace,
"type": "record",
"fields": [
{"name": "int_to_string", "type": ["null", "string"]},
{
"name": "string_to_numeric",
"type": [
"null",
{
"type": "bytes",
"logicalType": "decimal",
"precision": 38,
"scale": 9,
},
],
},
{"name": "not_overriden", "type": ["null", "int"]},
{"name": "numeric_to_float", "type": ["null", "double"]},
{"name": "array_to_string", "type": ["null", "string"]},
{
"name": "string_to_array",
"type": ["null", {"type": "array", "items": "string"}],
},
],
}
schema = get_avro_schema(
table_name, namespace, columns, mapping_overrides=overrides
)
assert expected_schema == schema
# Now data
rows_data = [
{
"int_to_string": 1,
"string_to_numeric": "2.0",
"not_overriden": 3,
"numeric_to_float": 0.12345678910,
"array_to_string": [1, 2, "a", "b"],
"string_to_array": "asd",
},
{
"int_to_string": None,
"string_to_numeric": None,
"not_overriden": None,
"numeric_to_float": None,
"array_to_string": None,
"string_to_array": None,
},
]
expected = [
{
"int_to_string": "1",
"string_to_numeric": 2.0,
"not_overriden": 3,
"numeric_to_float": 0.12345678910,
"array_to_string": "[1, 2, 'a', 'b']",
"string_to_array": ["a", "s", "d"],
},
{
"int_to_string": None,
"string_to_numeric": None,
"not_overriden": None,
"numeric_to_float": None,
"array_to_string": None,
"string_to_array": None,
},
]
actual = [get_avro_row_dict(r, schema, overrides) for r in rows_data]
assert expected == actual | 0.606615 | 0.319259 |
import sys, os, shutil, subprocess, pathlib
import json, re
# Regex used by checkparam()/readparam(): a letter followed by word characters.
IDENTIFIER_PAT = '[a-zA-Z]\\w*'
# Repository-wide config, populated by loadgconf() (via setroot()/setprob()).
gconf = None
# Per-problem config, populated by setprob().
pconf = None
# Print to stderr, for diagnostics that must not pollute stdout.
printe = lambda *args : print(*args, file=sys.stderr)
def loadgconf():
    """Load the repository configuration into the global ``gconf``.

    Reads ``config.json`` at the repository root, then overlays any keys
    from ``private/config.json`` (private values win on conflicts).
    """
    global gconf
    root = getroot()
    gconf = load_meta(root + "/config.json")
    private_conf = load_meta(root + "/private/config.json", default=dict())
    gconf.update(private_conf)
def isroot():
    """Return True when the current working directory is the p5c repo root."""
    return pathlib.Path('.p5c').exists()
def getroot():
    """Return the relative path from the cwd to the repository root.

    Walks the cwd and its ancestors looking for a ``.p5c`` marker and
    returns './', './../', './../../', ... accordingly, or None when no
    ancestor is a p5c repository.
    """
    cwd = pathlib.Path.cwd()
    for depth, candidate in enumerate([cwd] + list(cwd.parents)):
        if (candidate / '.p5c').exists():
            return './' + '../' * depth
    return None
def setroot():
    """Chdir to the repository root and load the global configuration.

    Dies with an error message when the cwd is not inside a p5c repository.
    """
    root = getroot()
    if root is None:
        die('P5C: not a p5c repository')
    os.chdir(root)
    loadgconf()
def getprob():
    """Locate the enclosing p5c problem directory.

    Returns a tuple of (relative path from the cwd to the directory holding
    the ``.p5c-prob`` marker, that directory's name), or None when the cwd
    is not inside a problem directory.
    """
    cwd = pathlib.Path.cwd()
    for depth, candidate in enumerate([cwd] + list(cwd.parents)):
        if (candidate / '.p5c-prob').exists():
            return ('./' + '../' * depth, candidate.name)
    return None
def setprob():
    """Chdir to the enclosing p5c problem directory, load its metadata into
    the global ``pconf`` plus the repository config, and return the problem
    directory's name.  Dies when the cwd is not inside a problem directory.
    """
    global pconf
    pn = getprob()
    if not pn: die('P5C: not a p5c problem directory')
    os.chdir(pn[0])
    # NOTE(review): load_meta() receives a directory path here and relies on
    # '<dir>/meta.json' existing; if it does not, load_meta will try to
    # json.dump into the directory path itself and fail -- confirm intended.
    pconf = load_meta(pn[0], default=dict())
    loadgconf()
    return pn[1]
def checkparam(s, pat=IDENTIFIER_PAT):
    """Die with an error message unless *s* fully matches the regex *pat*."""
    if re.fullmatch(pat, s) is None:
        die("input should match pattern '{0}'".format(pat))
def readparam(prompt='', pat=None, default=None):
    """Prompt on stdin until an acceptable value is entered, and return it.

    Empty input returns *default* when one is given, otherwise the user is
    re-prompted.  When *pat* is given, non-empty input must fully match it.
    """
    while True:
        value = input(prompt)
        if value == '':
            if default is not None:
                return default
            printe("input should not be empty!")
            continue
        if pat is not None and not re.fullmatch(pat, value):
            printe("input should match pattern '{0}'".format(pat))
            continue
        return value
def die(*args):
    """Print *args* to stderr and terminate the process with exit code 1."""
    printe(*args)
    # sys.exit is always available; the bare exit() builtin is provided by the
    # site module and is absent under `python -S` or in frozen interpreters.
    sys.exit(1)
def commit(msg, path='.') :
    """Stage *path* and commit with message *msg*; die if either git call fails."""
    if subprocess.call(['git', 'add', '--verbose', path]) != 0 :
        die("git: failed to add changes")
    # NOTE(review): `git commit -am` stages and commits ALL tracked
    # modifications, not only *path*, which makes the targeted add above
    # partly redundant -- confirm the repo-wide commit is intended.
    if subprocess.call(['git', 'commit', '-am', msg]) != 0 :
        die("git: failed to commit changes")
def load_meta(fname='meta.json', default=[]) :
    """Load JSON metadata from *fname* and return it.

    *fname* may also name a directory containing a ``meta.json``.  When the
    file does not exist yet, it is seeded with *default* and the JSON
    round-trip of *default* is returned.

    Note: *default* is never mutated, so the mutable default argument is
    harmless here (kept for interface compatibility).
    """
    if not pathlib.Path(fname).is_file() :
        if pathlib.Path(fname + '/meta.json').is_file() :
            fname += '/meta.json'
        else :
            # Seed the missing file; use `with` so the handle is closed
            # promptly instead of being leaked to the garbage collector.
            with open(fname, 'w') as f:
                json.dump(default, f, indent=2)
    with open(fname) as f:
        return json.load(f)
def save_meta(meta, fname='meta.json') :
    """Serialize *meta* as JSON to *fname*, or to ``<fname>/meta.json`` when
    *fname* is a directory."""
    if pathlib.Path(fname).is_dir() : fname += '/meta.json'
json.dump(meta, open(fname, 'w'), indent=2) | bin/common.py | import sys, os, shutil, subprocess, pathlib
import json, re
IDENTIFIER_PAT = '[a-zA-Z]\\w*'
gconf = None
pconf = None
printe = lambda *args : print(*args, file=sys.stderr)
def loadgconf() :
global gconf
gconf = load_meta(getroot() + "/config.json")
pgconf:dict = load_meta(getroot() + "/private/config.json", default=dict())
for k, v in pgconf.items() :
gconf[k] = v
def isroot() :
return os.path.exists('.p5c')
def getroot() :
path = pathlib.Path.cwd()
this = './'
for p in [path] + list(path.parents) :
if p.joinpath('.p5c').exists() :
return this
this += '../'
return None
def setroot() :
global gconf
root = getroot()
if not root : die('P5C: not a p5c repository')
os.chdir(root)
loadgconf()
def getprob():
path = pathlib.Path.cwd()
this = './'
for p in [path] + list(path.parents):
if p.joinpath('.p5c-prob').exists():
return (this, p.name)
this += '../'
return None
def setprob():
global pconf
pn = getprob()
if not pn: die('P5C: not a p5c problem directory')
os.chdir(pn[0])
pconf = load_meta(pn[0], default=dict())
loadgconf()
return pn[1]
def checkparam(s, pat=IDENTIFIER_PAT) :
if not re.fullmatch(pat, s) :
die("input should match pattern '{0}'".format(pat))
def readparam(prompt='', pat=None, default=None) :
flag = False
while not flag :
s = input(prompt)
if s == '' :
if default is not None :
s = default
flag = True
else :
printe("input should not be empty!")
elif pat is not None :
if not re.fullmatch(pat, s) :
printe("input should match pattern '{0}'".format(pat))
else :
flag = True
else :
flag = True
return s
def die(*args) :
printe(*args)
exit(1)
def commit(msg, path='.') :
if subprocess.call(['git', 'add', '--verbose', path]) != 0 :
die("git: failed to add changes")
if subprocess.call(['git', 'commit', '-am', msg]) != 0 :
die("git: failed to commit changes")
def load_meta(fname='meta.json', default=[]) :
if not pathlib.Path(fname).is_file() :
if pathlib.Path(fname + '/meta.json').is_file() :
fname += '/meta.json'
else :
json.dump(default, open(fname, 'w'), indent=2)
return json.load(open(fname))
def save_meta(meta, fname='meta.json') :
if pathlib.Path(fname).is_dir() : fname += '/meta.json'
json.dump(meta, open(fname, 'w'), indent=2) | 0.105879 | 0.076546 |
import numpy as np
from scipy import signal
from nn.module import Module
class Linear(Module):
    """Fully-connected (affine) layer: output = input @ W.T + b.

    A common name is fully-connected layer, InnerProductLayer in caffe.
    Operates on 2D input of shape (n_samples, n_features).
    """

    def __init__(self, n_in, n_out):
        super().__init__()
        # Uniform initialization in [-1/sqrt(n_in), 1/sqrt(n_in)].
        bound = 1. / np.sqrt(n_in)
        self.W = np.random.uniform(-bound, bound, size=(n_out, n_in))
        self.b = np.random.uniform(-bound, bound, size=n_out)
        self.gradW = np.zeros_like(self.W)
        self.gradb = np.zeros_like(self.b)

    def updateOutput(self, input):
        # Affine transform of the whole batch at once.
        self.output = input @ self.W.T + self.b
        return self.output

    def updateGradInput(self, input, gradOutput):
        # dL/dx = dL/dy @ W
        self.gradInput = gradOutput @ self.W
        return self.gradInput

    def accGradParameters(self, input, gradOutput):
        # Per-sample outer products g_i x_i^T, averaged over the batch.
        # Written in-place (out=...) so external references to gradW/gradb
        # held by an optimizer remain valid.
        outer = gradOutput[:, :, None] * input[:, None, :]
        np.mean(outer, axis=0, out=self.gradW)
        np.mean(gradOutput, axis=0, out=self.gradb)

    def zeroGradParameters(self):
        self.gradW.fill(0)
        self.gradb.fill(0)

    def getParameters(self):
        return [self.W, self.b]

    def getGradParameters(self):
        return [self.gradW, self.gradb]

    def __repr__(self):
        n_out, n_in = self.W.shape
        return 'Linear %d -> %d' % (n_in, n_out)
class SoftMax(Module):
    """Row-wise softmax over axis 1."""

    def __init__(self):
        super().__init__()

    def updateOutput(self, input):
        # Subtract the row maximum before exponentiating; this leaves the
        # result unchanged but avoids overflow in exp().
        shifted = input - input.max(axis=1, keepdims=True)
        exps = np.exp(shifted)
        self.output = exps / exps.sum(axis=1, keepdims=True)
        return self.output

    def updateGradInput(self, input, gradOutput):
        # Jacobian-vector product per row: s * (g - <s, g>).
        inner = (self.output * gradOutput).sum(axis=1, keepdims=True)
        self.gradInput = self.output * (gradOutput - inner)
        return self.gradInput

    def __repr__(self):
        return "SoftMax"
class LogSoftMax(Module):
    """Row-wise log-softmax over axis 1."""

    def __init__(self):
        super().__init__()

    def updateOutput(self, input):
        # log softmax(x) = (x - m) - log(sum(exp(x - m))) with m the row max,
        # which keeps exp() from overflowing.
        shifted = input - input.max(axis=1, keepdims=True)
        self.output = shifted - np.log(np.exp(shifted).sum(axis=1, keepdims=True))
        return self.output

    def updateGradInput(self, input, gradOutput):
        # d/dx log softmax = g - softmax(x) * sum(g), per row.
        shifted = input - input.max(axis=1, keepdims=True)
        exps = np.exp(shifted)
        probs = exps / exps.sum(axis=1, keepdims=True)
        self.gradInput = gradOutput - probs * gradOutput.sum(axis=1, keepdims=True)
        return self.gradInput

    def __repr__(self):
        return "LogSoftMax"
class BatchNormalization(Module):
    """Batch normalization without a learnable affine part (pair it with
    ChannelwiseScaling for gamma/beta).  Uses batch statistics when training
    and exponential moving averages at inference time.
    """
    EPS = 1e-3  # added to the variance for numerical stability
    def __init__(self, alpha=0.2):
        super().__init__()
        # Moving-average momentum: new = alpha * old + (1 - alpha) * batch.
        self.alpha = alpha
        self.moving_mean = None
        self.moving_variance = None
    def updateOutput(self, input):
        # Lazily allocate the running statistics on the first batch.
        if self.moving_mean is None:
            self.moving_mean = np.zeros((1, input.shape[1]))
            self.moving_variance = np.zeros((1, input.shape[1]))
        if self.training:
            batch_mean = np.mean(input, axis=0, keepdims=True)
            batch_variance = np.var(input, axis=0, keepdims=True)
            # In-place moving-average update, computed as
            # alpha * (moving - batch) + batch == alpha*moving + (1-alpha)*batch.
            self.moving_mean = np.subtract(self.moving_mean, batch_mean)
            np.multiply(self.alpha, self.moving_mean, out=self.moving_mean)
            np.add(self.moving_mean, batch_mean, out=self.moving_mean)
            self.moving_variance = np.subtract(self.moving_variance, batch_variance)
            np.multiply(self.alpha, self.moving_variance, out=self.moving_variance)
            np.add(self.moving_variance, batch_variance, out=self.moving_variance)
            mean = batch_mean
            var = batch_variance
        else:
            mean = self.moving_mean
            var = self.moving_variance
        # Normalize: (x - mean) / sqrt(var + EPS).
        self.output = input - mean
        np.divide(self.output, np.sqrt(var + self.EPS), out=self.output)
        return self.output
    def updateGradInput(self, input, gradOutput):
        # Standard batch-norm backward pass.  Recomputes the batch statistics,
        # so it is only consistent with a forward pass made in training mode.
        batch_mean = np.mean(input, axis=0, keepdims=True)
        batch_variance_eps = np.var(input, axis=0, keepdims=True)
        np.add(batch_variance_eps, self.EPS, out=batch_variance_eps)
        # dL/dvar = -0.5 * sum(g * (x - mean)) / (var + eps)^1.5
        grad_var = np.sum(np.multiply(gradOutput, input - batch_mean), axis=0, keepdims=True)
        np.multiply(-0.5, grad_var, out=grad_var)
        np.divide(grad_var, np.power(batch_variance_eps, 1.5), out=grad_var)
        # dL/dmean = -sum(g) / sqrt(var + eps)
        grad_mean = np.sum(gradOutput, axis=0, keepdims=True)
        np.multiply(-1, grad_mean, out=grad_mean)
        np.divide(grad_mean, np.power(batch_variance_eps, 0.5), out=grad_mean)
        # dL/dx = g / sqrt(var + eps) + (2*(x - mean)*dL/dvar + dL/dmean) / N
        self.gradInput = np.subtract(input, batch_mean)
        np.multiply(2, self.gradInput, out=self.gradInput)
        np.multiply(grad_var, self.gradInput, out=self.gradInput)
        np.add(grad_mean, self.gradInput, out=self.gradInput)
        np.divide(self.gradInput, input.shape[0], out=self.gradInput)
        np.add(self.gradInput, np.divide(gradOutput, np.power(batch_variance_eps, 0.5)), out=self.gradInput)
        return self.gradInput
    def __repr__(self):
        return "BatchNormalization"
class ChannelwiseScaling(Module):
    r"""Per-channel affine transform y = \gamma * x + \beta.

    \gamma and \beta are learnable vectors of length x.shape[-1].
    """

    def __init__(self, n_out):
        super().__init__()
        bound = 1. / np.sqrt(n_out)
        self.gamma = np.random.uniform(-bound, bound, size=n_out)
        self.beta = np.random.uniform(-bound, bound, size=n_out)
        self.gradGamma = np.zeros_like(self.gamma)
        self.gradBeta = np.zeros_like(self.beta)

    def updateOutput(self, input):
        self.output = self.gamma * input + self.beta
        return self.output

    def updateGradInput(self, input, gradOutput):
        # d(gamma*x + beta)/dx = gamma, applied elementwise.
        self.gradInput = self.gamma * gradOutput
        return self.gradInput

    def accGradParameters(self, input, gradOutput):
        # Parameter gradients are summed (not averaged) over the batch axis.
        self.gradBeta = gradOutput.sum(axis=0)
        self.gradGamma = (gradOutput * input).sum(axis=0)

    def zeroGradParameters(self):
        self.gradGamma.fill(0)
        self.gradBeta.fill(0)

    def getParameters(self):
        return [self.gamma, self.beta]

    def getGradParameters(self):
        return [self.gradGamma, self.gradBeta]

    def __repr__(self):
        return "ChannelwiseScaling"
class Dropout(Module):
    """Dropout regularization layer (inverted-dropout scaling).

    NOTE(review): here ``p`` acts as the KEEP probability -- the mask is 1
    with probability ``p`` and kept activations are rescaled by 1/p.  This is
    the opposite of the common convention where ``p`` is the probability of
    dropping a unit; confirm which is intended.
    """
    def __init__(self, p=0.5):
        super().__init__()
        self.p = p
        # Last sampled keep/drop mask, reused by the backward pass.
        self.mask = None
    def updateOutput(self, input):
        if self.training:
            # Bernoulli mask: 1 with probability p (keep), 0 with 1-p (drop).
            self.mask = np.random.choice([0, 1], input.shape, p=[1 - self.p, self.p])
            self.output = np.multiply(input, self.mask)
            # Inverted dropout: rescale during training so inference is identity.
            np.divide(self.output, self.p, out=self.output)
        else:
            self.output = input
        return self.output
    def updateGradInput(self, input, gradOutput):
        # Gradient flows only through kept units, with the same 1/p scaling.
        self.gradInput = np.multiply(gradOutput, self.mask)
        np.divide(self.gradInput, self.p, out=self.gradInput)
        return self.gradInput
    def __repr__(self):
        return "Dropout"
class Conv2d(Module):
    """2D convolution with zero 'same' padding and stride 1, implemented with
    scipy.signal.correlate one output channel at a time.

    NOTE(review): the trailing .squeeze() calls drop ALL singleton axes, so a
    batch of size 1 also loses its batch dimension -- fragile, confirm callers
    never pass N == 1.
    """
    def __init__(self, in_channels, out_channels, kernel_size):
        super().__init__()
        # Odd kernels only, so the 'same' padding is symmetric.
        assert kernel_size % 2 == 1
        stdv = 1. / np.sqrt(in_channels)
        self.W = np.random.uniform(-stdv, stdv, size=(out_channels, in_channels, kernel_size, kernel_size))
        self.b = np.random.uniform(-stdv, stdv, size=(out_channels,))
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.kernel_size = kernel_size
        self.gradW = np.zeros_like(self.W)
        self.gradb = np.zeros_like(self.b)
    def updateOutput(self, input):
        # Zero-pad H and W so the 'valid' correlation preserves spatial size.
        pad_size = self.kernel_size // 2
        pad_input = np.pad(
            input,
            pad_width=[(0, 0), (0, 0), (pad_size, pad_size), (pad_size, pad_size)]
        )
        self.output = []
        for s in range(self.out_channels):
            # (N, C, H+2p, W+2p) corr (1, C, k, k) -> (N, 1, H, W): the kernel
            # spans the full channel axis, so correlate also sums over channels.
            out_channel = signal.correlate(pad_input, self.W[s:s+1], mode='valid')
            np.add(out_channel, self.b[s], out=out_channel)
            self.output.append(out_channel)
        self.output = np.stack(self.output, axis=1).squeeze()
        return self.output
    def updateGradInput(self, input, gradOutput):
        pad_size = self.kernel_size // 2
        pad_grad = np.pad(
            gradOutput,
            pad_width=[(0, 0), (0, 0), (pad_size, pad_size), (pad_size, pad_size)]
        )
        self.gradInput = []
        for k in range(self.in_channels):
            # Transposed convolution: correlate the padded upstream gradient
            # with the spatially flipped (::-1, ::-1) kernels of channel k.
            in_grad = signal.correlate(pad_grad, self.W[:, k][None, :, ::-1, ::-1], mode='valid')
            self.gradInput.append(in_grad)
        self.gradInput = np.stack(self.gradInput, axis=1).squeeze()
        return self.gradInput
    def accGradParameters(self, input, gradOutput):
        pad_size = self.kernel_size // 2
        pad_input = np.pad(
            input,
            pad_width=[(0, 0), (0, 0), (pad_size, pad_size), (pad_size, pad_size)]
        )
        for s in range(self.out_channels):
            # The correlation spans the batch axis too, so w_grad is the SUM
            # over the batch; np.mean over the resulting singleton axis 0 is a
            # no-op.  NOTE(review): gradW is thus a batch sum while gradb below
            # is a batch mean -- the two scalings differ by a factor of N.
            w_grad = signal.correlate(pad_input, gradOutput[:, s:s+1], mode='valid')
            self.gradW[s] = np.mean(w_grad, axis=0)
            self.gradb[s] = np.mean(gradOutput[:, s].sum(axis=(1, 2)))
    def zeroGradParameters(self):
        self.gradW.fill(0)
        self.gradb.fill(0)
    def getParameters(self):
        return [self.W, self.b]
    def getGradParameters(self):
        return [self.gradW, self.gradb]
    def __repr__(self):
        s = self.W.shape
        q = 'Conv2d %d -> %d' % (s[1], s[0])
        return q
class MaxPool2d(Module):
    """Non-overlapping 2D max pooling (stride == kernel_size).

    Input spatial dimensions must be exact multiples of kernel_size.
    NOTE(review): when a pooling window has ties for the maximum, the backward
    pass routes the gradient to EVERY tied element (the equality mask marks
    them all), duplicating gradient mass -- confirm acceptable.
    """
    def __init__(self, kernel_size):
        super().__init__()
        self.kernel_size = kernel_size
        self.gradInput = None
    def updateOutput(self, input):
        input_h, input_w = input.shape[-2:]
        assert input_h % self.kernel_size == 0
        assert input_w % self.kernel_size == 0
        # View as (N, C, H/k, k, W/k, k), then swap axes 3 and 4 so every
        # (k, k) window sits in the two trailing axes.
        reshaped_input = input.reshape(
            input.shape[0],
            input.shape[1],
            input_h // self.kernel_size,
            self.kernel_size,
            input_w // self.kernel_size,
            self.kernel_size
        )
        reshaped_input = np.swapaxes(reshaped_input, 3, 4)
        self.output = np.max(reshaped_input, axis=(4, 5))
        # Boolean mask of max positions, saved for the backward pass.
        self.max_indices = self.output[:, :, :, :, None, None] == reshaped_input
        return self.output
    def updateGradInput(self, input, gradOutput):
        # Scatter the upstream gradient to the max positions, then undo the
        # axis swap and reshape performed in updateOutput.
        self.gradInput = self.max_indices * gradOutput[:, :, :, :, None, None]
        self.gradInput = np.swapaxes(self.gradInput, 3, 4)
        self.gradInput = self.gradInput.reshape(input.shape)
        return self.gradInput
    def __repr__(self):
        q = 'MaxPool2d, kern %d, stride %d' % (self.kernel_size, self.kernel_size)
        return q
class Flatten(Module):
    """Flatten all non-batch dimensions: (N, ...) -> (N, prod(...))."""
    def __init__(self):
        super().__init__()
    def updateOutput(self, input):
        # len(input) is the batch size; -1 infers the flattened feature count.
        self.output = input.reshape(len(input), -1)
        return self.output
    def updateGradInput(self, input, gradOutput):
        # Restore the gradient to the original input shape.
        self.gradInput = gradOutput.reshape(input.shape)
        return self.gradInput
def __repr__(self):
return "Flatten" | nn/layers.py | import numpy as np
from scipy import signal
from nn.module import Module
class Linear(Module):
"""
A module which applies a linear transformation
A common name is fully-connected layer, InnerProductLayer in caffe.
The module should work with 2D input of shape (n_samples, n_feature).
"""
def __init__(self, n_in, n_out):
super().__init__()
# This is a nice initialization
stdv = 1. / np.sqrt(n_in)
self.W = np.random.uniform(-stdv, stdv, size=(n_out, n_in))
self.b = np.random.uniform(-stdv, stdv, size=n_out)
self.gradW = np.zeros_like(self.W)
self.gradb = np.zeros_like(self.b)
def updateOutput(self, input):
self.output = np.dot(input, self.W.T)
np.add(self.output, self.b, out=self.output)
return self.output
def updateGradInput(self, input, gradOutput):
self.gradInput = np.dot(gradOutput, self.W)
return self.gradInput
def accGradParameters(self, input, gradOutput):
batch_grads = np.multiply(gradOutput[:, :, None], input[:, None, :])
np.mean(batch_grads, axis=0, out=self.gradW)
np.mean(gradOutput, axis=0, out=self.gradb)
def zeroGradParameters(self):
self.gradW.fill(0)
self.gradb.fill(0)
def getParameters(self):
return [self.W, self.b]
def getGradParameters(self):
return [self.gradW, self.gradb]
def __repr__(self):
s = self.W.shape
q = 'Linear %d -> %d' % (s[1], s[0])
return q
class SoftMax(Module):
def __init__(self):
super().__init__()
def updateOutput(self, input):
# normalization for numerical stability
self.output = np.subtract(input, np.max(input, axis=1, keepdims=True))
np.exp(self.output, out=self.output)
np.divide(self.output, self.output.sum(axis=1, keepdims=True), out=self.output)
return self.output
def updateGradInput(self, input, gradOutput):
self.gradInput = gradOutput - np.sum(np.multiply(self.output, gradOutput), axis=1, keepdims=True)
np.multiply(self.output, self.gradInput, out=self.gradInput)
return self.gradInput
def __repr__(self):
return "SoftMax"
class LogSoftMax(Module):
def __init__(self):
super().__init__()
def updateOutput(self, input):
# normalization for numerical stability
self.output = np.subtract(input, input.max(axis=1, keepdims=True))
log_sum = np.log(np.sum(np.exp(self.output), axis=1, keepdims=True))
np.subtract(self.output, log_sum, out=self.output)
return self.output
def updateGradInput(self, input, gradOutput):
self.gradInput = np.subtract(input, input.max(axis=1, keepdims=True))
self.gradInput = np.exp(self.gradInput, out=self.gradInput)
exp_sum = self.gradInput.sum(axis=1, keepdims=True)
np.divide(self.gradInput, exp_sum, out=self.gradInput)
np.multiply(self.gradInput, np.sum(gradOutput, axis=1, keepdims=True), out=self.gradInput)
np.subtract(gradOutput, self.gradInput, out=self.gradInput)
return self.gradInput
def __repr__(self):
return "LogSoftMax"
class BatchNormalization(Module):
EPS = 1e-3
def __init__(self, alpha=0.2):
super().__init__()
self.alpha = alpha
self.moving_mean = None
self.moving_variance = None
def updateOutput(self, input):
if self.moving_mean is None:
self.moving_mean = np.zeros((1, input.shape[1]))
self.moving_variance = np.zeros((1, input.shape[1]))
if self.training:
batch_mean = np.mean(input, axis=0, keepdims=True)
batch_variance = np.var(input, axis=0, keepdims=True)
self.moving_mean = np.subtract(self.moving_mean, batch_mean)
np.multiply(self.alpha, self.moving_mean, out=self.moving_mean)
np.add(self.moving_mean, batch_mean, out=self.moving_mean)
self.moving_variance = np.subtract(self.moving_variance, batch_variance)
np.multiply(self.alpha, self.moving_variance, out=self.moving_variance)
np.add(self.moving_variance, batch_variance, out=self.moving_variance)
mean = batch_mean
var = batch_variance
else:
mean = self.moving_mean
var = self.moving_variance
self.output = input - mean
np.divide(self.output, np.sqrt(var + self.EPS), out=self.output)
return self.output
def updateGradInput(self, input, gradOutput):
batch_mean = np.mean(input, axis=0, keepdims=True)
batch_variance_eps = np.var(input, axis=0, keepdims=True)
np.add(batch_variance_eps, self.EPS, out=batch_variance_eps)
grad_var = np.sum(np.multiply(gradOutput, input - batch_mean), axis=0, keepdims=True)
np.multiply(-0.5, grad_var, out=grad_var)
np.divide(grad_var, np.power(batch_variance_eps, 1.5), out=grad_var)
grad_mean = np.sum(gradOutput, axis=0, keepdims=True)
np.multiply(-1, grad_mean, out=grad_mean)
np.divide(grad_mean, np.power(batch_variance_eps, 0.5), out=grad_mean)
self.gradInput = np.subtract(input, batch_mean)
np.multiply(2, self.gradInput, out=self.gradInput)
np.multiply(grad_var, self.gradInput, out=self.gradInput)
np.add(grad_mean, self.gradInput, out=self.gradInput)
np.divide(self.gradInput, input.shape[0], out=self.gradInput)
np.add(self.gradInput, np.divide(gradOutput, np.power(batch_variance_eps, 0.5)), out=self.gradInput)
return self.gradInput
def __repr__(self):
return "BatchNormalization"
class ChannelwiseScaling(Module):
"""
Implements linear transform of input y = \gamma * x + \beta
where \gamma, \beta - learnable vectors of length x.shape[-1]
"""
def __init__(self, n_out):
super().__init__()
stdv = 1. / np.sqrt(n_out)
self.gamma = np.random.uniform(-stdv, stdv, size=n_out)
self.beta = np.random.uniform(-stdv, stdv, size=n_out)
self.gradGamma = np.zeros_like(self.gamma)
self.gradBeta = np.zeros_like(self.beta)
def updateOutput(self, input):
self.output = input * self.gamma + self.beta
return self.output
def updateGradInput(self, input, gradOutput):
self.gradInput = gradOutput * self.gamma
return self.gradInput
def accGradParameters(self, input, gradOutput):
self.gradBeta = np.sum(gradOutput, axis=0)
self.gradGamma = np.sum(gradOutput * input, axis=0)
def zeroGradParameters(self):
self.gradGamma.fill(0)
self.gradBeta.fill(0)
def getParameters(self):
return [self.gamma, self.beta]
def getGradParameters(self):
return [self.gradGamma, self.gradBeta]
def __repr__(self):
return "ChannelwiseScaling"
class Dropout(Module):
def __init__(self, p=0.5):
super().__init__()
self.p = p
self.mask = None
def updateOutput(self, input):
if self.training:
self.mask = np.random.choice([0, 1], input.shape, p=[1 - self.p, self.p])
self.output = np.multiply(input, self.mask)
np.divide(self.output, self.p, out=self.output)
else:
self.output = input
return self.output
def updateGradInput(self, input, gradOutput):
self.gradInput = np.multiply(gradOutput, self.mask)
np.divide(self.gradInput, self.p, out=self.gradInput)
return self.gradInput
def __repr__(self):
return "Dropout"
class Conv2d(Module):
def __init__(self, in_channels, out_channels, kernel_size):
super().__init__()
assert kernel_size % 2 == 1
stdv = 1. / np.sqrt(in_channels)
self.W = np.random.uniform(-stdv, stdv, size=(out_channels, in_channels, kernel_size, kernel_size))
self.b = np.random.uniform(-stdv, stdv, size=(out_channels,))
self.in_channels = in_channels
self.out_channels = out_channels
self.kernel_size = kernel_size
self.gradW = np.zeros_like(self.W)
self.gradb = np.zeros_like(self.b)
def updateOutput(self, input):
pad_size = self.kernel_size // 2
pad_input = np.pad(
input,
pad_width=[(0, 0), (0, 0), (pad_size, pad_size), (pad_size, pad_size)]
)
self.output = []
for s in range(self.out_channels):
out_channel = signal.correlate(pad_input, self.W[s:s+1], mode='valid')
np.add(out_channel, self.b[s], out=out_channel)
self.output.append(out_channel)
self.output = np.stack(self.output, axis=1).squeeze()
return self.output
def updateGradInput(self, input, gradOutput):
pad_size = self.kernel_size // 2
pad_grad = np.pad(
gradOutput,
pad_width=[(0, 0), (0, 0), (pad_size, pad_size), (pad_size, pad_size)]
)
self.gradInput = []
for k in range(self.in_channels):
in_grad = signal.correlate(pad_grad, self.W[:, k][None, :, ::-1, ::-1], mode='valid')
self.gradInput.append(in_grad)
self.gradInput = np.stack(self.gradInput, axis=1).squeeze()
return self.gradInput
def accGradParameters(self, input, gradOutput):
pad_size = self.kernel_size // 2
pad_input = np.pad(
input,
pad_width=[(0, 0), (0, 0), (pad_size, pad_size), (pad_size, pad_size)]
)
for s in range(self.out_channels):
w_grad = signal.correlate(pad_input, gradOutput[:, s:s+1], mode='valid')
self.gradW[s] = np.mean(w_grad, axis=0)
self.gradb[s] = np.mean(gradOutput[:, s].sum(axis=(1, 2)))
def zeroGradParameters(self):
self.gradW.fill(0)
self.gradb.fill(0)
def getParameters(self):
return [self.W, self.b]
def getGradParameters(self):
return [self.gradW, self.gradb]
def __repr__(self):
s = self.W.shape
q = 'Conv2d %d -> %d' % (s[1], s[0])
return q
class MaxPool2d(Module):
def __init__(self, kernel_size):
super().__init__()
self.kernel_size = kernel_size
self.gradInput = None
def updateOutput(self, input):
input_h, input_w = input.shape[-2:]
assert input_h % self.kernel_size == 0
assert input_w % self.kernel_size == 0
reshaped_input = input.reshape(
input.shape[0],
input.shape[1],
input_h // self.kernel_size,
self.kernel_size,
input_w // self.kernel_size,
self.kernel_size
)
reshaped_input = np.swapaxes(reshaped_input, 3, 4)
self.output = np.max(reshaped_input, axis=(4, 5))
self.max_indices = self.output[:, :, :, :, None, None] == reshaped_input
return self.output
def updateGradInput(self, input, gradOutput):
self.gradInput = self.max_indices * gradOutput[:, :, :, :, None, None]
self.gradInput = np.swapaxes(self.gradInput, 3, 4)
self.gradInput = self.gradInput.reshape(input.shape)
return self.gradInput
def __repr__(self):
q = 'MaxPool2d, kern %d, stride %d' % (self.kernel_size, self.kernel_size)
return q
class Flatten(Module):
def __init__(self):
super().__init__()
def updateOutput(self, input):
self.output = input.reshape(len(input), -1)
return self.output
def updateGradInput(self, input, gradOutput):
self.gradInput = gradOutput.reshape(input.shape)
return self.gradInput
def __repr__(self):
return "Flatten" | 0.916433 | 0.448185 |
import time
import pandas as pd
from pandas.testing import assert_frame_equal
from simplepipeline.pipeline import Pipeline
from simplepipeline.map.parallel import par_map, par_map_unpack
from simplepipeline.map.sequential import seq_map, seq_map_unpack
from simplepipeline.task import task, filter_task, set_pipeline, get_pipeline, delete_pipeline
test_list = list(range(1, 5))
@task
def loops(x, constant):
time.sleep(0.1)
return x + constant
@task
def loops_several_arguments(x, constant):
time.sleep(0.1)
return x + constant, x * constant
@filter_task
def df_filter(df):
return df.iloc[-1:]
@filter_task
def list_filter(lst):
new_list = []
for i in range(len(lst)):
if i % 2 == 0:
new_list.append(i)
return new_list
def test_task_exec():
assert loops(1, 2).run() == 3
def test_task_sequential_exec():
expected = [3, 4, 5, 6]
assert seq_map(loops, test_list, constant=2).run() == expected
def test_task_parallel_exec():
expected = [3, 4, 5, 6]
assert par_map(loops, test_list, constant=2).run() == expected
def test_multiple_return_values_seq():
x, y = seq_map_unpack(loops_several_arguments, test_list, constant=2).run()
assert x == [3, 4, 5, 6] and y == [2, 4, 6, 8]
def test_multiple_return_values_par():
x, y = par_map_unpack(loops_several_arguments, test_list, constant=2).run()
assert x == [3, 4, 5, 6] and y == [2, 4, 6, 8]
def test_df_filter():
df = pd.DataFrame({"a": [1, 2, 3, 4, 5]})
result = df_filter(df).run().reset_index(drop=True)
expected = pd.DataFrame({"a": [5]}).reset_index(drop=True)
assert_frame_equal(result, expected)
def test_list_filter():
inp = list(range(0, 10))
result = list_filter(inp).run()
assert len(result) == 5
def test_pipeline():
set_pipeline(Pipeline("test_pipeline"))
assert loops(1, 2).run() == 3
pipeline = get_pipeline()
assert pipeline.executed_tasks() == ["loops"]
delete_pipeline()
assert get_pipeline() is None
def test_pipeline_executed_tasks():
set_pipeline(Pipeline("test_pipeline"))
seq_map(loops, test_list, constant=2).run()
assert get_pipeline().executed_tasks() == ["loops"] | tests/test_all.py | import time
import pandas as pd
from pandas.testing import assert_frame_equal
from simplepipeline.pipeline import Pipeline
from simplepipeline.map.parallel import par_map, par_map_unpack
from simplepipeline.map.sequential import seq_map, seq_map_unpack
from simplepipeline.task import task, filter_task, set_pipeline, get_pipeline, delete_pipeline
test_list = list(range(1, 5))
@task
def loops(x, constant):
time.sleep(0.1)
return x + constant
@task
def loops_several_arguments(x, constant):
time.sleep(0.1)
return x + constant, x * constant
@filter_task
def df_filter(df):
return df.iloc[-1:]
@filter_task
def list_filter(lst):
new_list = []
for i in range(len(lst)):
if i % 2 == 0:
new_list.append(i)
return new_list
def test_task_exec():
assert loops(1, 2).run() == 3
def test_task_sequential_exec():
expected = [3, 4, 5, 6]
assert seq_map(loops, test_list, constant=2).run() == expected
def test_task_parallel_exec():
expected = [3, 4, 5, 6]
assert par_map(loops, test_list, constant=2).run() == expected
def test_multiple_return_values_seq():
x, y = seq_map_unpack(loops_several_arguments, test_list, constant=2).run()
assert x == [3, 4, 5, 6] and y == [2, 4, 6, 8]
def test_multiple_return_values_par():
x, y = par_map_unpack(loops_several_arguments, test_list, constant=2).run()
assert x == [3, 4, 5, 6] and y == [2, 4, 6, 8]
def test_df_filter():
df = pd.DataFrame({"a": [1, 2, 3, 4, 5]})
result = df_filter(df).run().reset_index(drop=True)
expected = pd.DataFrame({"a": [5]}).reset_index(drop=True)
assert_frame_equal(result, expected)
def test_list_filter():
inp = list(range(0, 10))
result = list_filter(inp).run()
assert len(result) == 5
def test_pipeline():
set_pipeline(Pipeline("test_pipeline"))
assert loops(1, 2).run() == 3
pipeline = get_pipeline()
assert pipeline.executed_tasks() == ["loops"]
delete_pipeline()
assert get_pipeline() is None
def test_pipeline_executed_tasks():
set_pipeline(Pipeline("test_pipeline"))
seq_map(loops, test_list, constant=2).run()
assert get_pipeline().executed_tasks() == ["loops"] | 0.445047 | 0.632843 |
import copy
import functools
import getpass
import os
from json import JSONDecodeError
from multiprocessing.pool import ThreadPool
from random import shuffle
from typing import Dict, List
import uuid
from requests import Response
from tqdm import tqdm
from vortexasdk.abstract_client import AbstractVortexaClient
from vortexasdk.api.id import ID
from vortexasdk.endpoints.endpoints import API_URL
from vortexasdk.logger import get_logger
from vortexasdk.retry_session import (
retry_get,
retry_post,
)
from vortexasdk.utils import filter_empty_values
from vortexasdk.version_utils import is_sdk_version_outdated
from vortexasdk.version import __version__
from vortexasdk import __name__ as sdk_pkg_name
logger = get_logger(__name__)
class VortexaClient(AbstractVortexaClient):
"""The API client responsible for calling Vortexa's Public API."""
_DEFAULT_PAGE_LOAD_SIZE = int(1e4)
_N_THREADS = 6
_MAX_ALLOWED_TOTAL = int(1e6)
def __init__(self, **kwargs):
self.api_key = kwargs["api_key"]
def get_reference(self, resource: str, id: ID) -> List[Dict]:
"""Lookup reference data."""
url = self._create_url(f"{resource}/{id}")
response = retry_get(url)
return _handle_response(response)["data"]
def search(self, resource: str, response_type: str, **data) -> List:
"""Search using `resource` using `**data` as filter params."""
url = self._create_url(resource)
payload = self._cleanse_payload(data)
logger.info(f"Payload: {payload}")
# breakdowns do not support paging, the breakdown size is specified explicitly as a request parameter
if response_type == "breakdown":
size = payload.get("breakdown_size", 1000)
response = _send_post_request(url, payload, size=size, offset=0)
ref = response.get("reference", {})
if ref:
return response
else:
return response["data"]
probe_response = _send_post_request(url, payload, size=1, offset=0)
total = self._calculate_total(probe_response)
if total > self._MAX_ALLOWED_TOTAL:
raise Exception(
f"Attempting to query too many records at once. Attempted records: {total}, Max allowed records: {self._MAX_ALLOWED_TOTAL} . "
f"Try reducing the date range to return fewer records."
)
elif total == 1:
# Only one page response, no need to send another request, so return flattened response
return probe_response["data"]
else:
# Multiple pages available, create offsets and fetch all responses
responses = self._process_multiple_pages(
total=total, url=url, payload=payload, data=data
)
flattened = self._flatten_response(responses)
assert len(flattened) == total, (
f"Incorrect number of records returned from API. "
f"Actual: {len(flattened)}, expected: {total}"
)
return flattened
def _create_url(self, path: str) -> str:
return (
f"{API_URL}{path}?_sdk=python_v{__version__}&apikey={self.api_key}"
)
def _process_multiple_pages(
self, total: int, url: str, payload: Dict, data: Dict
) -> List:
size = data.get("size", 1000)
offsets = list(range(0, total, size))
shuffle(offsets)
with tqdm(
total=total, desc="Loading from API", disable=(len(offsets) == 1)
) as pbar:
with ThreadPool(self._N_THREADS) as pool:
logger.info(
f"{total} Results to retrieve."
f" Sending {len(offsets)}"
f" post requests in parallel using {self._N_THREADS} threads."
)
func = functools.partial(
_send_post_request_data,
url=url,
payload=payload,
size=size,
progress_bar=pbar,
)
return pool.map(func, offsets)
@staticmethod
def _cleanse_payload(payload: Dict) -> Dict:
exclude_params = payload.get("exclude", {})
payload["exclude"] = filter_empty_values(exclude_params)
return filter_empty_values(payload)
@staticmethod
def _calculate_total(response: Dict) -> int:
""" Get total number of pages, if total key does not exist, return 1 """
return response.get("total", 1)
@staticmethod
def _flatten_response(response) -> List:
return [x for y in response for x in y]
def _send_post_request_data(
offset, url, payload, size, progress_bar: tqdm
) -> List:
# noinspection PyBroadException
try:
progress_bar.update(size)
except Exception:
logger.warn("Could not update progress bar")
dict_response = _send_post_request(url, payload, size, offset)
return dict_response.get("data", [])
def _send_post_request(url, payload, size, offset) -> Dict:
logger.debug(f"Sending post request, offset: {offset}, size: {size}")
payload_with_offset = copy.deepcopy(payload)
payload_with_offset["offset"] = offset
payload_with_offset["cm_offset"] = offset
payload_with_offset["size"] = size
payload_with_offset["cm_size"] = size
response = retry_post(url, json=payload_with_offset)
return _handle_response(response, payload_with_offset)
def _handle_response(response: Response, payload: Dict = None) -> Dict:
if not response.ok:
logger.error(response.reason)
logger.error(response.status_code)
logger.error(response)
# noinspection PyBroadException
try:
logger.error(response.json())
message = response.json()["message"]
except Exception:
message = ""
pass
logger.error(f"payload: {payload}")
error = f"[{response.status_code} {response.reason}]"
raise ValueError(f"{error} {message}")
else:
try:
json = response.json()
except JSONDecodeError:
logger.error("Could not decode response")
json = {}
except Exception as e:
logger.error(e)
json = {}
return json
__client__ = None
def default_client() -> VortexaClient:
"""Instantiate VortexaClient as global variable."""
global __client__
if __client__ is None:
__client__ = create_client()
return __client__
def create_client() -> VortexaClient:
"""Create new VortexaClient."""
logger.info("Creating new VortexaClient")
api_key = _load_api_key()
verify_api_key_format(api_key)
_warn_user_if_sdk_version_outdated()
return VortexaClient(api_key=api_key)
def set_client(client) -> None:
"""Set the global client, used by all endpoints."""
global __client__
__client__ = client
logger.debug(
f"global __client__ has been set {__client__.__class__.__name__} \n"
)
def _warn_user_if_sdk_version_outdated() -> None:
"""Warn users if their SDK version is outdated"""
try:
latest_sdk_version, sdk_outdated_check = is_sdk_version_outdated()
if sdk_outdated_check:
logger.warning(
f"You are using {sdk_pkg_name} version {__version__}, however version {latest_sdk_version} is available.\n"
f"You should consider upgrading via the 'pip install {sdk_pkg_name} --upgrade' command."
)
except Exception:
logger.warning(
f"Outdated SDK version check could not be completed. \n"
)
def _load_api_key():
"""Read API Key from environment variables else user input"""
try:
return os.environ["VORTEXA_API_KEY"]
except KeyError:
return getpass.getpass("Vortexa API Key: ")
except Exception:
raise KeyError(
"You must either set the VORTEXA_API_KEY environment variable, or interactively enter your Vortexa API key."
" Your API key can be found at https://docs.vortexa.com"
)
def verify_api_key_format(api_key: str) -> None:
"""Verify that the api_key is a valid UUID string"""
try:
uuid.UUID(api_key)
except ValueError:
raise ValueError(
"Incorrect API key set. The Vortexa API key must be of the form <KEY> "
" Your API key can be found at https://docs.vortexa.com"
) | vortexasdk/client.py | import copy
import functools
import getpass
import os
from json import JSONDecodeError
from multiprocessing.pool import ThreadPool
from random import shuffle
from typing import Dict, List
import uuid
from requests import Response
from tqdm import tqdm
from vortexasdk.abstract_client import AbstractVortexaClient
from vortexasdk.api.id import ID
from vortexasdk.endpoints.endpoints import API_URL
from vortexasdk.logger import get_logger
from vortexasdk.retry_session import (
retry_get,
retry_post,
)
from vortexasdk.utils import filter_empty_values
from vortexasdk.version_utils import is_sdk_version_outdated
from vortexasdk.version import __version__
from vortexasdk import __name__ as sdk_pkg_name
logger = get_logger(__name__)
class VortexaClient(AbstractVortexaClient):
"""The API client responsible for calling Vortexa's Public API."""
_DEFAULT_PAGE_LOAD_SIZE = int(1e4)
_N_THREADS = 6
_MAX_ALLOWED_TOTAL = int(1e6)
def __init__(self, **kwargs):
self.api_key = kwargs["api_key"]
def get_reference(self, resource: str, id: ID) -> List[Dict]:
"""Lookup reference data."""
url = self._create_url(f"{resource}/{id}")
response = retry_get(url)
return _handle_response(response)["data"]
def search(self, resource: str, response_type: str, **data) -> List:
"""Search using `resource` using `**data` as filter params."""
url = self._create_url(resource)
payload = self._cleanse_payload(data)
logger.info(f"Payload: {payload}")
# breakdowns do not support paging, the breakdown size is specified explicitly as a request parameter
if response_type == "breakdown":
size = payload.get("breakdown_size", 1000)
response = _send_post_request(url, payload, size=size, offset=0)
ref = response.get("reference", {})
if ref:
return response
else:
return response["data"]
probe_response = _send_post_request(url, payload, size=1, offset=0)
total = self._calculate_total(probe_response)
if total > self._MAX_ALLOWED_TOTAL:
raise Exception(
f"Attempting to query too many records at once. Attempted records: {total}, Max allowed records: {self._MAX_ALLOWED_TOTAL} . "
f"Try reducing the date range to return fewer records."
)
elif total == 1:
# Only one page response, no need to send another request, so return flattened response
return probe_response["data"]
else:
# Multiple pages available, create offsets and fetch all responses
responses = self._process_multiple_pages(
total=total, url=url, payload=payload, data=data
)
flattened = self._flatten_response(responses)
assert len(flattened) == total, (
f"Incorrect number of records returned from API. "
f"Actual: {len(flattened)}, expected: {total}"
)
return flattened
def _create_url(self, path: str) -> str:
return (
f"{API_URL}{path}?_sdk=python_v{__version__}&apikey={self.api_key}"
)
def _process_multiple_pages(
self, total: int, url: str, payload: Dict, data: Dict
) -> List:
size = data.get("size", 1000)
offsets = list(range(0, total, size))
shuffle(offsets)
with tqdm(
total=total, desc="Loading from API", disable=(len(offsets) == 1)
) as pbar:
with ThreadPool(self._N_THREADS) as pool:
logger.info(
f"{total} Results to retrieve."
f" Sending {len(offsets)}"
f" post requests in parallel using {self._N_THREADS} threads."
)
func = functools.partial(
_send_post_request_data,
url=url,
payload=payload,
size=size,
progress_bar=pbar,
)
return pool.map(func, offsets)
@staticmethod
def _cleanse_payload(payload: Dict) -> Dict:
exclude_params = payload.get("exclude", {})
payload["exclude"] = filter_empty_values(exclude_params)
return filter_empty_values(payload)
@staticmethod
def _calculate_total(response: Dict) -> int:
""" Get total number of pages, if total key does not exist, return 1 """
return response.get("total", 1)
@staticmethod
def _flatten_response(response) -> List:
return [x for y in response for x in y]
def _send_post_request_data(
offset, url, payload, size, progress_bar: tqdm
) -> List:
# noinspection PyBroadException
try:
progress_bar.update(size)
except Exception:
logger.warn("Could not update progress bar")
dict_response = _send_post_request(url, payload, size, offset)
return dict_response.get("data", [])
def _send_post_request(url, payload, size, offset) -> Dict:
logger.debug(f"Sending post request, offset: {offset}, size: {size}")
payload_with_offset = copy.deepcopy(payload)
payload_with_offset["offset"] = offset
payload_with_offset["cm_offset"] = offset
payload_with_offset["size"] = size
payload_with_offset["cm_size"] = size
response = retry_post(url, json=payload_with_offset)
return _handle_response(response, payload_with_offset)
def _handle_response(response: Response, payload: Dict = None) -> Dict:
if not response.ok:
logger.error(response.reason)
logger.error(response.status_code)
logger.error(response)
# noinspection PyBroadException
try:
logger.error(response.json())
message = response.json()["message"]
except Exception:
message = ""
pass
logger.error(f"payload: {payload}")
error = f"[{response.status_code} {response.reason}]"
raise ValueError(f"{error} {message}")
else:
try:
json = response.json()
except JSONDecodeError:
logger.error("Could not decode response")
json = {}
except Exception as e:
logger.error(e)
json = {}
return json
__client__ = None
def default_client() -> VortexaClient:
"""Instantiate VortexaClient as global variable."""
global __client__
if __client__ is None:
__client__ = create_client()
return __client__
def create_client() -> VortexaClient:
"""Create new VortexaClient."""
logger.info("Creating new VortexaClient")
api_key = _load_api_key()
verify_api_key_format(api_key)
_warn_user_if_sdk_version_outdated()
return VortexaClient(api_key=api_key)
def set_client(client) -> None:
"""Set the global client, used by all endpoints."""
global __client__
__client__ = client
logger.debug(
f"global __client__ has been set {__client__.__class__.__name__} \n"
)
def _warn_user_if_sdk_version_outdated() -> None:
"""Warn users if their SDK version is outdated"""
try:
latest_sdk_version, sdk_outdated_check = is_sdk_version_outdated()
if sdk_outdated_check:
logger.warning(
f"You are using {sdk_pkg_name} version {__version__}, however version {latest_sdk_version} is available.\n"
f"You should consider upgrading via the 'pip install {sdk_pkg_name} --upgrade' command."
)
except Exception:
logger.warning(
f"Outdated SDK version check could not be completed. \n"
)
def _load_api_key():
"""Read API Key from environment variables else user input"""
try:
return os.environ["VORTEXA_API_KEY"]
except KeyError:
return getpass.getpass("Vortexa API Key: ")
except Exception:
raise KeyError(
"You must either set the VORTEXA_API_KEY environment variable, or interactively enter your Vortexa API key."
" Your API key can be found at https://docs.vortexa.com"
)
def verify_api_key_format(api_key: str) -> None:
"""Verify that the api_key is a valid UUID string"""
try:
uuid.UUID(api_key)
except ValueError:
raise ValueError(
"Incorrect API key set. The Vortexa API key must be of the form <KEY> "
" Your API key can be found at https://docs.vortexa.com"
) | 0.804252 | 0.12166 |
from flask_jwt_extended import jwt_required, get_jwt_identity
from flask_restful import Resource
from flask import request, jsonify
from database.db import mongo
from resources.students.student_proposal import curr_user_is_student
class EditProposal(Resource):
@jwt_required
def post(self):
current_user_email = get_jwt_identity()
if curr_user_is_student(current_user_email):
body = request.get_json()
students = mongo.db.students
student = students.find_one({'email': current_user_email})
if student:
students.update({'email': student['email']},
{"$set": {
'proposal_document_title_persian': body.get('title_persian'),
'proposal_document_title_english': body.get('title_english'),
'proposal_document_keywords_persian': body.get('keywords_persian'),
'proposal_document_keywords_english': body.get('keywords_english'),
'proposal_document_type': body.get('type'),
'proposal_document_definition': body.get('definition'),
'proposal_document_history': body.get('history'),
'proposal_document_how_to_solve': body.get('how_to_solve'),
'proposal_document_assumption': body.get('assumption'),
'proposal_document_is_new': body.get('is_new'),
'proposal_document_tools': body.get('tools'),
'proposal_document_supportive_reference': body.get('supportive_reference'),
'proposal_document_references': body.get('references'),
'proposal_document_references_other_languages':
body.get('references_other_languages'),
'proposal_document_time_table': body.get('time_table')
}})
out = "Proposal is updated successfully"
else:
out = "Student not found"
else:
out = "It's prof"
return jsonify({'out': out}) | app/resources/students/edit_proposal.py | from flask_jwt_extended import jwt_required, get_jwt_identity
from flask_restful import Resource
from flask import request, jsonify
from database.db import mongo
from resources.students.student_proposal import curr_user_is_student
class EditProposal(Resource):
@jwt_required
def post(self):
current_user_email = get_jwt_identity()
if curr_user_is_student(current_user_email):
body = request.get_json()
students = mongo.db.students
student = students.find_one({'email': current_user_email})
if student:
students.update({'email': student['email']},
{"$set": {
'proposal_document_title_persian': body.get('title_persian'),
'proposal_document_title_english': body.get('title_english'),
'proposal_document_keywords_persian': body.get('keywords_persian'),
'proposal_document_keywords_english': body.get('keywords_english'),
'proposal_document_type': body.get('type'),
'proposal_document_definition': body.get('definition'),
'proposal_document_history': body.get('history'),
'proposal_document_how_to_solve': body.get('how_to_solve'),
'proposal_document_assumption': body.get('assumption'),
'proposal_document_is_new': body.get('is_new'),
'proposal_document_tools': body.get('tools'),
'proposal_document_supportive_reference': body.get('supportive_reference'),
'proposal_document_references': body.get('references'),
'proposal_document_references_other_languages':
body.get('references_other_languages'),
'proposal_document_time_table': body.get('time_table')
}})
out = "Proposal is updated successfully"
else:
out = "Student not found"
else:
out = "It's prof"
return jsonify({'out': out}) | 0.402157 | 0.05301 |
from otree.api import Currency as c, currency_range
from ._builtin import Page, WaitPage
from .models import Constants
class Offer(Page):
form_model = 'group'
form_fields = ['offer']
def is_displayed(self):
if self.participant.vars['is_finished']:
return False
else:
return self.player.role() == 'proposer'
class WaitOffer(WaitPage):
pass
class MatchInfo(Page):
timeout_seconds = 3
def is_displayed(self):
return self.round_number in [1,5]
def before_next_page(self):
self.participant.vars['is_finished'] = False
print("executred before_next_page")
class Response(Page):
form_model = 'group'
form_fields = ['response']
def js_vars(self):
return dict(offer = self.group.offer, current_pie = self.group.current_pie)
def is_displayed(self):
if self.participant.vars['is_finished']:
return False
else:
return self.player.role() == 'responder'
def before_next_page(self):
print("checking payoffs")
if self.group.response == "accept":
for p in self.group.get_players():
p.participant.vars['is_finished'] = True
if p.role() == 'proposer':
proposer_payoff = p.group.current_pie - p.group.offer
# p.payoff = proposer_payoff
p.participant.vars['payoff_list'].append(proposer_payoff)
if p.role() == 'responder':
# p.payoff = p.group.offer
responder_payoff = p.group.offer
p.participant.vars['payoff_list'].append(responder_payoff)
if self.group.response == "reject" and self.round_number in [4,8]:
for p in self.group.get_players():
p.participant.vars['payoff_list'].append(c(0))
class WaitResponse(WaitPage):
pass
class Results(Page):
def vars_for_template(self):
return dict(proposer_payoff = self.group.current_pie - self.group.offer)
def is_displayed(self):
print("payoff list for",self.participant.id_in_session, self.participant.vars['payoff_list'])
return self.group.response in ["accept", "reject"]
class BeforeFinalResults(WaitPage):
after_all_players_arrive = 'set_payoffs'
def is_displayed(self):
return self.round_number == Constants.num_rounds
class FinalResults(Page):
def is_displayed(self):
return self.round_number == Constants.num_rounds
page_sequence = [MatchInfo, Offer, WaitOffer, Response, WaitResponse, Results, BeforeFinalResults, FinalResults] | example_apps/rubinstein/pages.py | from otree.api import Currency as c, currency_range
from ._builtin import Page, WaitPage
from .models import Constants
class Offer(Page):
form_model = 'group'
form_fields = ['offer']
def is_displayed(self):
if self.participant.vars['is_finished']:
return False
else:
return self.player.role() == 'proposer'
class WaitOffer(WaitPage):
pass
class MatchInfo(Page):
timeout_seconds = 3
def is_displayed(self):
return self.round_number in [1,5]
def before_next_page(self):
self.participant.vars['is_finished'] = False
print("executred before_next_page")
class Response(Page):
form_model = 'group'
form_fields = ['response']
def js_vars(self):
return dict(offer = self.group.offer, current_pie = self.group.current_pie)
def is_displayed(self):
if self.participant.vars['is_finished']:
return False
else:
return self.player.role() == 'responder'
def before_next_page(self):
print("checking payoffs")
if self.group.response == "accept":
for p in self.group.get_players():
p.participant.vars['is_finished'] = True
if p.role() == 'proposer':
proposer_payoff = p.group.current_pie - p.group.offer
# p.payoff = proposer_payoff
p.participant.vars['payoff_list'].append(proposer_payoff)
if p.role() == 'responder':
# p.payoff = p.group.offer
responder_payoff = p.group.offer
p.participant.vars['payoff_list'].append(responder_payoff)
if self.group.response == "reject" and self.round_number in [4,8]:
for p in self.group.get_players():
p.participant.vars['payoff_list'].append(c(0))
class WaitResponse(WaitPage):
pass
class Results(Page):
def vars_for_template(self):
return dict(proposer_payoff = self.group.current_pie - self.group.offer)
def is_displayed(self):
print("payoff list for",self.participant.id_in_session, self.participant.vars['payoff_list'])
return self.group.response in ["accept", "reject"]
class BeforeFinalResults(WaitPage):
after_all_players_arrive = 'set_payoffs'
def is_displayed(self):
return self.round_number == Constants.num_rounds
class FinalResults(Page):
def is_displayed(self):
return self.round_number == Constants.num_rounds
page_sequence = [MatchInfo, Offer, WaitOffer, Response, WaitResponse, Results, BeforeFinalResults, FinalResults] | 0.249173 | 0.13084 |
from flask import Flask, flash, redirect, render_template, request, session, abort, url_for, escape, jsonify
import models as dbHandler
import os
import qrcode
import base64
import io
import random
# Flask application instance used by every route decorator below.
# NOTE(review): several views read/write `session`, which requires
# `app.secret_key` to be configured — it is not set in this chunk;
# confirm it is assigned elsewhere, otherwise session access fails at runtime.
app = Flask(__name__)
@app.route('/', methods=['GET', 'POST'])
def index():
    """Render the landing page."""
    return render_template('index.html')
@app.route('/team', methods=['GET', 'POST'])
def team():
    """Render the team page."""
    return render_template('team.html')
@app.route('/register', methods=['POST', 'GET'])
def register():
    """Show the registration form (GET) or create a new account (POST).

    On a successful POST the landing page is rendered; on failure the
    form is re-rendered with the error message from the data layer.
    """
    if request.method != 'POST':
        return render_template('register.html')
    form = request.form
    error = dbHandler.register(
        form['sap_id'],
        form['name'],
        form['email'],
        form['telephone'],
        form['username'],
        form['password'],
    )
    if error is None:
        return index()
    return render_template('register.html', error=error)
@app.route('/rfidregister', methods=['POST', 'GET'])
def rfidregister():
    """Show the RFID registration form (GET) or persist a new RFID record (POST).

    The form is always re-rendered after a POST, carrying whatever error
    message (or None) the data layer returned.
    """
    if request.method != 'POST':
        return render_template('rfidregister.html')
    form = request.form
    error = dbHandler.rfidregister(
        form['sap_id'],
        form['name'],
        form['email'],
        form['telephone'],
        form['branch'],
        form['year'],
        form['rfidno'],
    )
    return render_template('rfidregister.html', error=error)
@app.route('/report', methods=['POST', 'GET'])
def report():
    """Show all registered records fetched from the data layer.

    NOTE(review): both branches render 'rfidregister.html'; presumably a
    dedicated report template was intended — confirm the template name.
    """
    rows = dbHandler.getdata()
    if rows:
        return render_template('rfidregister.html', data=rows)
    return render_template('rfidregister.html')
@app.route('/login', methods=['POST', 'GET'])
def login():
    """Authenticate a user and start a session.

    Already-authenticated visitors go straight to their profile. The
    special 'admin' account is routed to the RFID registration page;
    everyone else is validated through the data layer.
    """
    if 'username' in session:
        return redirect(url_for('profile'))
    if request.method != 'POST':
        return render_template('login.html')
    user = request.form['username']
    pwd = request.form['password']
    # SECURITY(review): hard-coded admin credential compared in plain text —
    # move to configuration and use a hashed comparison.
    if user == 'admin' and pwd == '<PASSWORD>':
        session['username'] = user
        return redirect(url_for('rfidregister'))
    error = dbHandler.login(user, pwd)
    if error is None:
        session['username'] = user
        return redirect(url_for('profile'))
    return render_template('login.html', error=error)
@app.route('/profile', methods=['POST', 'GET'])
def profile():
    """Show the profile page for the logged-in user; bounce anonymous visitors home."""
    if 'username' not in session:
        return redirect(url_for('index'))
    return render_template('profile.html',
                           session_user_name=escape(session['username']))
@app.route('/qrcodes', methods=['POST','GET'])
def qrcodes():
if 'username' in session:
username_session = escape(session['username'])
random_byte=os.urandom(10)
random_str=int(random_byte.hex(),16)
val = str(random_str)
dbHandler.dock_info(username_session,val)
img=qrcode.make(random_str)
buffer = io.BytesIO()
img.save(buffer, format="JPEG")
img_str = base64.b64encode(buffer.getvalue())
img_str = "data:image/jpeg;base64," + img_str.decode("utf-8")
return render_template('profile.html', session_user_name=username_session, code=img_str)
@app.route('/logout')
def logout():
session.pop('username', None)
return redirect(url_for('index'))
@app.errorhandler(500)
def handle_error(error):
return render_template('error.html')
@app.route('/qrverify', methods = ['GET','POST'])
def qrverify():
data = request.get_json(force=True)
flag = dbHandler.qr_verify(data['string'])
if flag == 1:
resp = {'val':1}
else:
resp = {'val':0}
return jsonify(resp)
@app.route('/updateds', methods = ['GET','POST'])
def updateds():
data = request.get_json(force=True)
cyclenm = data['cyclenm']
dockst = data['dockst']
dockno = data['dockno']
qrstring = data['string']
flag = dbHandler.update_docking_st(cyclenm,dockst,dockno,qrstring)
if flag == 1:
resp = {'val':1}
else:
resp = {'val':0}
return jsonify(resp)
@app.route('/rfidauth', methods = ['GET','POST'])
def rfidauth():
data = request.get_json(force=True)
flag = dbHandler.rfid_verify(data['string'])
resp={'val':flag}
if flag == 1:
resp = {'val':1}
else:
resp = {'val':0}
return jsonify(resp)
@app.route('/updaterfidtb', methods = ['GET','POST'])
def updaterfidtb():
data = request.get_json(force=True)
ride_id=data['ride_id']
cyclenm = data['cyclenm']
dockst = data['dockst']
dockno = data['dockno']
rfidstring = data['string']
flag = dbHandler.update_rfid_tb(ride_id,cyclenm,dockst,dockno,rfidstring)
if flag == 1:
resp = {'val':1}
else:
resp = {'val':0}
return jsonify(resp)
@app.route('/endride', methods = ['GET','POST'])
def endride():
data = request.get_json(force=True)
ride_id = data['ride_id']
cyclenm = data['cyclenm']
dockst = data['dockst']
dockno = data['dockno']
rfidstring = data['string']
# status = False
flag = dbHandler.end_ride(ride_id)
# dbHandler.updatecycle_pos(dockst,dockno,cyclenm,status)
if flag == 1:
resp = {'val':1}
else:
resp = {'val':0}
return jsonify(resp)
if __name__ == "__main__":
app.secret_key = os.urandom(12)
app.config['TRAP_HTTP_EXCEPTIONS']=True
app.run(debug=True,host='0.0.0.0', port=5000) | main.py | from flask import Flask, flash, redirect, render_template, request, session, abort, url_for, escape, jsonify
import models as dbHandler
import os
import qrcode
import base64
import io
import random
app = Flask(__name__)
@app.route('/', methods =['GET','POST'])
def index():
return render_template('index.html')
@app.route('/team', methods=['GET','POST'])
def team():
return render_template('team.html')
@app.route('/register', methods=['POST', 'GET'])
def register():
error = None
if request.method=='POST':
sap_id = request.form['sap_id']
name=request.form['name']
email = request.form['email']
telephone= request.form['telephone']
username = request.form['username']
password = request.form['password']
error = dbHandler.register(sap_id,name,email,telephone,username,password)
if error == None:
return index()
else:
return render_template('register.html', error=error)
else:
return render_template('register.html')
@app.route('/rfidregister', methods=['POST','GET'])
def rfidregister():
error = None
if request.method=='POST':
sap_id = request.form['sap_id']
name=request.form['name']
email = request.form['email']
telephone= request.form['telephone']
branch = request.form['branch']
year = request.form['year']
rfidno = request.form['rfidno']
error = dbHandler.rfidregister(sap_id,name,email,telephone,branch,year,rfidno)
return render_template('rfidregister.html', error=error)
else:
return render_template('rfidregister.html')
@app.route('/report', methods=['POST','GET'])
def report():
data=dbHandler.getdata()
if data:
return render_template('rfidregister.html',data=data)
else:
return render_template('rfidregister.html')
@app.route('/login', methods=['POST','GET'])
def login():
error = None
if 'username' in session:
return redirect(url_for('profile'))
else:
if request.method == 'POST':
username_in = request.form['username']
password_in = request.form['password']
if username_in == 'admin' and password_in == '<PASSWORD>':
session['username'] = username_in
return redirect(url_for('rfidregister'))
else:
error = dbHandler.login(username_in,password_in)
if error == None:
session['username'] = username_in
return redirect(url_for('profile'))
else:
return render_template('login.html',error=error)
else:
return render_template('login.html')
@app.route('/profile', methods=['POST','GET'])
def profile():
if 'username' in session:
username_session = escape(session['username'])
return render_template('profile.html', session_user_name=username_session)
else:
return redirect(url_for('index'))
@app.route('/qrcodes', methods=['POST','GET'])
def qrcodes():
if 'username' in session:
username_session = escape(session['username'])
random_byte=os.urandom(10)
random_str=int(random_byte.hex(),16)
val = str(random_str)
dbHandler.dock_info(username_session,val)
img=qrcode.make(random_str)
buffer = io.BytesIO()
img.save(buffer, format="JPEG")
img_str = base64.b64encode(buffer.getvalue())
img_str = "data:image/jpeg;base64," + img_str.decode("utf-8")
return render_template('profile.html', session_user_name=username_session, code=img_str)
@app.route('/logout')
def logout():
session.pop('username', None)
return redirect(url_for('index'))
@app.errorhandler(500)
def handle_error(error):
return render_template('error.html')
@app.route('/qrverify', methods = ['GET','POST'])
def qrverify():
data = request.get_json(force=True)
flag = dbHandler.qr_verify(data['string'])
if flag == 1:
resp = {'val':1}
else:
resp = {'val':0}
return jsonify(resp)
@app.route('/updateds', methods = ['GET','POST'])
def updateds():
data = request.get_json(force=True)
cyclenm = data['cyclenm']
dockst = data['dockst']
dockno = data['dockno']
qrstring = data['string']
flag = dbHandler.update_docking_st(cyclenm,dockst,dockno,qrstring)
if flag == 1:
resp = {'val':1}
else:
resp = {'val':0}
return jsonify(resp)
@app.route('/rfidauth', methods = ['GET','POST'])
def rfidauth():
data = request.get_json(force=True)
flag = dbHandler.rfid_verify(data['string'])
resp={'val':flag}
if flag == 1:
resp = {'val':1}
else:
resp = {'val':0}
return jsonify(resp)
@app.route('/updaterfidtb', methods = ['GET','POST'])
def updaterfidtb():
data = request.get_json(force=True)
ride_id=data['ride_id']
cyclenm = data['cyclenm']
dockst = data['dockst']
dockno = data['dockno']
rfidstring = data['string']
flag = dbHandler.update_rfid_tb(ride_id,cyclenm,dockst,dockno,rfidstring)
if flag == 1:
resp = {'val':1}
else:
resp = {'val':0}
return jsonify(resp)
@app.route('/endride', methods = ['GET','POST'])
def endride():
data = request.get_json(force=True)
ride_id = data['ride_id']
cyclenm = data['cyclenm']
dockst = data['dockst']
dockno = data['dockno']
rfidstring = data['string']
# status = False
flag = dbHandler.end_ride(ride_id)
# dbHandler.updatecycle_pos(dockst,dockno,cyclenm,status)
if flag == 1:
resp = {'val':1}
else:
resp = {'val':0}
return jsonify(resp)
if __name__ == "__main__":
app.secret_key = os.urandom(12)
app.config['TRAP_HTTP_EXCEPTIONS']=True
app.run(debug=True,host='0.0.0.0', port=5000) | 0.105596 | 0.058804 |
from __future__ import absolute_import
from __future__ import print_function
from .offset_manager import OffsetWriter
from kafka_utils.util.client import KafkaToolClient
from kafka_utils.util.offsets import nullify_offsets
from kafka_utils.util.offsets import set_consumer_offsets
from kafka_utils.util.zookeeper import ZK
class DeleteGroup(OffsetWriter):
@classmethod
def setup_subparser(cls, subparsers):
parser_delete_group = subparsers.add_parser(
"delete_group",
description="Delete a consumer group by groupid. This "
"tool shall delete all group offset metadata from Zookeeper.",
add_help=False
)
parser_delete_group.add_argument(
"-h", "--help", action="help",
help="Show this help message and exit."
)
parser_delete_group.add_argument(
'groupid',
help="Consumer Group IDs whose metadata shall be deleted."
)
parser_delete_group.add_argument(
'--storage', choices=['zookeeper', 'kafka'],
help="String describing where to store the committed offsets.",
default='kafka',
)
parser_delete_group.set_defaults(command=cls.run)
@classmethod
def run(cls, args, cluster_config):
# Setup the Kafka client
client = KafkaToolClient(cluster_config.broker_list)
client.load_metadata_for_topics()
topics_dict = cls.preprocess_args(
args.groupid,
None,
None,
cluster_config,
client,
storage=args.storage,
)
if args.storage == 'zookeeper':
cls.delete_group_zk(cluster_config, args.groupid)
else:
cls.delete_group_kafka(client, args.groupid, topics_dict)
@classmethod
def delete_group_zk(cls, cluster_config, group):
with ZK(cluster_config) as zk:
zk.delete_group(group)
@classmethod
def delete_group_kafka(cls, client, group, topics):
new_offsets = nullify_offsets(topics)
set_consumer_offsets(
client,
group,
new_offsets,
offset_storage='kafka',
) | kafka_utils/kafka_consumer_manager/commands/delete_group.py | from __future__ import absolute_import
from __future__ import print_function
from .offset_manager import OffsetWriter
from kafka_utils.util.client import KafkaToolClient
from kafka_utils.util.offsets import nullify_offsets
from kafka_utils.util.offsets import set_consumer_offsets
from kafka_utils.util.zookeeper import ZK
class DeleteGroup(OffsetWriter):
@classmethod
def setup_subparser(cls, subparsers):
parser_delete_group = subparsers.add_parser(
"delete_group",
description="Delete a consumer group by groupid. This "
"tool shall delete all group offset metadata from Zookeeper.",
add_help=False
)
parser_delete_group.add_argument(
"-h", "--help", action="help",
help="Show this help message and exit."
)
parser_delete_group.add_argument(
'groupid',
help="Consumer Group IDs whose metadata shall be deleted."
)
parser_delete_group.add_argument(
'--storage', choices=['zookeeper', 'kafka'],
help="String describing where to store the committed offsets.",
default='kafka',
)
parser_delete_group.set_defaults(command=cls.run)
@classmethod
def run(cls, args, cluster_config):
# Setup the Kafka client
client = KafkaToolClient(cluster_config.broker_list)
client.load_metadata_for_topics()
topics_dict = cls.preprocess_args(
args.groupid,
None,
None,
cluster_config,
client,
storage=args.storage,
)
if args.storage == 'zookeeper':
cls.delete_group_zk(cluster_config, args.groupid)
else:
cls.delete_group_kafka(client, args.groupid, topics_dict)
@classmethod
def delete_group_zk(cls, cluster_config, group):
with ZK(cluster_config) as zk:
zk.delete_group(group)
@classmethod
def delete_group_kafka(cls, client, group, topics):
new_offsets = nullify_offsets(topics)
set_consumer_offsets(
client,
group,
new_offsets,
offset_storage='kafka',
) | 0.701815 | 0.073297 |
from gcsfs.utils import ChecksumError
from gcsfs.checkers import Crc32cChecker, MD5Checker, SizeChecker, crcmod
from hashlib import md5
import base64
import pytest
def google_response_from_data(expected_data: bytes, actual_data=None):
actual_data = actual_data or expected_data
checksum = md5(actual_data)
checksum_b64 = base64.b64encode(checksum.digest()).decode("UTF-8")
if crcmod is not None:
checksum = crcmod.Crc(0x11EDC6F41, initCrc=0, xorOut=0xFFFFFFFF)
checksum.update(actual_data)
crc = base64.b64encode(checksum.digest()).decode()
class response:
content_length = len(actual_data)
headers = {"X-Goog-Hash": f"md5={checksum_b64}"}
if crcmod is not None:
headers["X-Goog-Hash"] += f",crc32c={crc}"
return response
def google_json_response_from_data(expected_data: bytes, actual_data=None):
actual_data = actual_data or expected_data
checksum = md5(actual_data)
checksum_b64 = base64.b64encode(checksum.digest()).decode("UTF-8")
response = {"md5Hash": checksum_b64, "size": len(actual_data)}
# some manual checksums verified using gsutil ls -L
# also can add using https://crccalc.com/
# be careful about newlines
crc32c_points = {
b"hello world\n": "8P9ykg==",
b"different checksum": "DoesntMatter==",
}
try:
response["crc32c"] = crc32c_points[actual_data]
except KeyError:
pass
return response
@pytest.mark.parametrize(
"checker, data, actual_data, raises",
[
(MD5Checker(), b"hello world", b"different checksum", (ChecksumError,)),
(MD5Checker(), b"hello world", b"hello world", ()),
(Crc32cChecker(), b"hello world", b"different checksum", (ChecksumError,)),
(Crc32cChecker(), b"hello world", b"hello world", ()),
],
)
def test_validate_headers(checker, data, actual_data, raises):
response = google_response_from_data(actual_data)
checker.update(data)
if raises:
with pytest.raises(raises):
checker.validate_headers(response.headers)
else:
checker.validate_headers(response.headers)
params = [
(MD5Checker(), b"hello world", b"different checksum", (ChecksumError,)),
(MD5Checker(), b"hello world", b"hello world", ()),
(SizeChecker(), b"hello world", b"hello world", ()),
(SizeChecker(), b"hello world", b"different size", (AssertionError,)),
]
if crcmod is not None:
params.append((Crc32cChecker(), b"hello world", b"hello world", ()))
params.append(
(Crc32cChecker(), b"hello world", b"different size", (ChecksumError,))
)
@pytest.mark.parametrize("checker, data, actual_data, raises", params)
def test_checker_validate_http_response(checker, data, actual_data, raises):
response = google_response_from_data(data, actual_data=actual_data)
checker.update(data)
if raises:
with pytest.raises(raises):
checker.validate_http_response(response)
else:
checker.validate_http_response(response)
params = [
(MD5Checker(), b"hello world", b"different checksum", (ChecksumError,)),
(MD5Checker(), b"hello world", b"hello world", ()),
(SizeChecker(), b"hello world", b"hello world", ()),
(SizeChecker(), b"hello world", b"different size", (AssertionError,)),
]
if crcmod is not None:
params.extend(
[
(Crc32cChecker(), b"hello world", b"different checksum", (ChecksumError,)),
(Crc32cChecker(), b"hello world\n", b"hello world\n", ()),
]
)
@pytest.mark.parametrize("checker, data, actual_data, raises", params)
def test_checker_validate_json_response(checker, data, actual_data, raises):
response = google_json_response_from_data(data, actual_data=actual_data)
checker.update(data)
if raises:
with pytest.raises(raises):
checker.validate_json_response(response)
else:
checker.validate_json_response(response) | gcsfs/tests/test_checkers.py | from gcsfs.utils import ChecksumError
from gcsfs.checkers import Crc32cChecker, MD5Checker, SizeChecker, crcmod
from hashlib import md5
import base64
import pytest
def google_response_from_data(expected_data: bytes, actual_data=None):
actual_data = actual_data or expected_data
checksum = md5(actual_data)
checksum_b64 = base64.b64encode(checksum.digest()).decode("UTF-8")
if crcmod is not None:
checksum = crcmod.Crc(0x11EDC6F41, initCrc=0, xorOut=0xFFFFFFFF)
checksum.update(actual_data)
crc = base64.b64encode(checksum.digest()).decode()
class response:
content_length = len(actual_data)
headers = {"X-Goog-Hash": f"md5={checksum_b64}"}
if crcmod is not None:
headers["X-Goog-Hash"] += f",crc32c={crc}"
return response
def google_json_response_from_data(expected_data: bytes, actual_data=None):
actual_data = actual_data or expected_data
checksum = md5(actual_data)
checksum_b64 = base64.b64encode(checksum.digest()).decode("UTF-8")
response = {"md5Hash": checksum_b64, "size": len(actual_data)}
# some manual checksums verified using gsutil ls -L
# also can add using https://crccalc.com/
# be careful about newlines
crc32c_points = {
b"hello world\n": "8P9ykg==",
b"different checksum": "DoesntMatter==",
}
try:
response["crc32c"] = crc32c_points[actual_data]
except KeyError:
pass
return response
@pytest.mark.parametrize(
"checker, data, actual_data, raises",
[
(MD5Checker(), b"hello world", b"different checksum", (ChecksumError,)),
(MD5Checker(), b"hello world", b"hello world", ()),
(Crc32cChecker(), b"hello world", b"different checksum", (ChecksumError,)),
(Crc32cChecker(), b"hello world", b"hello world", ()),
],
)
def test_validate_headers(checker, data, actual_data, raises):
response = google_response_from_data(actual_data)
checker.update(data)
if raises:
with pytest.raises(raises):
checker.validate_headers(response.headers)
else:
checker.validate_headers(response.headers)
params = [
(MD5Checker(), b"hello world", b"different checksum", (ChecksumError,)),
(MD5Checker(), b"hello world", b"hello world", ()),
(SizeChecker(), b"hello world", b"hello world", ()),
(SizeChecker(), b"hello world", b"different size", (AssertionError,)),
]
if crcmod is not None:
params.append((Crc32cChecker(), b"hello world", b"hello world", ()))
params.append(
(Crc32cChecker(), b"hello world", b"different size", (ChecksumError,))
)
@pytest.mark.parametrize("checker, data, actual_data, raises", params)
def test_checker_validate_http_response(checker, data, actual_data, raises):
response = google_response_from_data(data, actual_data=actual_data)
checker.update(data)
if raises:
with pytest.raises(raises):
checker.validate_http_response(response)
else:
checker.validate_http_response(response)
params = [
(MD5Checker(), b"hello world", b"different checksum", (ChecksumError,)),
(MD5Checker(), b"hello world", b"hello world", ()),
(SizeChecker(), b"hello world", b"hello world", ()),
(SizeChecker(), b"hello world", b"different size", (AssertionError,)),
]
if crcmod is not None:
params.extend(
[
(Crc32cChecker(), b"hello world", b"different checksum", (ChecksumError,)),
(Crc32cChecker(), b"hello world\n", b"hello world\n", ()),
]
)
@pytest.mark.parametrize("checker, data, actual_data, raises", params)
def test_checker_validate_json_response(checker, data, actual_data, raises):
response = google_json_response_from_data(data, actual_data=actual_data)
checker.update(data)
if raises:
with pytest.raises(raises):
checker.validate_json_response(response)
else:
checker.validate_json_response(response) | 0.557123 | 0.320968 |
from Level.Region import Region
import json
import os
class Data:
def __init__(self, level, number = 0):
self.regions = []
self.regionsGrid = []
self.level = level
self.number = number
self.deleteCount = 0
files = os.listdir(level.folder + "/region" + str(number))
files.sort()
self.smallest = self.getCoords(files[-1])
self.biggest = self.getCoords(files[-1])
for file in files:
coords = self.getCoords(file)
for i in range(2):
if coords[i] < self.smallest[i]:
self.smallest[i] = coords[i]
if coords[i] > self.biggest[i]:
self.biggest[i] = coords[i]
for y in range(self.biggest[1] + 1 - self.smallest[1]):
line = []
for x in range(self.biggest[0] + 1 - self.smallest[0]):
line.append(None)
self.regionsGrid.append(line)
for file in files:
coords = self.getCoords(file)
region = Region(level, number, *coords, regionIndex = len(self.regions))
self.regions.append(region)
self.regionsGrid[coords[1] - self.smallest[1]][coords[0] - self.smallest[0]] = region
def getCoords(self, string):
coords = []
coord = ""
first = False
for char in string:
if char == "-" and coord != "":
coords.append(int(coord))
coord = ""
first = True
elif char == ".":
coords.append(int(coord))
return coords
else:
coord += char
def load(self):
file = open(self.level.folder + "/data" + str(self.number) + ".json", "r")
self.jsonData = json.load(file)
file.close()
self.loadJsonData()
def loadJsonData(self):
# self.music = self.jsonData["Music"]
self.spawnX = self.jsonData["SpawnX"] - self.smallest[0] * 16
self.spawnY = self.jsonData["SpawnY"] - self.smallest[1] * 16
self.lifes = self.jsonData["Lifes"]
self.jumpTime = self.jsonData["JumpTime"]
self.jumpHeight = self.jsonData["JumpHeight"]
self.walkSpeed = self.jsonData["WalkSpeed"]
self.fallSpeed = self.jsonData["FallSpeed"]
self.fallSpeedMultiplier = self.jsonData["FallSpeedMultiplier"]
self.climbSpeed = self.jsonData["ClimbSpeed"]
self.description = self.jsonData["Description"]
def save(self):
file = open(self.level.folder + "/data" + str(self.number) + ".json", "w")
file.write(json.dumps(self.jsonData, sort_keys = True, indent = 4))
file.close()
def close(self):
for i in range(len(self.regions)):
del self.regions[i] | Level/Data.py | from Level.Region import Region
import json
import os
class Data:
def __init__(self, level, number = 0):
self.regions = []
self.regionsGrid = []
self.level = level
self.number = number
self.deleteCount = 0
files = os.listdir(level.folder + "/region" + str(number))
files.sort()
self.smallest = self.getCoords(files[-1])
self.biggest = self.getCoords(files[-1])
for file in files:
coords = self.getCoords(file)
for i in range(2):
if coords[i] < self.smallest[i]:
self.smallest[i] = coords[i]
if coords[i] > self.biggest[i]:
self.biggest[i] = coords[i]
for y in range(self.biggest[1] + 1 - self.smallest[1]):
line = []
for x in range(self.biggest[0] + 1 - self.smallest[0]):
line.append(None)
self.regionsGrid.append(line)
for file in files:
coords = self.getCoords(file)
region = Region(level, number, *coords, regionIndex = len(self.regions))
self.regions.append(region)
self.regionsGrid[coords[1] - self.smallest[1]][coords[0] - self.smallest[0]] = region
def getCoords(self, string):
coords = []
coord = ""
first = False
for char in string:
if char == "-" and coord != "":
coords.append(int(coord))
coord = ""
first = True
elif char == ".":
coords.append(int(coord))
return coords
else:
coord += char
def load(self):
file = open(self.level.folder + "/data" + str(self.number) + ".json", "r")
self.jsonData = json.load(file)
file.close()
self.loadJsonData()
def loadJsonData(self):
# self.music = self.jsonData["Music"]
self.spawnX = self.jsonData["SpawnX"] - self.smallest[0] * 16
self.spawnY = self.jsonData["SpawnY"] - self.smallest[1] * 16
self.lifes = self.jsonData["Lifes"]
self.jumpTime = self.jsonData["JumpTime"]
self.jumpHeight = self.jsonData["JumpHeight"]
self.walkSpeed = self.jsonData["WalkSpeed"]
self.fallSpeed = self.jsonData["FallSpeed"]
self.fallSpeedMultiplier = self.jsonData["FallSpeedMultiplier"]
self.climbSpeed = self.jsonData["ClimbSpeed"]
self.description = self.jsonData["Description"]
def save(self):
file = open(self.level.folder + "/data" + str(self.number) + ".json", "w")
file.write(json.dumps(self.jsonData, sort_keys = True, indent = 4))
file.close()
def close(self):
for i in range(len(self.regions)):
del self.regions[i] | 0.376279 | 0.215021 |
from placeholder import *
from loss import *
from optimizer import *
from layer import *
from tensor import *
from util import *
import numpy as np
from copy import copy
class Model(object):
"""
Grahp model class
"""
def __init__(self, input_placeholder, output_placeholder):
"""
初始化模型参数 input output和layers
"""
self.layers = set()
self.all_placeholders = {}
self.input_placeholders = input_placeholder if isinstance(input_placeholder, list) else [input_placeholder]
self.output_placeholders = input_placeholder if isinstance(output_placeholder, list) else [output_placeholder]
if any(map(lambda x: not isinstance(x, Input,), self.input_placeholders)):
raise NameError('input placeholder is not Input type!')
if any(map(lambda x: not isinstance(x, Input,), self.output_placeholders)):
raise NameError('output placeholder is not Input type!')
def find_placeholders_and_layers(self):
"""
从输出层遍历所有placeholders和layers, 保存并设置id
"""
all_placeholders = {}
queue = []
for placeholder in self.output_placeholders:
"""
从输出层回溯(添加到queue)
"""
id = uniq_id('placeholder')
placeholder.set_id(id)
all_placeholders[id] = placeholder
queue.append(placeholder)
while len(queue) > 0:
current_placeholder = queue[0]
depend_placeholders = current_placeholder.depend_placeholders
# 获取所有layers
if current_placeholder.input_layer is not None and current_placeholder.input_layer not in self.layers:
self.layers.add(current_placeholder.input_layer)
# 回溯到起点了
if depend_placeholders is None:
queue.pop(0)
continue
for depend_placeholder in depend_placeholders:
# 已经添加过得placeholder不再处理
if depend_placeholder.id is not None:
continue
# 获取所有placeholder
placeholder_id = uniq_id('placeholder')
depend_placeholder.set_id(placeholder_id)
all_placeholders[placeholder_id] = depend_placeholder
queue.append(depend_placeholder)
# 所有输入placeholder都获取到了, 处理下一个
queue.pop(0)
# 获取所有placeholders
self.all_placeholders = all_placeholders
def forward(self, input_datas):
"""
从输入层遍历计算所有placeholder的tensor
"""
ready_placeholders_ids = set()
waiting_placeholders_ids = set(self.all_placeholders.keys())
# 从输入层开始, 给输入节点赋值
for i, placeholder in enumerate(self.input_placeholders):
placeholder.set_tensor(input_datas[i])
ready_placeholders_ids.add(placeholder.id)
waiting_placeholders_ids.remove(placeholder.id)
# 遍历计算所有placeholder
while len(waiting_placeholders_ids) > 0:
next_placeholder = None
for waiting_placeholder_id in waiting_placeholders_ids:
waiting_placeholder = self.all_placeholders[waiting_placeholder_id]
depend_placeholders = waiting_placeholder.depend_placeholders
# 需要所有依赖的上游节点都ready
if any(map(lambda x: x.id not in ready_placeholders_ids, depend_placeholders)):
continue
# 当前节点的依赖
depend_tensors = [depend_placeholder.tensor for depend_placeholder in depend_placeholders]
# 计算当前节点的值
if len(depend_tensors) == 1:
output_tensor = waiting_placeholder.input_layer.forward(depend_tensors[0])
elif len(depend_tensors) == 2:
output_tensor = waiting_placeholder.input_layer.forward(depend_tensors[0], depend_tensors[1], )
else:
raise("depend tensors numbuer should be 1 or 2!")
waiting_placeholder.set_tensor(output_tensor)
next_placeholder = waiting_placeholder
break
# 已经计算出结果的节点放入ready集合
if next_placeholder is not None:
ready_placeholders_ids.add(waiting_placeholder.id)
waiting_placeholders_ids.remove(waiting_placeholder.id)
def compile(self, optimizer, loss, validation_data=None):
"""
设置模型优化方法、损失函数和校验数据集
"""
assert isinstance(optimizer, Optimizer)
assert isinstance(loss, Loss)
self.optimizer = optimizer
self.loss = loss
self.validation_data = validation_data
self.find_placeholders_and_layers()
self.optimizer.set_layers(self.layers)
def get_batch_data(self, data):
return data
def fit(self, input_data, output_data, n_epochs, batch_size=None):
"""
训练模型
"""
# 检查输入数据并转换成Tensor格式
input_datas = input_data if isinstance(input_data, list) else [input_data]
output_datas = output_data if isinstance(input_data, list) else [output_data]
if any(map(lambda x: not isinstance(x, np.ndarray), input_datas)):
raise NameError('input placeholder is not Numpy.Array type!')
if any(map(lambda x: not isinstance(x, np.ndarray,), output_datas)):
raise NameError('output placeholder is not Numpy.Array type!')
# 初始化模型所有layer的参数
metrices = []
accs = []
errs = []
for i in range(n_epochs):
for batch_data in get_batch_data(input_datas + output_datas):
batch_input_tensors = list(map(lambda x: Tensor(x, autograd=True), batch_data[:len(input_datas)]))
batch_output_tensors = list(map(lambda x: Tensor(x, autograd=True), batch_data[len(input_datas):]))
# 前向遍历
self.forward(batch_input_tensors)
# todo 暂时只支持单tensor输出
assert len(self.output_placeholders) == 1
assert len(batch_input_tensors) == 1
# 从损失函数结果反向传递梯度
err, acc = self.loss.back_propagate(batch_output_tensors[0], self.output_placeholders[0].tensor)
errs.append(err)
accs.append(acc)
# 更新模型所有参数
self.optimizer.update_layers()
s = "\rProgress[{}], error[{}], acc[{}%] ".format(i, float(err), acc*100)
print(s, end="", flush=True)
return errs, accs | model.py | from placeholder import *
from loss import *
from optimizer import *
from layer import *
from tensor import *
from util import *
import numpy as np
from copy import copy
class Model(object):
"""
Grahp model class
"""
def __init__(self, input_placeholder, output_placeholder):
"""
初始化模型参数 input output和layers
"""
self.layers = set()
self.all_placeholders = {}
self.input_placeholders = input_placeholder if isinstance(input_placeholder, list) else [input_placeholder]
self.output_placeholders = input_placeholder if isinstance(output_placeholder, list) else [output_placeholder]
if any(map(lambda x: not isinstance(x, Input,), self.input_placeholders)):
raise NameError('input placeholder is not Input type!')
if any(map(lambda x: not isinstance(x, Input,), self.output_placeholders)):
raise NameError('output placeholder is not Input type!')
def find_placeholders_and_layers(self):
"""
从输出层遍历所有placeholders和layers, 保存并设置id
"""
all_placeholders = {}
queue = []
for placeholder in self.output_placeholders:
"""
从输出层回溯(添加到queue)
"""
id = uniq_id('placeholder')
placeholder.set_id(id)
all_placeholders[id] = placeholder
queue.append(placeholder)
while len(queue) > 0:
current_placeholder = queue[0]
depend_placeholders = current_placeholder.depend_placeholders
# 获取所有layers
if current_placeholder.input_layer is not None and current_placeholder.input_layer not in self.layers:
self.layers.add(current_placeholder.input_layer)
# 回溯到起点了
if depend_placeholders is None:
queue.pop(0)
continue
for depend_placeholder in depend_placeholders:
# 已经添加过得placeholder不再处理
if depend_placeholder.id is not None:
continue
# 获取所有placeholder
placeholder_id = uniq_id('placeholder')
depend_placeholder.set_id(placeholder_id)
all_placeholders[placeholder_id] = depend_placeholder
queue.append(depend_placeholder)
# 所有输入placeholder都获取到了, 处理下一个
queue.pop(0)
# 获取所有placeholders
self.all_placeholders = all_placeholders
def forward(self, input_datas):
"""
从输入层遍历计算所有placeholder的tensor
"""
ready_placeholders_ids = set()
waiting_placeholders_ids = set(self.all_placeholders.keys())
# 从输入层开始, 给输入节点赋值
for i, placeholder in enumerate(self.input_placeholders):
placeholder.set_tensor(input_datas[i])
ready_placeholders_ids.add(placeholder.id)
waiting_placeholders_ids.remove(placeholder.id)
# 遍历计算所有placeholder
while len(waiting_placeholders_ids) > 0:
next_placeholder = None
for waiting_placeholder_id in waiting_placeholders_ids:
waiting_placeholder = self.all_placeholders[waiting_placeholder_id]
depend_placeholders = waiting_placeholder.depend_placeholders
# 需要所有依赖的上游节点都ready
if any(map(lambda x: x.id not in ready_placeholders_ids, depend_placeholders)):
continue
# 当前节点的依赖
depend_tensors = [depend_placeholder.tensor for depend_placeholder in depend_placeholders]
# 计算当前节点的值
if len(depend_tensors) == 1:
output_tensor = waiting_placeholder.input_layer.forward(depend_tensors[0])
elif len(depend_tensors) == 2:
output_tensor = waiting_placeholder.input_layer.forward(depend_tensors[0], depend_tensors[1], )
else:
raise("depend tensors numbuer should be 1 or 2!")
waiting_placeholder.set_tensor(output_tensor)
next_placeholder = waiting_placeholder
break
# 已经计算出结果的节点放入ready集合
if next_placeholder is not None:
ready_placeholders_ids.add(waiting_placeholder.id)
waiting_placeholders_ids.remove(waiting_placeholder.id)
def compile(self, optimizer, loss, validation_data=None):
"""
设置模型优化方法、损失函数和校验数据集
"""
assert isinstance(optimizer, Optimizer)
assert isinstance(loss, Loss)
self.optimizer = optimizer
self.loss = loss
self.validation_data = validation_data
self.find_placeholders_and_layers()
self.optimizer.set_layers(self.layers)
def get_batch_data(self, data):
return data
def fit(self, input_data, output_data, n_epochs, batch_size=None):
"""
训练模型
"""
# 检查输入数据并转换成Tensor格式
input_datas = input_data if isinstance(input_data, list) else [input_data]
output_datas = output_data if isinstance(input_data, list) else [output_data]
if any(map(lambda x: not isinstance(x, np.ndarray), input_datas)):
raise NameError('input placeholder is not Numpy.Array type!')
if any(map(lambda x: not isinstance(x, np.ndarray,), output_datas)):
raise NameError('output placeholder is not Numpy.Array type!')
# 初始化模型所有layer的参数
metrices = []
accs = []
errs = []
for i in range(n_epochs):
for batch_data in get_batch_data(input_datas + output_datas):
batch_input_tensors = list(map(lambda x: Tensor(x, autograd=True), batch_data[:len(input_datas)]))
batch_output_tensors = list(map(lambda x: Tensor(x, autograd=True), batch_data[len(input_datas):]))
# 前向遍历
self.forward(batch_input_tensors)
# todo 暂时只支持单tensor输出
assert len(self.output_placeholders) == 1
assert len(batch_input_tensors) == 1
# 从损失函数结果反向传递梯度
err, acc = self.loss.back_propagate(batch_output_tensors[0], self.output_placeholders[0].tensor)
errs.append(err)
accs.append(acc)
# 更新模型所有参数
self.optimizer.update_layers()
s = "\rProgress[{}], error[{}], acc[{}%] ".format(i, float(err), acc*100)
print(s, end="", flush=True)
return errs, accs | 0.360377 | 0.257967 |
import argparse
import json
import logging
import os
import sys
from conda_recipe_tools.git import FeedStock, NotFeedstockRepo
from conda_recipe_tools.util import get_feedstock_dirs
LOG_FORMAT = '%(asctime)s - %(levelname)s : %(message)s'
def read_last_commits(checkfile):
    """Return the {feedstock_dir: commit_hash} mapping stored in *checkfile*.

    A missing checkfile means nothing has been recorded yet, so an empty
    mapping is returned.
    """
    if not os.path.exists(checkfile):
        return {}
    with open(checkfile) as checkfile_handle:
        return json.load(checkfile_handle)
def find_changed_feedstocks(feedstock_dirs, last_commits, remote_org):
    """ Return a list of feedstocks which have changed.

    Compares each feedstock's remote master head against the hash stored
    in *last_commits*, and updates that mapping in place for every
    changed feedstock.
    """
    changed_feedstocks = []
    for feedstock_dir in feedstock_dirs:
        logging.info('checking: ' + feedstock_dir)
        # Normalise: drop a trailing slash so dict keys stay stable.
        if feedstock_dir.endswith('/'):
            feedstock_dir = feedstock_dir[:-1]
        try:
            feedstock = FeedStock(feedstock_dir)
        except NotFeedstockRepo:
            logging.warning('not a feedstock: ' + feedstock_dir)
            continue
        # Ensure the remote exists and is up to date before resolving it.
        feedstock.add_remote(remote_org, check=False)
        feedstock.fetch(remote_org)
        commit_hash = feedstock.rev_parse(f'{remote_org}/master')
        # A feedstock never seen before (get() -> None) counts as changed.
        if last_commits.get(feedstock_dir) != commit_hash:
            logging.info('feedstock has changed: ' + feedstock_dir)
            changed_feedstocks.append(feedstock_dir)
            last_commits[feedstock_dir] = commit_hash
    return changed_feedstocks
def main():
    """CLI entry point: report feedstock dirs whose remote master moved.

    Writes the refreshed commit hashes back to the checkfile and the
    changed feedstock names to the outfile; returns 0 for use as a
    process exit status.
    """
    parser = argparse.ArgumentParser(description=(
        'Find feedstocks which have changed since they were last checked'))
    parser.add_argument(
        'feedstock_dir', nargs='*',
        help='one or more feedstock directories to check')
    parser.add_argument(
        '--file', '-f', type=str,
        help='file with feedstock directories to check')
    parser.add_argument(
        '--outfile', default='changed_feedstocks.txt', type=str,
        help='name of file to write changed feedstocks.')
    parser.add_argument(
        '--checkfile', default='cf_feedstock_commits.json', type=str,
        help='name of file to check and store the commit hashes')
    parser.add_argument(
        '--remote-org', default='conda-forge', type=str,
        help='GitHub organization to check for updates.')
    # NOTE(review): --base_dir is parsed but never used below — confirm
    # whether get_feedstock_dirs should receive it.
    parser.add_argument(
        '--base_dir', default='.', type=str,
        help='feedstock base directory, default is current directory')
    parser.add_argument(
        '--log', default='info',
        help='log level; debug, info, warning, error, critical')
    args = parser.parse_args()
    # set up logging
    log_numeric_level = getattr(logging, args.log.upper(), None)
    if not isinstance(log_numeric_level, int):
        raise ValueError('Invalid log level: %s' % args.log)
    logging.basicConfig(level=log_numeric_level, format=LOG_FORMAT)
    # find outdated feedstocks
    feedstock_dirs = get_feedstock_dirs(args.feedstock_dir, args.file)
    last_commits = read_last_commits(args.checkfile)
    changed_feedstocks = find_changed_feedstocks(
        feedstock_dirs, last_commits, args.remote_org)
    # write checkfile and outfile
    # (find_changed_feedstocks updated last_commits in place above)
    with open(args.checkfile, 'w') as f:
        json.dump(last_commits, f)
    with open(args.outfile, 'wt') as f:
        for changed_feedstock in changed_feedstocks:
            f.write(changed_feedstock+'\n')
    return 0
if __name__ == "__main__":
sys.exit(main()) | conda_recipe_tools/cli/find_changed_feedstocks.py |
import argparse
import json
import logging
import os
import sys
from conda_recipe_tools.git import FeedStock, NotFeedstockRepo
from conda_recipe_tools.util import get_feedstock_dirs
LOG_FORMAT = '%(asctime)s - %(levelname)s : %(message)s'
def read_last_commits(checkfile):
    """ Read in the latest commits from a json checkfile if it exists. """
    # Missing checkfile: nothing has been recorded yet.
    if not os.path.exists(checkfile):
        return {}
    with open(checkfile) as f:
        last_commits = json.load(f)  # {feedstock_dir: commit_hash}
    return last_commits
def find_changed_feedstocks(feedstock_dirs, last_commits, remote_org):
    """ Return a list of feedstocks which have changed.

    Updates *last_commits* in place with the latest remote master hash of
    every changed feedstock.
    """
    changed_feedstocks = []
    for feedstock_dir in feedstock_dirs:
        logging.info('checking: ' + feedstock_dir)
        # Drop trailing slash so dict keys stay stable.
        if feedstock_dir.endswith('/'):
            feedstock_dir = feedstock_dir[:-1]
        try:
            feedstock = FeedStock(feedstock_dir)
        except NotFeedstockRepo:
            logging.warning('not a feedstock: ' + feedstock_dir)
            continue
        feedstock.add_remote(remote_org, check=False)
        feedstock.fetch(remote_org)
        commit_hash = feedstock.rev_parse(f'{remote_org}/master')
        if last_commits.get(feedstock_dir) != commit_hash:
            logging.info('feedstock has changed: ' + feedstock_dir)
            changed_feedstocks.append(feedstock_dir)
            last_commits[feedstock_dir] = commit_hash
    return changed_feedstocks
def main():
    """CLI entry point; returns 0 for use as a process exit status."""
    parser = argparse.ArgumentParser(description=(
        'Find feedstocks which have changed since they were last checked'))
    parser.add_argument(
        'feedstock_dir', nargs='*',
        help='one or more feedstock directories to check')
    parser.add_argument(
        '--file', '-f', type=str,
        help='file with feedstock directories to check')
    parser.add_argument(
        '--outfile', default='changed_feedstocks.txt', type=str,
        help='name of file to write changed feedstocks.')
    parser.add_argument(
        '--checkfile', default='cf_feedstock_commits.json', type=str,
        help='name of file to check and store the commit hashes')
    parser.add_argument(
        '--remote-org', default='conda-forge', type=str,
        help='GitHub organization to check for updates.')
    # NOTE(review): --base_dir is parsed but never used.
    parser.add_argument(
        '--base_dir', default='.', type=str,
        help='feedstock base directory, default is current directory')
    parser.add_argument(
        '--log', default='info',
        help='log level; debug, info, warning, error, critical')
    args = parser.parse_args()
    # set up logging
    log_numeric_level = getattr(logging, args.log.upper(), None)
    if not isinstance(log_numeric_level, int):
        raise ValueError('Invalid log level: %s' % args.log)
    logging.basicConfig(level=log_numeric_level, format=LOG_FORMAT)
    # find outdated feedstocks
    feedstock_dirs = get_feedstock_dirs(args.feedstock_dir, args.file)
    last_commits = read_last_commits(args.checkfile)
    changed_feedstocks = find_changed_feedstocks(
        feedstock_dirs, last_commits, args.remote_org)
    # write checkfile and outfile
    with open(args.checkfile, 'w') as f:
        json.dump(last_commits, f)
    with open(args.outfile, 'wt') as f:
        for changed_feedstock in changed_feedstocks:
            f.write(changed_feedstock+'\n')
    return 0
if __name__ == "__main__":
sys.exit(main()) | 0.319227 | 0.07989 |
import json
import jieba
from django.db.models import Q
from algo.model.model import CustomModel
from algo.model.model_config import BertBilstmCrfConfig
from keras.models import Model
from keras.layers import Bidirectional, LSTM, Dense, Dropout
from keras.optimizers import Adam
from keras_contrib.layers import CRF
import keras_bert
import os
from algo.models import NerData
# 获取词典
unk_flag = '[UNK]'
pad_flag = '[PAD]'
cls_flag = '[CLS]'
sep_flag = '[SEP]'
class BertBilstmCrf(CustomModel):
    """BERT + BiLSTM + CRF named-entity-recognition model.

    Loads a pre-trained BERT checkpoint, stacks a bidirectional LSTM and a
    CRF decoding layer on top, and trains on human-labelled sentences from
    the NerData table.
    """

    def __init__(self,
                 vocab_size: int,
                 n_class: int,
                 max_len: int = 100,
                 embedding_dim: int = 128,
                 rnn_units: int = 128,
                 drop_rate: float = 0.5,
                 ):
        self.vocab_size = vocab_size
        self.n_class = n_class
        self.max_len = max_len
        self.embedding_dim = embedding_dim
        self.rnn_units = rnn_units
        self.drop_rate = drop_rate
        # Pre-trained BERT checkpoint files.
        self.config_path = os.path.join(BertBilstmCrfConfig.BERT_MODEL_DIR, 'bert_config.json')
        self.check_point_path = os.path.join(BertBilstmCrfConfig.BERT_MODEL_DIR, 'bert_model.ckpt')
        self.dict_path = os.path.join(BertBilstmCrfConfig.BERT_MODEL_DIR, 'vocab.txt')
        self.epochs = 15
        self.w2i = get_w2i()  # word -> index
        self.one_hot = True
        # Special-token ids; the fallbacks follow the usual BERT vocabulary.
        self.unk_index = self.w2i.get(unk_flag, 101)
        self.pad_index = self.w2i.get(pad_flag, 1)
        self.cls_index = self.w2i.get(cls_flag, 102)
        self.sep_index = self.w2i.get(sep_flag, 103)
        # FIX: this assignment was duplicated in the original.
        self.tag2index = get_tag2index()  # tag -> index
        self.tag_size = len(self.tag2index)

    def precess_data(self):
        """Build train/test arrays from the human-labelled rows of NerData."""
        # Only rows that have been tagged by a human.
        queryset = NerData.objects.filter(~Q(human_tag=None))
        # human label example: [{"begin": 2, "end": 3, "pos": "LOC"}]
        sentences = []
        tags = []
        for q in queryset:
            # FIX: Django model instances are not subscriptable — use
            # attribute access instead of q['text'] / q['human_label'].
            sentence = q.text
            poses = json.loads(q.human_label)
            # Expand span annotations into per-character BIO tags.
            tag = ['O'] * len(sentence)
            for pos in poses:
                begin = int(pos['begin'])
                end = int(pos['end'])
                pos_tag = pos['pos']
                tag[begin] = f"B-{pos_tag}"
                if end > begin:
                    tag[begin+1:end] = (end-begin-1) * [f"I-{pos_tag}"]
            tags.append(tag)
            sentences.append(sentence)
        # Vectorise sentences and tags.
        data = self.data_to_index(sentences)
        label = self.label_to_index(tags)
        # One-hot encode the labels when required by the loss.
        if self.one_hot:
            def label_to_one_hot(label_matrix):
                # One-hot encode a (samples, seq_len) tag-index matrix.
                encoded = []
                for line in label_matrix:
                    data_line = []
                    for tag_index in line:
                        row = [0] * self.tag_size
                        row[tag_index] = 1
                        data_line.append(row)
                    encoded.append(data_line)
                return np.array(encoded)
            data_label = label_to_one_hot(label)
        else:
            data_label = np.expand_dims(label, 2)
        # FIX: the original sliced axis 1 (sequence positions) instead of
        # axis 0 (samples), and put the *last* 20% of samples in the
        # training set. Split on samples: first 80% train, last 20% test.
        train_data_proportion = 0.8
        num = len(data[0])
        split = int(train_data_proportion * num)
        self.train_data = [data[0][:split], data[1][:split]]
        self.train_label = data_label[:split]
        self.test_data = [data[0][split:], data[1][split:]]
        self.test_label = data_label[split:]

    def label_to_index(self, tags):
        """
        Convert tag sequences to left-padded index arrays of length max_len.
        """
        label_ids = []
        line_label = []
        for tag in tags:
            for t in tag:
                t_index = self.tag2index.get(t, 0)
                line_label.append(t_index)  # label index
            max_len_buff = self.max_len-2
            if len(line_label) > max_len_buff:  # truncate first
                line_label = line_label[:max_len_buff]
            # Zero labels in the positions of the [CLS]/[SEP] tokens.
            line_label = [0] + line_label + [0]
            if len(line_label) < self.max_len:  # left-pad to max_len
                pad_num = self.max_len - len(line_label)
                line_label = [0] * pad_num + line_label
            label_ids.append(np.array(line_label))
            line_label = []
        return np.array(label_ids)

    def data_to_index(self, sentences):
        """
        Convert sentences to BERT inputs: [token-id array, segment-type array].
        """
        data_ids = []
        data_types = []
        line_data_ids = []
        line_data_types = []
        for sentence in sentences:
            for w in sentence:
                # BERT wants token ids plus segment types; single-sentence
                # input, so every segment type is 0.
                w_index = self.w2i.get(w, self.unk_index)
                line_data_ids.append(w_index)  # index
                line_data_types.append(0)  # types
            max_len_buff = self.max_len-2
            if len(line_data_ids) > max_len_buff:  # truncate first
                line_data_ids = line_data_ids[:max_len_buff]
                line_data_types = line_data_types[:max_len_buff]
            line_data_ids = [self.cls_index] + line_data_ids + [self.sep_index]
            line_data_types = [0] + line_data_types + [0]
            if len(line_data_ids) < self.max_len:  # left-pad to max_len
                pad_num = self.max_len - len(line_data_ids)
                line_data_ids = [self.pad_index]*pad_num + line_data_ids
                line_data_types = [0] * pad_num + line_data_types
            data_ids.append(np.array(line_data_ids))
            data_types.append(np.array(line_data_types))
            line_data_ids = []
            line_data_types = []
        return [np.array(data_ids), np.array(data_types)]

    def build(self):
        """Assemble and compile the network; returns the compiled model."""
        self.precess_data()
        print('load bert Model start!')
        model = keras_bert.load_trained_model_from_checkpoint(self.config_path,
                                                              checkpoint_file=self.check_point_path,
                                                              seq_len=self.max_len,
                                                              trainable=True)
        print('load bert Model end!')
        inputs = model.inputs
        embedding = model.output
        x = Bidirectional(LSTM(units=self.rnn_units, return_sequences=True))(embedding)
        x = Dropout(self.drop_rate)(x)
        x = Dense(self.n_class)(x)
        self.crf = CRF(self.n_class, sparse_target=False)
        x = self.crf(x)
        self.model = Model(inputs=inputs, outputs=x)
        self.model.summary()
        self.model.compile(optimizer=Adam(1e-5),
                           loss=self.crf.loss_function,
                           metrics=[self.crf.accuracy])
        # FIX: return the compiled model so callers that do
        # `model = x.build()` (e.g. train_sample) receive it instead of None.
        return self.model

    def train(self):
        """Train on the DB data and return the per-epoch history list."""
        train_model = 'BERTBILSTMCRF'
        callback = TrainHistory(model_name=train_model)  # records per-epoch metrics
        early_stopping = EarlyStopping(monitor='val_crf_viterbi_accuracy', patience=2, mode='max')
        self.model.fit(self.train_data, self.train_label, batch_size=32, epochs=self.epochs,
                       validation_data=(self.test_data, self.test_label),
                       callbacks=[callback, early_stopping])
        # Macro f1 / recall on the test set.
        pre = np.array(self.model.predict(self.test_data))
        test_label = np.array(self.test_label)
        pre = np.argmax(pre, axis=2)
        test_label = np.argmax(test_label, axis=2)
        pre = pre.reshape(pre.shape[0] * pre.shape[1], )
        test_label = test_label.reshape(test_label.shape[0] * test_label.shape[1], )
        f1score = f1_score(pre, test_label, average='macro')
        recall = recall_score(pre, test_label, average='macro')
        print("================================================")
        print(f"--------------:f1: {f1score} --------------")
        print(f"--------------:recall: {recall} --------------")
        print("================================================")
        # Attach f1/recall to the last epoch record.
        info_list = callback.info
        if info_list and len(info_list) > 0:
            last_info = info_list[-1]
            last_info['f1'] = f1score
            last_info['recall'] = recall
        return info_list

    def predict_all(self):
        """Predict tags for every row in NerData."""
        queryset = NerData.objects.all()
        # FIX: attribute access instead of s['text'] on model instances.
        sentences = [s.text for s in queryset]
        data = self.data_to_index(sentences)
        predict = self.model.predict(data)
        # todo store the predictions back into the database
        return predict
from sklearn.metrics import f1_score, recall_score
import numpy as np
from keras.callbacks import EarlyStopping
from algo.model.process_data import DataProcess, get_w2i, get_tag2index
max_len = 100
def train_sample(epochs=15):
    """Train BertBilstmCrf on the MSRA sample set; return the epoch history."""
    # BERT needs its own preprocessing parameters.
    dp = DataProcess(data_type='msra', max_len=max_len, model='bert')
    # todo read the data from the database instead
    train_data, train_label, test_data, test_label = dp.get_data(one_hot=True)
    print("----------------------------数据信息 START--------------------------")
    print(f"当前使用数据集 MSRA")
    # log.info(f"train_data:{train_data.shape}")
    print(f"train_label:{train_label.shape}")
    # log.info(f"test_data:{test_data.shape}")
    print(f"test_label:{test_label.shape}")
    print("----------------------------数据信息 END--------------------------")
    model_class = BertBilstmCrf(dp.vocab_size, dp.tag_size, max_len=max_len)
    # FIX: in the original, build() stored the compiled model on the instance
    # and returned None, so `model = model_class.build()` left model as None
    # and the following fit() call failed. Fetch the model from the instance.
    model_class.build()
    model = model_class.model
    train_model = 'BERTBILSTMCRF'
    callback = TrainHistory(model_name=train_model)  # records per-epoch metrics
    early_stopping = EarlyStopping(monitor='val_crf_viterbi_accuracy', patience=2, mode='max')
    model.fit(train_data, train_label, batch_size=32, epochs=epochs,
              validation_data=(test_data, test_label),
              callbacks=[callback, early_stopping])
    # Macro f1 / recall on the test set.
    pre = np.array(model.predict(test_data))
    test_label = np.array(test_label)
    pre = np.argmax(pre, axis=2)
    test_label = np.argmax(test_label, axis=2)
    pre = pre.reshape(pre.shape[0] * pre.shape[1], )
    test_label = test_label.reshape(test_label.shape[0] * test_label.shape[1], )
    f1score = f1_score(pre, test_label, average='macro')
    recall = recall_score(pre, test_label, average='macro')
    print("================================================")
    print(f"--------------:f1: {f1score} --------------")
    print(f"--------------:recall: {recall} --------------")
    print("================================================")
    # Attach f1/recall to the last epoch record.
    info_list = callback.info
    if info_list and len(info_list) > 0:
        last_info = info_list[-1]
        last_info['f1'] = f1score
        last_info['recall'] = recall
    return info_list
import keras
class TrainHistory(keras.callbacks.Callback):
    """Keras callback that prints progress and records per-epoch metrics."""

    def __init__(self, model_name=None):
        super(TrainHistory, self).__init__()
        self.model_name = model_name
        self.epoch = 0
        self.info = []  # one metrics dict per finished epoch

    def on_epoch_begin(self, epoch, logs=None):
        self.epoch = epoch
        message = f"begin epoch: {self.epoch}"
        print(message)

    def on_epoch_end(self, epoch, logs=None):
        # FIX: avoid the mutable default argument `logs={}`.
        logs = logs or {}
        message = f'end epoch: {epoch} loss:{logs["loss"]} val_loss:{logs["val_loss"]} acc:{logs["crf_viterbi_accuracy"]} val_acc:{logs["val_crf_viterbi_accuracy"]}'
        print(message)
        # Renamed from `dict`, which shadowed the builtin.
        record = {
            'model_name': self.model_name,
            'epoch': self.epoch+1,
            'loss': logs["loss"],
            'acc': logs['crf_viterbi_accuracy'],
            'val_loss': logs["val_loss"],
            'val_acc': logs['val_crf_viterbi_accuracy']
        }
        self.info.append(record)

    def on_batch_end(self, batch, logs=None):
        logs = logs or {}
        message = f'{self.model_name} epoch: {self.epoch} batch:{batch} loss:{logs["loss"]} acc:{logs["crf_viterbi_accuracy"]}'
        print(message)
import jieba
from django.db.models import Q
from algo.model.model import CustomModel
from algo.model.model_config import BertBilstmCrfConfig
from keras.models import Model
from keras.layers import Bidirectional, LSTM, Dense, Dropout
from keras.optimizers import Adam
from keras_contrib.layers import CRF
import keras_bert
import os
from algo.models import NerData
# 获取词典
unk_flag = '[UNK]'
pad_flag = '[PAD]'
cls_flag = '[CLS]'
sep_flag = '[SEP]'
class BertBilstmCrf(CustomModel):
    """BERT + BiLSTM + CRF named-entity-recognition model.

    Loads a pre-trained BERT checkpoint, stacks a bidirectional LSTM and a
    CRF decoding layer on top, and trains on human-labelled sentences from
    the NerData table.
    """

    def __init__(self,
                 vocab_size: int,
                 n_class: int,
                 max_len: int = 100,
                 embedding_dim: int = 128,
                 rnn_units: int = 128,
                 drop_rate: float = 0.5,
                 ):
        self.vocab_size = vocab_size
        self.n_class = n_class
        self.max_len = max_len
        self.embedding_dim = embedding_dim
        self.rnn_units = rnn_units
        self.drop_rate = drop_rate
        # Pre-trained BERT checkpoint files.
        self.config_path = os.path.join(BertBilstmCrfConfig.BERT_MODEL_DIR, 'bert_config.json')
        self.check_point_path = os.path.join(BertBilstmCrfConfig.BERT_MODEL_DIR, 'bert_model.ckpt')
        self.dict_path = os.path.join(BertBilstmCrfConfig.BERT_MODEL_DIR, 'vocab.txt')
        self.epochs = 15
        self.w2i = get_w2i()  # word -> index
        self.one_hot = True
        # Special-token ids; the fallbacks follow the usual BERT vocabulary.
        self.unk_index = self.w2i.get(unk_flag, 101)
        self.pad_index = self.w2i.get(pad_flag, 1)
        self.cls_index = self.w2i.get(cls_flag, 102)
        self.sep_index = self.w2i.get(sep_flag, 103)
        # FIX: this assignment was duplicated in the original.
        self.tag2index = get_tag2index()  # tag -> index
        self.tag_size = len(self.tag2index)

    def precess_data(self):
        """Build train/test arrays from the human-labelled rows of NerData."""
        # Only rows that have been tagged by a human.
        queryset = NerData.objects.filter(~Q(human_tag=None))
        # human label example: [{"begin": 2, "end": 3, "pos": "LOC"}]
        sentences = []
        tags = []
        for q in queryset:
            # FIX: Django model instances are not subscriptable — use
            # attribute access instead of q['text'] / q['human_label'].
            sentence = q.text
            poses = json.loads(q.human_label)
            # Expand span annotations into per-character BIO tags.
            tag = ['O'] * len(sentence)
            for pos in poses:
                begin = int(pos['begin'])
                end = int(pos['end'])
                pos_tag = pos['pos']
                tag[begin] = f"B-{pos_tag}"
                if end > begin:
                    tag[begin+1:end] = (end-begin-1) * [f"I-{pos_tag}"]
            tags.append(tag)
            sentences.append(sentence)
        # Vectorise sentences and tags.
        data = self.data_to_index(sentences)
        label = self.label_to_index(tags)
        # One-hot encode the labels when required by the loss.
        if self.one_hot:
            def label_to_one_hot(label_matrix):
                # One-hot encode a (samples, seq_len) tag-index matrix.
                encoded = []
                for line in label_matrix:
                    data_line = []
                    for tag_index in line:
                        row = [0] * self.tag_size
                        row[tag_index] = 1
                        data_line.append(row)
                    encoded.append(data_line)
                return np.array(encoded)
            data_label = label_to_one_hot(label)
        else:
            data_label = np.expand_dims(label, 2)
        # FIX: the original sliced axis 1 (sequence positions) instead of
        # axis 0 (samples), and put the *last* 20% of samples in the
        # training set. Split on samples: first 80% train, last 20% test.
        train_data_proportion = 0.8
        num = len(data[0])
        split = int(train_data_proportion * num)
        self.train_data = [data[0][:split], data[1][:split]]
        self.train_label = data_label[:split]
        self.test_data = [data[0][split:], data[1][split:]]
        self.test_label = data_label[split:]

    def label_to_index(self, tags):
        """
        Convert tag sequences to left-padded index arrays of length max_len.
        """
        label_ids = []
        line_label = []
        for tag in tags:
            for t in tag:
                t_index = self.tag2index.get(t, 0)
                line_label.append(t_index)  # label index
            max_len_buff = self.max_len-2
            if len(line_label) > max_len_buff:  # truncate first
                line_label = line_label[:max_len_buff]
            # Zero labels in the positions of the [CLS]/[SEP] tokens.
            line_label = [0] + line_label + [0]
            if len(line_label) < self.max_len:  # left-pad to max_len
                pad_num = self.max_len - len(line_label)
                line_label = [0] * pad_num + line_label
            label_ids.append(np.array(line_label))
            line_label = []
        return np.array(label_ids)

    def data_to_index(self, sentences):
        """
        Convert sentences to BERT inputs: [token-id array, segment-type array].
        """
        data_ids = []
        data_types = []
        line_data_ids = []
        line_data_types = []
        for sentence in sentences:
            for w in sentence:
                # BERT wants token ids plus segment types; single-sentence
                # input, so every segment type is 0.
                w_index = self.w2i.get(w, self.unk_index)
                line_data_ids.append(w_index)  # index
                line_data_types.append(0)  # types
            max_len_buff = self.max_len-2
            if len(line_data_ids) > max_len_buff:  # truncate first
                line_data_ids = line_data_ids[:max_len_buff]
                line_data_types = line_data_types[:max_len_buff]
            line_data_ids = [self.cls_index] + line_data_ids + [self.sep_index]
            line_data_types = [0] + line_data_types + [0]
            if len(line_data_ids) < self.max_len:  # left-pad to max_len
                pad_num = self.max_len - len(line_data_ids)
                line_data_ids = [self.pad_index]*pad_num + line_data_ids
                line_data_types = [0] * pad_num + line_data_types
            data_ids.append(np.array(line_data_ids))
            data_types.append(np.array(line_data_types))
            line_data_ids = []
            line_data_types = []
        return [np.array(data_ids), np.array(data_types)]

    def build(self):
        """Assemble and compile the network; returns the compiled model."""
        self.precess_data()
        print('load bert Model start!')
        model = keras_bert.load_trained_model_from_checkpoint(self.config_path,
                                                              checkpoint_file=self.check_point_path,
                                                              seq_len=self.max_len,
                                                              trainable=True)
        print('load bert Model end!')
        inputs = model.inputs
        embedding = model.output
        x = Bidirectional(LSTM(units=self.rnn_units, return_sequences=True))(embedding)
        x = Dropout(self.drop_rate)(x)
        x = Dense(self.n_class)(x)
        self.crf = CRF(self.n_class, sparse_target=False)
        x = self.crf(x)
        self.model = Model(inputs=inputs, outputs=x)
        self.model.summary()
        self.model.compile(optimizer=Adam(1e-5),
                           loss=self.crf.loss_function,
                           metrics=[self.crf.accuracy])
        # FIX: return the compiled model so callers that do
        # `model = x.build()` (e.g. train_sample) receive it instead of None.
        return self.model

    def train(self):
        """Train on the DB data and return the per-epoch history list."""
        train_model = 'BERTBILSTMCRF'
        callback = TrainHistory(model_name=train_model)  # records per-epoch metrics
        early_stopping = EarlyStopping(monitor='val_crf_viterbi_accuracy', patience=2, mode='max')
        self.model.fit(self.train_data, self.train_label, batch_size=32, epochs=self.epochs,
                       validation_data=(self.test_data, self.test_label),
                       callbacks=[callback, early_stopping])
        # Macro f1 / recall on the test set.
        pre = np.array(self.model.predict(self.test_data))
        test_label = np.array(self.test_label)
        pre = np.argmax(pre, axis=2)
        test_label = np.argmax(test_label, axis=2)
        pre = pre.reshape(pre.shape[0] * pre.shape[1], )
        test_label = test_label.reshape(test_label.shape[0] * test_label.shape[1], )
        f1score = f1_score(pre, test_label, average='macro')
        recall = recall_score(pre, test_label, average='macro')
        print("================================================")
        print(f"--------------:f1: {f1score} --------------")
        print(f"--------------:recall: {recall} --------------")
        print("================================================")
        # Attach f1/recall to the last epoch record.
        info_list = callback.info
        if info_list and len(info_list) > 0:
            last_info = info_list[-1]
            last_info['f1'] = f1score
            last_info['recall'] = recall
        return info_list

    def predict_all(self):
        """Predict tags for every row in NerData."""
        queryset = NerData.objects.all()
        # FIX: attribute access instead of s['text'] on model instances.
        sentences = [s.text for s in queryset]
        data = self.data_to_index(sentences)
        predict = self.model.predict(data)
        # todo store the predictions back into the database
        return predict
from sklearn.metrics import f1_score, recall_score
import numpy as np
from keras.callbacks import EarlyStopping
from algo.model.process_data import DataProcess, get_w2i, get_tag2index
max_len = 100
def train_sample(epochs=15):
    """Train BertBilstmCrf on the MSRA sample set; return the epoch history."""
    # BERT needs its own preprocessing parameters.
    dp = DataProcess(data_type='msra', max_len=max_len, model='bert')
    # todo read the data from the database instead
    train_data, train_label, test_data, test_label = dp.get_data(one_hot=True)
    print("----------------------------数据信息 START--------------------------")
    print(f"当前使用数据集 MSRA")
    # log.info(f"train_data:{train_data.shape}")
    print(f"train_label:{train_label.shape}")
    # log.info(f"test_data:{test_data.shape}")
    print(f"test_label:{test_label.shape}")
    print("----------------------------数据信息 END--------------------------")
    model_class = BertBilstmCrf(dp.vocab_size, dp.tag_size, max_len=max_len)
    # FIX: in the original, build() stored the compiled model on the instance
    # and returned None, so `model = model_class.build()` left model as None
    # and the following fit() call failed. Fetch the model from the instance.
    model_class.build()
    model = model_class.model
    train_model = 'BERTBILSTMCRF'
    callback = TrainHistory(model_name=train_model)  # records per-epoch metrics
    early_stopping = EarlyStopping(monitor='val_crf_viterbi_accuracy', patience=2, mode='max')
    model.fit(train_data, train_label, batch_size=32, epochs=epochs,
              validation_data=(test_data, test_label),
              callbacks=[callback, early_stopping])
    # Macro f1 / recall on the test set.
    pre = np.array(model.predict(test_data))
    test_label = np.array(test_label)
    pre = np.argmax(pre, axis=2)
    test_label = np.argmax(test_label, axis=2)
    pre = pre.reshape(pre.shape[0] * pre.shape[1], )
    test_label = test_label.reshape(test_label.shape[0] * test_label.shape[1], )
    f1score = f1_score(pre, test_label, average='macro')
    recall = recall_score(pre, test_label, average='macro')
    print("================================================")
    print(f"--------------:f1: {f1score} --------------")
    print(f"--------------:recall: {recall} --------------")
    print("================================================")
    # Attach f1/recall to the last epoch record.
    info_list = callback.info
    if info_list and len(info_list) > 0:
        last_info = info_list[-1]
        last_info['f1'] = f1score
        last_info['recall'] = recall
    return info_list
import keras
class TrainHistory(keras.callbacks.Callback):
    """Keras callback that prints progress and records per-epoch metrics."""

    def __init__(self, model_name=None):
        super(TrainHistory, self).__init__()
        self.model_name = model_name
        self.epoch = 0
        self.info = []  # one metrics dict per finished epoch

    def on_epoch_begin(self, epoch, logs=None):
        self.epoch = epoch
        message = f"begin epoch: {self.epoch}"
        print(message)

    def on_epoch_end(self, epoch, logs=None):
        # FIX: avoid the mutable default argument `logs={}`.
        logs = logs or {}
        message = f'end epoch: {epoch} loss:{logs["loss"]} val_loss:{logs["val_loss"]} acc:{logs["crf_viterbi_accuracy"]} val_acc:{logs["val_crf_viterbi_accuracy"]}'
        print(message)
        # Renamed from `dict`, which shadowed the builtin.
        record = {
            'model_name': self.model_name,
            'epoch': self.epoch+1,
            'loss': logs["loss"],
            'acc': logs['crf_viterbi_accuracy'],
            'val_loss': logs["val_loss"],
            'val_acc': logs['val_crf_viterbi_accuracy']
        }
        self.info.append(record)

    def on_batch_end(self, batch, logs=None):
        logs = logs or {}
        message = f'{self.model_name} epoch: {self.epoch} batch:{batch} loss:{logs["loss"]} acc:{logs["crf_viterbi_accuracy"]}'
        print(message)
import click
import plistlib
import subprocess
import os
"""
MIT License
Copyright (c) 2019 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
# ANSI-coloured status markers for terminal output.
GREEN_CHECK = '\033[1;32;40m[✓]\033[0;37;40m'
YELLOW_CHECK = '\033[1;33;40m[!]\033[0;37;40m'
FAIL_DOT = '\033[1;31;40m✗\033[0;37;40m'
# FIX: the pass marker used the failure glyph '✗' (in green); use a check mark.
PASS_DOT = '\033[1;32;40m✓\033[0;37;40m'
IDENT = ' '
def replace_in_file(filename, orgstr, newstr):
    """Replace the plist <string> entry holding *orgstr* with *newstr* in *filename*."""
    # Read, substitute, then rewrite the file in place.
    with open(filename, "rt", encoding="ISO-8859-1") as f:
        new_text = f.read().replace(wrap_with_string(orgstr), wrap_with_string(newstr))
    # FIX: dropped the redundant f.close() calls inside the `with` blocks —
    # the context manager already closes the file.
    with open(filename, "wt", encoding="ISO-8859-1") as f:
        f.write(new_text)
def wrap_with_string(instr):
    """Wrap *instr* in a plist <string> element.

    FIX: the original returned '</string' without the closing '>'; the
    substitution in replace_in_file still worked only by accident because
    the trailing '>' was left untouched in the file.
    """
    return "<string>" + instr + "</string>"
def dump_infoplist(bundle):
    """Print the key ApplicationProperties fields of an xcarchive's Info.plist."""
    plist_path = bundle + '/Info.plist'
    with open(plist_path, 'rb') as plist_file:
        archive_info = plistlib.load(plist_file)
    app_props = archive_info["ApplicationProperties"]
    # Emit one "Key: value" line per property, in a fixed order.
    for key in ("ApplicationPath", "CFBundleIdentifier", "SigningIdentity", "Team"):
        print(key + ': ' + app_props[key])
def get_infoplist(info_filename):
    """Parse and return the plist file at *info_filename*."""
    with open(info_filename, 'rb') as plist_file:
        raw_bytes = plist_file.read()
    return plistlib.loads(raw_bytes)
@click.group()
def main():
    """
    Handy commands for iOS developers.
    """
    # Click group: subcommands (amend, analyze, doctor) attach via @main.command().
    pass
@main.command()
@click.option('--bundle', help='Path to xcarchive bundle.')
def amend(bundle):
    """Set bundle properties."""
    if bundle and os.path.isdir(bundle):
        # Only .xcarchive bundles are supported.
        if bundle.endswith('.xcarchive'):
            amend_xcarchive(bundle)
        # NOTE(review): a directory without the .xcarchive suffix is silently
        # ignored — no message is printed. Confirm this is intended.
    else:
        click.echo('Bundle was not found.')
def amend_xcarchive(bundle):
    """Interactively rewrite bundle id, signing identity and team id.

    Prompts for each value (empty input keeps the current one) and performs
    plist <string> substitution via replace_in_file.
    """
    info_filename = bundle + '/Info.plist'
    info = get_infoplist(info_filename)
    props = info["ApplicationProperties"]
    # Current values are shown as defaults in the prompts.
    bundle_id = input('App Bundle Identifier [' + props["CFBundleIdentifier"] + ']: ')
    signing_identity = input('Signing Identity [' + props["SigningIdentity"] + ']: ')
    team_id = input('Apple Developer Team Id [' + props["Team"] + ']: ')
    if bundle_id:
        old_bid = props["CFBundleIdentifier"]
        replace_in_file(info_filename, old_bid, bundle_id)
        # Update bundle id in app bundle.
        app_path_info = bundle + '/Products/' + props["ApplicationPath"] + '/Info.plist'
        replace_in_file(app_path_info, old_bid, bundle_id)
        # props still holds the pre-edit values, so old -> new is printed.
        print('+ Changed bundle identifier from \'' + props["CFBundleIdentifier"] + '\' to \'' + bundle_id + '\'')
    if signing_identity:
        replace_in_file(info_filename, props["SigningIdentity"], signing_identity)
        print('+ Changed signing identity from \'' + props["SigningIdentity"] + '\' to \'' + signing_identity + '\'')
    if team_id:
        replace_in_file(info_filename, props["Team"], team_id)
        print('+ Changed team id from \'' + props["Team"] + '\' to \'' + team_id + '\'')
@main.command()
@click.option('--bundle', help='Path to app bundle.')
def analyze(bundle):
    """Analyze iOS app bundle. Displays relevant information."""
    # NOTE(review): no guard for a missing --bundle option; dump_infoplist
    # will raise if bundle is None or has no Info.plist. Confirm intended.
    dump_infoplist(bundle)
@main.command()
@click.option('--verbose/--no-verbose', default=False, help='Verbose output.')
def doctor(verbose):
    """Show information about the installed tooling."""
    print('Doctor summary (to see all details, run doctor --verbose):')
    # FIX: `which` exits 0 when the command is found; the original passed a
    # stray '/dev/null' argument and treated exit code 1 as success.
    pro = subprocess.run(["which", "codesign"], capture_output=True)
    if pro.returncode == 0:
        print(GREEN_CHECK + ' Codesign tool installed')
        if verbose:
            print(IDENT + PASS_DOT + ' `which codesign`, exit 0')
    else:
        # FIX: failures were reported with the green check mark.
        print(YELLOW_CHECK + ' Codesign tool missing')
        if verbose:
            print(IDENT + FAIL_DOT + ' `which codesign`, exit ' + str(pro.returncode))
    # FIX: dropped the stray '/dev/null' argument here as well.
    pro = subprocess.run(["xcodebuild", "-version"], capture_output=True)
    if pro.returncode == 0:
        xcinf = pro.stdout.decode('utf-8').split('\n')
        print(GREEN_CHECK + ' ' + xcinf[0] + ' (' + xcinf[1] + ') installed')
        if verbose:
            print(IDENT + PASS_DOT + ' `xcodebuild -version`, exit 0')
    else:
        print(YELLOW_CHECK + ' Xcode missing')
        if verbose:
            print(IDENT + FAIL_DOT + ' `xcodebuild -version`, exit ' + str(pro.returncode))
    # List keychain developer certificates.
    pro = subprocess.run(["/usr/bin/security", "-q", "find-certificate", "-a", "-c", "iPhone Developer:"], capture_output=True)
    if pro.returncode == 0:
        secout = pro.stdout.decode('utf-8').split('\n')
        raw_labls = list(filter(lambda s: 'labl' in s, secout))
        # Strip the '    "labl"<blob>="' prefix and trailing quote.
        labls = set(list(map(lambda s: s[18:len(s)-1], raw_labls)))
        if len(labls) > 0:
            print(GREEN_CHECK + ' Keychain developer certifications')
            for _, val in enumerate(labls):
                print(IDENT + PASS_DOT + ' ' + val)
        else:
            # FIX: was the garbled 'Installed iOS developer certifications
            # installed', shown with a green check.
            print(YELLOW_CHECK + ' No iOS developer certifications installed')
    else:
        print(YELLOW_CHECK + ' List of keychain developer certifications failed')
    # List keychain distribution certificates.
    pro = subprocess.run(["/usr/bin/security", "-q", "find-certificate", "-a", "-c", "iPhone Distribution:"], capture_output=True)
    if pro.returncode == 0:
        secout = pro.stdout.decode('utf-8').split('\n')
        raw_labls = list(filter(lambda s: 'labl' in s, secout))
        labls = set(list(map(lambda s: s[18:len(s)-1], raw_labls)))
        if len(labls) > 0:
            print(GREEN_CHECK + ' Installed distribution certifications')
            for _, val in enumerate(labls):
                print(IDENT + PASS_DOT + ' ' + val)
        else:
            print(YELLOW_CHECK + ' No iOS distribution certifications installed')
    else:
        print(YELLOW_CHECK + ' List of keychain distribution certifications failed')
if __name__ == "__main__":
main() | cider.py | import click
import plistlib
import subprocess
import os
"""
MIT License
Copyright (c) 2019 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
# ANSI-coloured status markers for terminal output.
GREEN_CHECK = '\033[1;32;40m[✓]\033[0;37;40m'
YELLOW_CHECK = '\033[1;33;40m[!]\033[0;37;40m'
FAIL_DOT = '\033[1;31;40m✗\033[0;37;40m'
# FIX: the pass marker used the failure glyph '✗' (in green); use a check mark.
PASS_DOT = '\033[1;32;40m✓\033[0;37;40m'
IDENT = ' '
def replace_in_file(filename, orgstr, newstr):
with open(filename, "rt", encoding="ISO-8859-1") as f:
newText=f.read().replace(wrap_with_string(orgstr), wrap_with_string(newstr))
f.close()
with open(filename, "wt", encoding="ISO-8859-1") as f:
f.write(newText)
f.close()
def wrap_with_string(instr):
return "<string>" + instr + "</string"
def dump_infoplist(bundle):
info_filename = bundle + '/Info.plist'
with open(info_filename, 'rb') as fp:
info = plistlib.load(fp)
props = info["ApplicationProperties"]
print('ApplicationPath: ' + props["ApplicationPath"])
print('CFBundleIdentifier: ' + props["CFBundleIdentifier"])
print('SigningIdentity: ' + props["SigningIdentity"])
print('Team: ' + props["Team"])
def get_infoplist(info_filename):
with open(info_filename, 'rb') as fp:
return plistlib.load(fp)
@click.group()
def main():
"""
Handy commands for iOS developers.
"""
pass
@main.command()
@click.option('--bundle', help='Path to xcarchive bundle.')
def amend(bundle):
"""Set bundle properties."""
if bundle and os.path.isdir(bundle):
if bundle.endswith('.xcarchive'):
amend_xcarchive(bundle)
else:
click.echo('Bundle was not found.')
def amend_xcarchive(bundle):
info_filename = bundle + '/Info.plist'
info = get_infoplist(info_filename)
props = info["ApplicationProperties"]
bundle_id = input('App Bundle Identifier [' + props["CFBundleIdentifier"] + ']: ')
signing_identity = input('Signing Identity [' + props["SigningIdentity"] + ']: ')
team_id = input('Apple Developer Team Id [' + props["Team"] + ']: ')
if bundle_id:
old_bid = props["CFBundleIdentifier"]
replace_in_file(info_filename, old_bid, bundle_id)
# Update bundle id in app bundle.
app_path_info = bundle + '/Products/' + props["ApplicationPath"] + '/Info.plist'
replace_in_file(app_path_info, old_bid, bundle_id)
print('+ Changed bundle identifier from \'' + props["CFBundleIdentifier"] + '\' to \'' + bundle_id + '\'')
if signing_identity:
replace_in_file(info_filename, props["SigningIdentity"], signing_identity)
print('+ Changed signing identity from \'' + props["SigningIdentity"] + '\' to \'' + signing_identity + '\'')
if team_id:
replace_in_file(info_filename, props["Team"], team_id)
print('+ Changed team id from \'' + props["Team"] + '\' to \'' + team_id + '\'')
@main.command()
@click.option('--bundle', help='Path to app bundle.')
def analyze(bundle):
"""Analyze iOS app bundle. Displays relevant information."""
dump_infoplist(bundle)
@main.command()
@click.option('--verbose/--no-verbose', default=False, help='Verbose output.')
def doctor(verbose):
"""Show information about the installed tooling."""
print('Doctor summary (to see all details, run doctor --verbose):')
pro = subprocess.run(["which", "codesign", "/dev/null"], capture_output=True)
if pro.returncode == 1:
print(GREEN_CHECK + ' Codesign tool installed')
if verbose:
print(IDENT + PASS_DOT + ' `which codesign`, exit 1')
else:
print(GREEN_CHECK + ' Codesign tool missing')
if verbose:
print(IDENT + FAIL_DOT + ' `which codesign`, exit ' + str(pro.returncode))
pro = subprocess.run(["xcodebuild", "-version", "/dev/null"], capture_output=True)
if pro.returncode == 0:
xcinf = pro.stdout.decode('utf-8').split('\n')
print(GREEN_CHECK + ' ' + xcinf[0] + ' (' + xcinf[1] + ') installed')
if verbose:
print(IDENT + PASS_DOT + ' `xcodebuild -version`, exit 0')
else:
print(GREEN_CHECK + ' Xcode missing')
if verbose:
print(IDENT + FAIL_DOT + ' `xcodebuild -version`, exit ' + str(pro.returncode))
pro = subprocess.run(["/usr/bin/security", "-q", "find-certificate", "-a", "-c", "iPhone Developer:"], capture_output=True)
if pro.returncode == 0:
secout = pro.stdout.decode('utf-8').split('\n')
raw_labls = list(filter(lambda s: 'labl' in s, secout))
labls = set(list(map(lambda s: s[18:len(s)-1], raw_labls)))
if len(labls) > 0:
print(GREEN_CHECK + ' Keychain developer certifications')
for _, val in enumerate(labls):
print(IDENT + PASS_DOT + ' ' + val)
else:
print(GREEN_CHECK + ' Installed iOS developer certifications installed')
else:
print(GREEN_CHECK + ' List of keychain developer certifications failed')
pro = subprocess.run(["/usr/bin/security", "-q", "find-certificate", "-a", "-c", "iPhone Distribution:"], capture_output=True)
if pro.returncode == 0:
secout = pro.stdout.decode('utf-8').split('\n')
raw_labls = list(filter(lambda s: 'labl' in s, secout))
labls = set(list(map(lambda s: s[18:len(s)-1], raw_labls)))
if len(labls) > 0:
print(GREEN_CHECK + ' Installed distribution certifications')
for _, val in enumerate(labls):
print(IDENT + PASS_DOT + ' ' + val)
else:
print(GREEN_CHECK + ' No iOS distribution certifications installed')
else:
print(GREEN_CHECK + ' List of keychain distribution certifications failed')
if __name__ == "__main__":
main() | 0.464659 | 0.116814 |
from .base import API
from .utils import lambda_method, make_data
class NotificationAPI(API):
@lambda_method
def create_email_provider(self, name, description, url, port, email, password):
import cloud.notification.create_email_provider as method
params = {
'name': name,
'description': description,
'url': url,
'port': port,
'email': email,
'password': password,
}
data = make_data(self.app_id, params)
return method.do(data, self.resource)
@lambda_method
def delete_email_provider(self, name):
import cloud.notification.delete_email_provider as method
params = {
'name': name,
}
data = make_data(self.app_id, params)
return method.do(data, self.resource)
@lambda_method
def get_email_provider(self, name):
import cloud.notification.get_email_provider as method
params = {
'name': name,
}
data = make_data(self.app_id, params)
return method.do(data, self.resource)
@lambda_method
def get_email_providers(self, start_key=None):
import cloud.notification.get_email_providers as method
params = {
'start_key': start_key,
}
data = make_data(self.app_id, params)
return method.do(data, self.resource)
@lambda_method
def send_email(self, email_provider_name, title, content, send_to):
import cloud.notification.send_email as method
params = {
'email_provider_name': email_provider_name,
'title': title,
'content': content,
'send_to': send_to,
}
data = make_data(self.app_id, params)
return method.do(data, self.resource)
@lambda_method
def update_email_provider(self, name, description=None, url=None, port=None, email=None, password=None):
import cloud.notification.update_email_provider as method
params = {
'name': name,
'description': description,
'url': url,
'port': port,
'email': email,
'password': password,
}
data = make_data(self.app_id, params)
return method.do(data, self.resource)
@lambda_method
def send_sms(self, message, phone_number):
import cloud.notification.send_sms as method
params = {
'message': message,
'phone_number': phone_number
}
data = make_data(self.app_id, params)
return method.do(data, self.resource)
@lambda_method
def create_slack_webhook(self, name, url):
import cloud.notification.create_slack_webhook as method
params = {
'name': name,
'url': url,
}
data = make_data(self.app_id, params)
return method.do(data, self.resource)
@lambda_method
def delete_slack_webhook(self, name):
import cloud.notification.delete_slack_webhook as method
params = {
'name': name
}
data = make_data(self.app_id, params)
return method.do(data, self.resource)
@lambda_method
def get_slack_webhooks(self, start_key):
import cloud.notification.get_slack_webhooks as method
params = {
'start_key': start_key,
}
data = make_data(self.app_id, params)
return method.do(data, self.resource)
@lambda_method
def send_slack_message(self, slack_webhook_name, text, username=None, icon_url=None, icon_emoji=None, channel=None):
import cloud.notification.send_slack_message as method
params = {
'slack_webhook_name': slack_webhook_name,
'text': text,
'username': username,
'icon_url': icon_url,
'icon_emoji': icon_emoji,
'channel': channel,
}
data = make_data(self.app_id, params)
return method.do(data, self.resource)
@lambda_method
def create_system_notification(self, slack_webhook_name):
import cloud.notification.create_system_notification as method
params = {
'slack_webhook_name': slack_webhook_name,
}
data = make_data(self.app_id, params)
return method.do(data, self.resource)
@lambda_method
def delete_system_notification(self, system_notification_id):
import cloud.notification.delete_system_notification as method
params = {
'system_notification_id': system_notification_id
}
data = make_data(self.app_id, params)
return method.do(data, self.resource)
@lambda_method
def get_system_notifications(self, start_key):
import cloud.notification.get_system_notifications as method
params = {
'start_key': start_key,
}
data = make_data(self.app_id, params)
return method.do(data, self.resource) | aws_interface/core/api/notification.py | from .base import API
from .utils import lambda_method, make_data
class NotificationAPI(API):
@lambda_method
def create_email_provider(self, name, description, url, port, email, password):
import cloud.notification.create_email_provider as method
params = {
'name': name,
'description': description,
'url': url,
'port': port,
'email': email,
'password': password,
}
data = make_data(self.app_id, params)
return method.do(data, self.resource)
@lambda_method
def delete_email_provider(self, name):
import cloud.notification.delete_email_provider as method
params = {
'name': name,
}
data = make_data(self.app_id, params)
return method.do(data, self.resource)
@lambda_method
def get_email_provider(self, name):
import cloud.notification.get_email_provider as method
params = {
'name': name,
}
data = make_data(self.app_id, params)
return method.do(data, self.resource)
@lambda_method
def get_email_providers(self, start_key=None):
import cloud.notification.get_email_providers as method
params = {
'start_key': start_key,
}
data = make_data(self.app_id, params)
return method.do(data, self.resource)
@lambda_method
def send_email(self, email_provider_name, title, content, send_to):
import cloud.notification.send_email as method
params = {
'email_provider_name': email_provider_name,
'title': title,
'content': content,
'send_to': send_to,
}
data = make_data(self.app_id, params)
return method.do(data, self.resource)
@lambda_method
def update_email_provider(self, name, description=None, url=None, port=None, email=None, password=None):
import cloud.notification.update_email_provider as method
params = {
'name': name,
'description': description,
'url': url,
'port': port,
'email': email,
'password': password,
}
data = make_data(self.app_id, params)
return method.do(data, self.resource)
@lambda_method
def send_sms(self, message, phone_number):
import cloud.notification.send_sms as method
params = {
'message': message,
'phone_number': phone_number
}
data = make_data(self.app_id, params)
return method.do(data, self.resource)
@lambda_method
def create_slack_webhook(self, name, url):
import cloud.notification.create_slack_webhook as method
params = {
'name': name,
'url': url,
}
data = make_data(self.app_id, params)
return method.do(data, self.resource)
@lambda_method
def delete_slack_webhook(self, name):
import cloud.notification.delete_slack_webhook as method
params = {
'name': name
}
data = make_data(self.app_id, params)
return method.do(data, self.resource)
@lambda_method
def get_slack_webhooks(self, start_key):
import cloud.notification.get_slack_webhooks as method
params = {
'start_key': start_key,
}
data = make_data(self.app_id, params)
return method.do(data, self.resource)
@lambda_method
def send_slack_message(self, slack_webhook_name, text, username=None, icon_url=None, icon_emoji=None, channel=None):
import cloud.notification.send_slack_message as method
params = {
'slack_webhook_name': slack_webhook_name,
'text': text,
'username': username,
'icon_url': icon_url,
'icon_emoji': icon_emoji,
'channel': channel,
}
data = make_data(self.app_id, params)
return method.do(data, self.resource)
@lambda_method
def create_system_notification(self, slack_webhook_name):
import cloud.notification.create_system_notification as method
params = {
'slack_webhook_name': slack_webhook_name,
}
data = make_data(self.app_id, params)
return method.do(data, self.resource)
@lambda_method
def delete_system_notification(self, system_notification_id):
import cloud.notification.delete_system_notification as method
params = {
'system_notification_id': system_notification_id
}
data = make_data(self.app_id, params)
return method.do(data, self.resource)
@lambda_method
def get_system_notifications(self, start_key):
import cloud.notification.get_system_notifications as method
params = {
'start_key': start_key,
}
data = make_data(self.app_id, params)
return method.do(data, self.resource) | 0.476336 | 0.063279 |
from shellish import autocommand
import unittest
class PositionalTests(unittest.TestCase):
def test_one_pos(self):
@autocommand
def f(one):
self.assertEqual(one, 'ONE')
f(argv='ONE')
with self.assertRaises(SystemExit):
f(argv='')
def test_2_and_3_pos(self):
@autocommand
def f2(one, two):
self.assertEqual(one, 'ONE')
self.assertEqual(two, 'TWO')
f2(argv='ONE TWO')
with self.assertRaises(SystemExit):
f2(argv='ONE')
@autocommand
def f3(one, two, three):
self.assertEqual(one, 'ONE')
self.assertEqual(two, 'TWO')
self.assertEqual(three, 'THREE')
f3(argv='ONE TWO THREE')
with self.assertRaises(SystemExit):
f3(argv='ONE TWO')
def test_only_varargs(self):
@autocommand
def f(*args):
self.assertEqual(args[0], 'ONE')
self.assertEqual(args[1], 'TWO')
self.assertEqual(len(args), 2)
f(argv='ONE TWO')
def test_one_pos_and_varargs(self):
@autocommand
def f(one, *args):
self.assertEqual(one, 'posONE')
self.assertEqual(args[0], 'ONE')
self.assertEqual(args[1], 'TWO')
self.assertEqual(len(args), 2)
f(argv='posONE ONE TWO')
def test_2_pos_and_varargs(self):
@autocommand
def f(one, two, *args):
self.assertEqual(one, 'posONE')
self.assertEqual(two, 'posTWO')
self.assertEqual(args[0], 'ONE')
self.assertEqual(args[1], 'TWO')
self.assertEqual(len(args), 2)
f(argv='posONE posTWO ONE TWO')
def test_empty_varargs(self):
@autocommand
def f(*args):
self.assertEqual(len(args), 0)
f(argv='')
class KeywordTests(unittest.TestCase):
def test_one_keyword(self):
@autocommand
def f(one=None):
self.assertEqual(one, 'ONE')
f(argv='--one ONE')
def test_two_keywords(self):
@autocommand
def f(one=None, two=None):
self.assertEqual(one, 'ONE')
self.assertEqual(two, 'TWO')
f(argv='--one ONE --two TWO')
def test_only_varkwargs(self):
@autocommand
def f(**kwargs):
self.assertEqual(kwargs['one'], 'ONE')
self.assertEqual(kwargs['two'], 'TWO')
self.assertEqual(len(kwargs), 2)
f(argv='--kwargs --one ONE --two TWO')
def test_onekw_and_varkwargs(self):
@autocommand
def f(first=None, **kwargs):
self.assertEqual(first, 'FIRST')
self.assertEqual(kwargs['one'], 'ONE')
self.assertEqual(kwargs['two'], 'TWO')
self.assertEqual(len(kwargs), 2)
f(argv='--first FIRST --kwargs --one ONE --two TWO')
def test_varkwargs_mixed_patterns(self):
@autocommand
def f(**kwargs):
self.assertEqual(kwargs['one'], 'ONE')
self.assertEqual(kwargs['two'], 'TWO')
self.assertEqual(kwargs['three'], 'THREE')
self.assertEqual(len(kwargs), 3)
f(argv='--kwargs --one ONE --two TWO --three THREE')
f(argv='--kwargs one=ONE two=TWO three=THREE')
f(argv='--kwargs --one ONE two=TWO --three THREE')
f(argv='--kwargs one=ONE --two TWO three=THREE')
def test_empty_varkwargs(self):
@autocommand
def f(**kwargs):
self.assertEqual(len(kwargs), 0)
f(argv='')
class CombinationTests(unittest.TestCase):
def test_arg_kwarg_varargs(self):
with self.assertRaisesRegex(ValueError, 'Unsupported'):
@autocommand
def f(one, first=None, *args):
pass
def test_arg_varargs_kwarg(self):
@autocommand
def f(one, *args, first=None):
self.assertEqual(one, 'ONE')
self.assertEqual(args[0], 'VONE')
self.assertEqual(args[1], 'VTWO')
self.assertEqual(len(args), 2)
self.assertEqual(first, 'FIRST')
f(argv='ONE VONE VTWO --first FIRST')
def test_arg_varkwarg(self):
@autocommand
def f(one, **kwargs):
self.assertEqual(one, 'ONE')
self.assertEqual(kwargs['kwone'], 'KWONE')
self.assertEqual(kwargs['kwtwo'], 'KWTWO')
self.assertEqual(len(kwargs), 2)
f(argv='ONE --kwargs --kwone KWONE kwtwo=KWTWO')
def test_arg_kwarg_defaultfailback(self):
@autocommand
def f(one, first='DEFAULT'):
self.assertEqual(one, 'ONE')
self.assertEqual(first, 'DEFAULT')
f(argv='ONE')
def test_arg_kwarg(self):
@autocommand
def f(one, first='DEFAULT'):
self.assertEqual(one, 'ONE')
self.assertEqual(first, 'FIRST')
f(argv='ONE --first FIRST')
def test_arg_varargs_kwarg_varkwargs(self):
@autocommand
def f(one, *args, first='DEFAULT', **kwargs):
self.assertEqual(one, 'ONE')
self.assertEqual(args[0], 'VONE')
self.assertEqual(args[1], 'VTWO')
self.assertEqual(first, 'FIRST')
self.assertEqual(kwargs['kwone'], 'KWONE')
self.assertEqual(kwargs['kwtwo'], 'KWTWO')
self.assertEqual(len(kwargs), 2)
f(argv='ONE VONE VTWO --first FIRST --kwargs kwone=KWONE kwtwo=KWTWO')
def test_empty_var_and_keyword_args(self):
@autocommand
def f(*args, **kwargs):
self.assertEqual(len(args), 0)
self.assertEqual(len(kwargs), 0)
f(argv='')
class TypeTests(unittest.TestCase):
""" Tests where the arguments gather type info from the function
signature. """
def test_annotation_str(self):
@autocommand
def f(one: str):
self.assertIsInstance(one, str)
f(argv='asdf')
def test_pos_annotation_bool(self):
@autocommand
def f(one: bool):
self.assertIsInstance(one, bool)
return one
for x in ('False', 'no', 'disable', '0', 'null', 'None'):
with self.subTest(x=x):
self.assertFalse(f(argv=x))
for x in ('1', 'yes', 'True', 'randomthing'):
with self.subTest(x=x):
self.assertTrue(f(argv=x))
def test_kw_bool_toggle(self):
""" Bool keywords operate like toggles. When present on the arg line, they
invert the default. """
@autocommand
def f(toggle=False):
self.assertIsInstance(toggle, bool)
return toggle
self.assertFalse(f(argv=''))
self.assertTrue(f(argv='--toggle'))
@autocommand
def f(toggle=True):
self.assertIsInstance(toggle, bool)
return toggle
self.assertTrue(f(argv=''))
self.assertFalse(f(argv='--toggle'))
def test_annotation_int(self):
@autocommand
def f(one: int):
self.assertIsInstance(one, int)
return one
for x in [0, 1, 2**1024, -1, -2]:
self.assertEqual(f(argv=str(x)), x)
for x in ['nope', '0x0', '0o0']:
with self.assertRaises(SystemExit):
f(argv=x)
def test_annotation_float(self):
@autocommand
def f(one: float):
self.assertIsInstance(one, float)
return one
for x in [0, 1, 1.1]:
self.assertEqual(f(argv=str(x)), x)
for x in ['nope']:
with self.assertRaises(SystemExit):
f(argv=x)
class Nesting(unittest.TestCase):
def test_one_level(self):
@autocommand
def main():
return 'main'
@autocommand
def sub():
return 'sub'
main.add_subcommand(sub)
self.assertEqual(main(argv=''), 'main')
self.assertEqual(main(argv='sub'), 'sub') | test/autocommand.py | from shellish import autocommand
import unittest
class PositionalTests(unittest.TestCase):
def test_one_pos(self):
@autocommand
def f(one):
self.assertEqual(one, 'ONE')
f(argv='ONE')
with self.assertRaises(SystemExit):
f(argv='')
def test_2_and_3_pos(self):
@autocommand
def f2(one, two):
self.assertEqual(one, 'ONE')
self.assertEqual(two, 'TWO')
f2(argv='ONE TWO')
with self.assertRaises(SystemExit):
f2(argv='ONE')
@autocommand
def f3(one, two, three):
self.assertEqual(one, 'ONE')
self.assertEqual(two, 'TWO')
self.assertEqual(three, 'THREE')
f3(argv='ONE TWO THREE')
with self.assertRaises(SystemExit):
f3(argv='ONE TWO')
def test_only_varargs(self):
@autocommand
def f(*args):
self.assertEqual(args[0], 'ONE')
self.assertEqual(args[1], 'TWO')
self.assertEqual(len(args), 2)
f(argv='ONE TWO')
def test_one_pos_and_varargs(self):
@autocommand
def f(one, *args):
self.assertEqual(one, 'posONE')
self.assertEqual(args[0], 'ONE')
self.assertEqual(args[1], 'TWO')
self.assertEqual(len(args), 2)
f(argv='posONE ONE TWO')
def test_2_pos_and_varargs(self):
@autocommand
def f(one, two, *args):
self.assertEqual(one, 'posONE')
self.assertEqual(two, 'posTWO')
self.assertEqual(args[0], 'ONE')
self.assertEqual(args[1], 'TWO')
self.assertEqual(len(args), 2)
f(argv='posONE posTWO ONE TWO')
def test_empty_varargs(self):
@autocommand
def f(*args):
self.assertEqual(len(args), 0)
f(argv='')
class KeywordTests(unittest.TestCase):
def test_one_keyword(self):
@autocommand
def f(one=None):
self.assertEqual(one, 'ONE')
f(argv='--one ONE')
def test_two_keywords(self):
@autocommand
def f(one=None, two=None):
self.assertEqual(one, 'ONE')
self.assertEqual(two, 'TWO')
f(argv='--one ONE --two TWO')
def test_only_varkwargs(self):
@autocommand
def f(**kwargs):
self.assertEqual(kwargs['one'], 'ONE')
self.assertEqual(kwargs['two'], 'TWO')
self.assertEqual(len(kwargs), 2)
f(argv='--kwargs --one ONE --two TWO')
def test_onekw_and_varkwargs(self):
@autocommand
def f(first=None, **kwargs):
self.assertEqual(first, 'FIRST')
self.assertEqual(kwargs['one'], 'ONE')
self.assertEqual(kwargs['two'], 'TWO')
self.assertEqual(len(kwargs), 2)
f(argv='--first FIRST --kwargs --one ONE --two TWO')
def test_varkwargs_mixed_patterns(self):
@autocommand
def f(**kwargs):
self.assertEqual(kwargs['one'], 'ONE')
self.assertEqual(kwargs['two'], 'TWO')
self.assertEqual(kwargs['three'], 'THREE')
self.assertEqual(len(kwargs), 3)
f(argv='--kwargs --one ONE --two TWO --three THREE')
f(argv='--kwargs one=ONE two=TWO three=THREE')
f(argv='--kwargs --one ONE two=TWO --three THREE')
f(argv='--kwargs one=ONE --two TWO three=THREE')
def test_empty_varkwargs(self):
@autocommand
def f(**kwargs):
self.assertEqual(len(kwargs), 0)
f(argv='')
class CombinationTests(unittest.TestCase):
def test_arg_kwarg_varargs(self):
with self.assertRaisesRegex(ValueError, 'Unsupported'):
@autocommand
def f(one, first=None, *args):
pass
def test_arg_varargs_kwarg(self):
@autocommand
def f(one, *args, first=None):
self.assertEqual(one, 'ONE')
self.assertEqual(args[0], 'VONE')
self.assertEqual(args[1], 'VTWO')
self.assertEqual(len(args), 2)
self.assertEqual(first, 'FIRST')
f(argv='ONE VONE VTWO --first FIRST')
def test_arg_varkwarg(self):
@autocommand
def f(one, **kwargs):
self.assertEqual(one, 'ONE')
self.assertEqual(kwargs['kwone'], 'KWONE')
self.assertEqual(kwargs['kwtwo'], 'KWTWO')
self.assertEqual(len(kwargs), 2)
f(argv='ONE --kwargs --kwone KWONE kwtwo=KWTWO')
def test_arg_kwarg_defaultfailback(self):
@autocommand
def f(one, first='DEFAULT'):
self.assertEqual(one, 'ONE')
self.assertEqual(first, 'DEFAULT')
f(argv='ONE')
def test_arg_kwarg(self):
@autocommand
def f(one, first='DEFAULT'):
self.assertEqual(one, 'ONE')
self.assertEqual(first, 'FIRST')
f(argv='ONE --first FIRST')
def test_arg_varargs_kwarg_varkwargs(self):
@autocommand
def f(one, *args, first='DEFAULT', **kwargs):
self.assertEqual(one, 'ONE')
self.assertEqual(args[0], 'VONE')
self.assertEqual(args[1], 'VTWO')
self.assertEqual(first, 'FIRST')
self.assertEqual(kwargs['kwone'], 'KWONE')
self.assertEqual(kwargs['kwtwo'], 'KWTWO')
self.assertEqual(len(kwargs), 2)
f(argv='ONE VONE VTWO --first FIRST --kwargs kwone=KWONE kwtwo=KWTWO')
def test_empty_var_and_keyword_args(self):
@autocommand
def f(*args, **kwargs):
self.assertEqual(len(args), 0)
self.assertEqual(len(kwargs), 0)
f(argv='')
class TypeTests(unittest.TestCase):
""" Tests where the arguments gather type info from the function
signature. """
def test_annotation_str(self):
@autocommand
def f(one: str):
self.assertIsInstance(one, str)
f(argv='asdf')
def test_pos_annotation_bool(self):
@autocommand
def f(one: bool):
self.assertIsInstance(one, bool)
return one
for x in ('False', 'no', 'disable', '0', 'null', 'None'):
with self.subTest(x=x):
self.assertFalse(f(argv=x))
for x in ('1', 'yes', 'True', 'randomthing'):
with self.subTest(x=x):
self.assertTrue(f(argv=x))
def test_kw_bool_toggle(self):
""" Bool keywords operate like toggles. When present on the arg line, they
invert the default. """
@autocommand
def f(toggle=False):
self.assertIsInstance(toggle, bool)
return toggle
self.assertFalse(f(argv=''))
self.assertTrue(f(argv='--toggle'))
@autocommand
def f(toggle=True):
self.assertIsInstance(toggle, bool)
return toggle
self.assertTrue(f(argv=''))
self.assertFalse(f(argv='--toggle'))
def test_annotation_int(self):
@autocommand
def f(one: int):
self.assertIsInstance(one, int)
return one
for x in [0, 1, 2**1024, -1, -2]:
self.assertEqual(f(argv=str(x)), x)
for x in ['nope', '0x0', '0o0']:
with self.assertRaises(SystemExit):
f(argv=x)
def test_annotation_float(self):
@autocommand
def f(one: float):
self.assertIsInstance(one, float)
return one
for x in [0, 1, 1.1]:
self.assertEqual(f(argv=str(x)), x)
for x in ['nope']:
with self.assertRaises(SystemExit):
f(argv=x)
class Nesting(unittest.TestCase):
def test_one_level(self):
@autocommand
def main():
return 'main'
@autocommand
def sub():
return 'sub'
main.add_subcommand(sub)
self.assertEqual(main(argv=''), 'main')
self.assertEqual(main(argv='sub'), 'sub') | 0.520984 | 0.495911 |
import os
import pickle
from typing import Callable, Optional, TypeVar
import gym
from imitation.util import networks, util
from stable_baselines.common import vec_env
import tensorflow as tf
from evaluating_rewards import serialize
from evaluating_rewards.rewards import base
T = TypeVar("T")
V = TypeVar("V")
EnvRewardFactory = Callable[[gym.Space, gym.Space], base.RewardModel]
DEFAULT_CONFIG = {
"env_name": "evaluating_rewards/PointMassLine-v0",
"discount": 0.99,
"target_reward_type": "evaluating_rewards/Zero-v0",
"target_reward_path": "dummy",
"model_reward_type": base.MLPRewardModel,
}
def logging_config(log_root, env_name):
log_dir = os.path.join(log_root, env_name.replace("/", "_"), util.make_unique_timestamp())
_ = locals() # quieten flake8 unused variable warning
del _
MakeModelFn = Callable[[vec_env.VecEnv], T]
MakeTrainerFn = Callable[[base.RewardModel, tf.VariableScope, base.RewardModel], T]
DoTrainingFn = Callable[[base.RewardModel, T, Optional[base.Callback]], V]
def make_model(model_reward_type: EnvRewardFactory, venv: vec_env.VecEnv) -> base.RewardModel:
return model_reward_type(venv.observation_space, venv.action_space)
def regress(
seed: int,
# Dataset
env_name: str,
discount: float,
# Target specification
target_reward_type: str,
target_reward_path: str,
# Model parameters
make_source: MakeModelFn,
source_init: bool,
make_trainer: MakeTrainerFn,
do_training: DoTrainingFn,
# Logging
log_dir: str,
checkpoint_interval: int,
) -> V:
"""Train a model on target and save the results, reporting training stats."""
# This venv is needed by serialize.load_reward, but is never stepped.
venv = vec_env.DummyVecEnv([lambda: gym.make(env_name)])
with networks.make_session() as (_, sess):
tf.random.set_random_seed(seed)
with tf.variable_scope("source") as model_scope:
model = make_source(venv)
with tf.variable_scope("target"):
target = serialize.load_reward(target_reward_type, target_reward_path, venv, discount)
with tf.variable_scope("train") as train_scope:
trainer = make_trainer(model, model_scope, target)
# Do not initialize any variables from target, which have already been
# set during serialization.
init_vars = train_scope.global_variables()
if source_init:
init_vars += model_scope.global_variables()
sess.run(tf.initializers.variables(init_vars))
def callback(epoch: int) -> None:
if checkpoint_interval > 0 and epoch % checkpoint_interval == 0:
trainer.model.save(os.path.join(log_dir, "checkpoints", f"{epoch:05d}"))
stats = do_training(target, trainer, callback)
# Trainer may wrap source, so save `trainer.model` not source directly
# (see e.g. RegressWrappedModel).
trainer.model.save(os.path.join(log_dir, "checkpoints", "final"))
with open(os.path.join(log_dir, "stats.pkl"), "wb") as f:
pickle.dump(stats, f)
return stats | src/evaluating_rewards/scripts/regress_utils.py | import os
import pickle
from typing import Callable, Optional, TypeVar
import gym
from imitation.util import networks, util
from stable_baselines.common import vec_env
import tensorflow as tf
from evaluating_rewards import serialize
from evaluating_rewards.rewards import base
T = TypeVar("T")
V = TypeVar("V")
EnvRewardFactory = Callable[[gym.Space, gym.Space], base.RewardModel]
DEFAULT_CONFIG = {
"env_name": "evaluating_rewards/PointMassLine-v0",
"discount": 0.99,
"target_reward_type": "evaluating_rewards/Zero-v0",
"target_reward_path": "dummy",
"model_reward_type": base.MLPRewardModel,
}
def logging_config(log_root, env_name):
log_dir = os.path.join(log_root, env_name.replace("/", "_"), util.make_unique_timestamp())
_ = locals() # quieten flake8 unused variable warning
del _
MakeModelFn = Callable[[vec_env.VecEnv], T]
MakeTrainerFn = Callable[[base.RewardModel, tf.VariableScope, base.RewardModel], T]
DoTrainingFn = Callable[[base.RewardModel, T, Optional[base.Callback]], V]
def make_model(model_reward_type: EnvRewardFactory, venv: vec_env.VecEnv) -> base.RewardModel:
return model_reward_type(venv.observation_space, venv.action_space)
def regress(
seed: int,
# Dataset
env_name: str,
discount: float,
# Target specification
target_reward_type: str,
target_reward_path: str,
# Model parameters
make_source: MakeModelFn,
source_init: bool,
make_trainer: MakeTrainerFn,
do_training: DoTrainingFn,
# Logging
log_dir: str,
checkpoint_interval: int,
) -> V:
"""Train a model on target and save the results, reporting training stats."""
# This venv is needed by serialize.load_reward, but is never stepped.
venv = vec_env.DummyVecEnv([lambda: gym.make(env_name)])
with networks.make_session() as (_, sess):
tf.random.set_random_seed(seed)
with tf.variable_scope("source") as model_scope:
model = make_source(venv)
with tf.variable_scope("target"):
target = serialize.load_reward(target_reward_type, target_reward_path, venv, discount)
with tf.variable_scope("train") as train_scope:
trainer = make_trainer(model, model_scope, target)
# Do not initialize any variables from target, which have already been
# set during serialization.
init_vars = train_scope.global_variables()
if source_init:
init_vars += model_scope.global_variables()
sess.run(tf.initializers.variables(init_vars))
def callback(epoch: int) -> None:
if checkpoint_interval > 0 and epoch % checkpoint_interval == 0:
trainer.model.save(os.path.join(log_dir, "checkpoints", f"{epoch:05d}"))
stats = do_training(target, trainer, callback)
# Trainer may wrap source, so save `trainer.model` not source directly
# (see e.g. RegressWrappedModel).
trainer.model.save(os.path.join(log_dir, "checkpoints", "final"))
with open(os.path.join(log_dir, "stats.pkl"), "wb") as f:
pickle.dump(stats, f)
return stats | 0.863305 | 0.175962 |
from azure.cli.testsdk import ScenarioTest, ResourceGroupPreparer, StorageAccountPreparer
from msrestazure.tools import resource_id
class AmsAccountIdentityTests(ScenarioTest):
@ResourceGroupPreparer()
@StorageAccountPreparer(parameter_name='storage_account_for_create')
def test_ams_create_system_identity(self, resource_group, storage_account_for_create):
amsname = self.create_random_name(prefix='ams', length=12)
self.kwargs.update({
'amsname': amsname,
'storageAccount': storage_account_for_create,
'location': 'centralus',
})
self.cmd('az ams account create -n {amsname} -g {rg} --storage-account {storageAccount} -l {location} --mi-system-assigned --default-action Allow', checks=[
self.check('name', '{amsname}'),
self.check('location', 'Central US'),
self.check('identity.type', 'SystemAssigned')
])
list = self.cmd('az ams account list -g {}'.format(resource_group)).get_output_in_json()
assert len(list) > 0
self.cmd('az ams account show -n {amsname} -g {rg}', checks=[
self.check('name', '{amsname}'),
self.check('resourceGroup', '{rg}'),
self.check('identity.type', 'SystemAssigned')
])
self.cmd('az ams account delete -n {amsname} -g {rg}')
@ResourceGroupPreparer()
@StorageAccountPreparer(parameter_name='storage_account_for_create')
def test_ams_add_user_identity(self, resource_group, storage_account_for_create):
amsname = self.create_random_name(prefix='ams', length=12)
userIdName = self.create_random_name(prefix='userId', length=10)
self.kwargs.update({
'amsname': amsname,
'userIdName': userIdName,
'storageAccount': storage_account_for_create,
'location': 'centralus',
'subscription': self.get_subscription_id(),
'userIdentity': resource_id(resource_group= resource_group,
subscription=self.get_subscription_id(),
name=userIdName,
namespace='Microsoft.ManagedIdentity',
type='userAssignedIdentities')
})
self.cmd(
'az ams account create -n {amsname} -g {rg} --storage-account {storageAccount} -l {location} --mi-system-assigned --default-action Allow',
checks=[
self.check('name', '{amsname}'),
self.check('location', 'Central US'),
self.check('identity.type', 'SystemAssigned')
])
self.cmd(
'az ams account identity remove -n {amsname} -g {rg} --system-assigned',
checks=[
self.check('identity.type', 'None')
]
)
self.cmd(
'az identity create -n {userIdName} -g {rg} -l {location}'
)
self.cmd('az ams account identity assign -n {amsname} -g {rg} --user-assigned {userIdentity}',
checks=[
self.check('identity.type', 'UserAssigned')
])
self.cmd('az ams account delete -n {amsname} -g {rg}') | src/azure-cli/azure/cli/command_modules/ams/tests/latest/test_ams_account_identity_scenarios.py |
from azure.cli.testsdk import ScenarioTest, ResourceGroupPreparer, StorageAccountPreparer
from msrestazure.tools import resource_id
class AmsAccountIdentityTests(ScenarioTest):
@ResourceGroupPreparer()
@StorageAccountPreparer(parameter_name='storage_account_for_create')
def test_ams_create_system_identity(self, resource_group, storage_account_for_create):
amsname = self.create_random_name(prefix='ams', length=12)
self.kwargs.update({
'amsname': amsname,
'storageAccount': storage_account_for_create,
'location': 'centralus',
})
self.cmd('az ams account create -n {amsname} -g {rg} --storage-account {storageAccount} -l {location} --mi-system-assigned --default-action Allow', checks=[
self.check('name', '{amsname}'),
self.check('location', 'Central US'),
self.check('identity.type', 'SystemAssigned')
])
list = self.cmd('az ams account list -g {}'.format(resource_group)).get_output_in_json()
assert len(list) > 0
self.cmd('az ams account show -n {amsname} -g {rg}', checks=[
self.check('name', '{amsname}'),
self.check('resourceGroup', '{rg}'),
self.check('identity.type', 'SystemAssigned')
])
self.cmd('az ams account delete -n {amsname} -g {rg}')
@ResourceGroupPreparer()
@StorageAccountPreparer(parameter_name='storage_account_for_create')
def test_ams_add_user_identity(self, resource_group, storage_account_for_create):
amsname = self.create_random_name(prefix='ams', length=12)
userIdName = self.create_random_name(prefix='userId', length=10)
self.kwargs.update({
'amsname': amsname,
'userIdName': userIdName,
'storageAccount': storage_account_for_create,
'location': 'centralus',
'subscription': self.get_subscription_id(),
'userIdentity': resource_id(resource_group= resource_group,
subscription=self.get_subscription_id(),
name=userIdName,
namespace='Microsoft.ManagedIdentity',
type='userAssignedIdentities')
})
self.cmd(
'az ams account create -n {amsname} -g {rg} --storage-account {storageAccount} -l {location} --mi-system-assigned --default-action Allow',
checks=[
self.check('name', '{amsname}'),
self.check('location', 'Central US'),
self.check('identity.type', 'SystemAssigned')
])
self.cmd(
'az ams account identity remove -n {amsname} -g {rg} --system-assigned',
checks=[
self.check('identity.type', 'None')
]
)
self.cmd(
'az identity create -n {userIdName} -g {rg} -l {location}'
)
self.cmd('az ams account identity assign -n {amsname} -g {rg} --user-assigned {userIdentity}',
checks=[
self.check('identity.type', 'UserAssigned')
])
self.cmd('az ams account delete -n {amsname} -g {rg}') | 0.550124 | 0.142053 |
import pytest
import responses
import renamedia.tmdb.client as testee
from renamedia.common.model import MediaType
from renamedia.tmdb.model import TmdbItem
_FIND_URL = ('https://api.themoviedb.org/3/search/multi?api_key=12345'
'&language=en-US&page=1&include_adult=false&query=Title')
_GET_URL = ('https://api.themoviedb.org/3/tv/4546/season/01'
'?api_key=12345&language=en-US')
@pytest.fixture
def mock_env(mocker):
mocker.patch.dict('os.environ', {"TMDB_API_KEY": "12345"})
@responses.activate
def test_find_single_result(mock_env, tv_media_items, tmdb_item):
responses.add(
responses.GET,
_FIND_URL,
json={
"page": 1,
"total_results": 0,
"total_pages": 0,
"results": [
{
"original_name": "Title",
"genre_ids": [
35
],
"media_type": "tv",
"name": "Title",
"popularity": 36.208,
"origin_country": [
"US"
],
"vote_count": 368,
"first_air_date": "2000-10-15",
"backdrop_path": "/eNSIkGIYqXVFNmT85P4X7BsXkYI.jpg",
"original_language": "en",
"id": 4546,
"vote_average": 8.1,
"overview": "The off-kilter, unscripted comic ...",
"poster_path": "/kWQDOnLs5DK0ta8xQZLsaienIHp.jpg"
}
]
},
status=200,
match_querystring=True
)
actual = testee.find(tv_media_items[0])
assert actual == tmdb_item
@responses.activate
def test_find_multiple_results(mock_env, tv_media_items, tmdb_item):
responses.add(
responses.GET,
_FIND_URL,
json={
"page": 1,
"total_results": 0,
"total_pages": 0,
"results": [
{
"original_name": "Title",
"genre_ids": [
35
],
"media_type": "tv",
"name": "Title",
"popularity": 36.208,
"origin_country": [
"US"
],
"vote_count": 368,
"first_air_date": "2000-10-15",
"backdrop_path": "/eNSIkGIYqXVFNmT85P4X7BsXkYI.jpg",
"original_language": "en",
"id": 4546,
"vote_average": 8.1,
"overview": "The off-kilter, unscripted comic ...",
"poster_path": "/kWQDOnLs5DK0ta8xQZLsaienIHp.jpg"
},
{
"original_name": "The Sopranos",
"genre_ids": [
18
],
"media_type": "tv",
"name": "The Sopranos",
"popularity": 51.79,
"origin_country": [
"US"
],
"vote_count": 1162,
"first_air_date": "1999-01-10",
"backdrop_path": "/3ltpFyIfAtGjRMRJdECFoQQCfzx.jpg",
"original_language": "en",
"id": 1398,
"vote_average": 8.4,
"overview": "The story of New Jersey-based ...",
"poster_path": "/6nNZnnUkXcI3DvdrkclulanYXzg.jpg"
}
]
},
status=200,
match_querystring=True
)
actual = testee.find(tv_media_items[0])
assert actual == tmdb_item
@responses.activate
def test_find_other_result_and_continue(mock_env, mocker, tv_media_items):
expected = TmdbItem(id=4546, name='Other title', type=MediaType.tv)
responses.add(
responses.GET,
_FIND_URL,
json={
"page": 1,
"total_results": 0,
"total_pages": 0,
"results": [
{
"id": 4546,
"name": "Other title",
"media_type": "tv"
}
]
},
status=200,
match_querystring=True
)
mocker.patch('builtins.input', return_value='y')
actual = testee.find(tv_media_items[0])
assert actual == expected
@responses.activate
def test_find_other_result_and_abort(mock_env, mocker, tv_media_items):
responses.add(
responses.GET,
_FIND_URL,
json={
"page": 1,
"total_results": 0,
"total_pages": 0,
"results": [
{
"id": 4546,
"name": "Other title",
"media_type": "tv"
}
]
},
status=200,
match_querystring=True
)
mocker.patch('builtins.input', return_value='n')
with pytest.raises(SystemExit) as pytest_wrapped_e:
testee.find(tv_media_items[0])
assert pytest_wrapped_e.type == SystemExit
@responses.activate
def test_find_no_results(mock_env, tv_media_items):
responses.add(
responses.GET,
_FIND_URL,
json={
"page": 1,
"total_results": 0,
"total_pages": 0,
"results": []
},
status=200,
match_querystring=True
)
with pytest.raises(ValueError) as error:
testee.find(tv_media_items[0])
assert (error.value.args[0] ==
'No media information found for title [Title]')
@responses.activate
def test_find_not_found(mock_env, tv_media_items):
responses.add(
responses.GET,
_FIND_URL,
json={
"success": False,
"status_code": 34,
"status_message": "The resource you requested could not be found."
},
status=404,
match_querystring=True
)
with pytest.raises(ValueError) as error:
testee.find(tv_media_items[0])
assert (error.value.args[0] ==
'Error obtaining media information. '
'Status: [404], reason: [Not Found]')
@responses.activate
def test_get_episodes_for_season(mock_env, tmdb_item, tv_episodes):
responses.add(
responses.GET,
_GET_URL,
json={
"_id": "52576f03760ee36aaa44ffcf",
"air_date": "2000-10-15",
"episodes": [
{
"air_date": "2000-10-15",
"episode_number": 1,
"id": 324160,
"name": "Episode 1",
"overview": "An innocent bunch-up in Larry's ...",
"production_code": "",
"season_number": 1,
"show_id": 4546,
"still_path": "/1bFepeqNpx940YCTh0mI2tHgZCa.jpg",
"vote_average": 7.167,
"vote_count": 12,
},
{
"air_date": "2000-10-22",
"episode_number": 2,
"id": 324162,
"name": "Episode 2",
"overview": "Larry and Cheryl's fun-filled bowling ...",
"production_code": "",
"season_number": 1,
"show_id": 4546,
"still_path": "/714jnxgQqyowldnKMmNEomzeE3j.jpg",
"vote_average": 7.667,
"vote_count": 9
},
{
"air_date": "2000-10-29",
"episode_number": 3,
"id": 324169,
"name": "Episode 3",
"overview": "Larry sets off a bizarre chain of ...",
"production_code": "",
"season_number": 1,
"show_id": 4546,
"still_path": "/3rrhO0WLNiBceTtIaVzneYbfxu0.jpg",
"vote_average": 8.0,
"vote_count": 9
}
]
},
status=200,
match_querystring=True
)
actual = testee.get_episodes_for_season(tmdb_item, '01')
assert actual == tv_episodes
@responses.activate
def test_get_episodes_for_season_not_found(mock_env, tmdb_item, tv_episodes):
responses.add(
responses.GET,
_GET_URL,
json={
"success": False,
"status_code": 34,
"status_message": "The resource you requested could not be found."
},
status=404,
match_querystring=True
)
with pytest.raises(ValueError) as error:
testee.get_episodes_for_season(tmdb_item, '01')
assert (error.value.args[0] ==
'Error obtaining media information. '
'Status: [404], reason: [Not Found]') | tests/renamedia/tmdb/test_client.py | import pytest
import responses
import renamedia.tmdb.client as testee
from renamedia.common.model import MediaType
from renamedia.tmdb.model import TmdbItem
_FIND_URL = ('https://api.themoviedb.org/3/search/multi?api_key=12345'
'&language=en-US&page=1&include_adult=false&query=Title')
_GET_URL = ('https://api.themoviedb.org/3/tv/4546/season/01'
'?api_key=12345&language=en-US')
@pytest.fixture
def mock_env(mocker):
mocker.patch.dict('os.environ', {"TMDB_API_KEY": "12345"})
@responses.activate
def test_find_single_result(mock_env, tv_media_items, tmdb_item):
responses.add(
responses.GET,
_FIND_URL,
json={
"page": 1,
"total_results": 0,
"total_pages": 0,
"results": [
{
"original_name": "Title",
"genre_ids": [
35
],
"media_type": "tv",
"name": "Title",
"popularity": 36.208,
"origin_country": [
"US"
],
"vote_count": 368,
"first_air_date": "2000-10-15",
"backdrop_path": "/eNSIkGIYqXVFNmT85P4X7BsXkYI.jpg",
"original_language": "en",
"id": 4546,
"vote_average": 8.1,
"overview": "The off-kilter, unscripted comic ...",
"poster_path": "/kWQDOnLs5DK0ta8xQZLsaienIHp.jpg"
}
]
},
status=200,
match_querystring=True
)
actual = testee.find(tv_media_items[0])
assert actual == tmdb_item
@responses.activate
def test_find_multiple_results(mock_env, tv_media_items, tmdb_item):
responses.add(
responses.GET,
_FIND_URL,
json={
"page": 1,
"total_results": 0,
"total_pages": 0,
"results": [
{
"original_name": "Title",
"genre_ids": [
35
],
"media_type": "tv",
"name": "Title",
"popularity": 36.208,
"origin_country": [
"US"
],
"vote_count": 368,
"first_air_date": "2000-10-15",
"backdrop_path": "/eNSIkGIYqXVFNmT85P4X7BsXkYI.jpg",
"original_language": "en",
"id": 4546,
"vote_average": 8.1,
"overview": "The off-kilter, unscripted comic ...",
"poster_path": "/kWQDOnLs5DK0ta8xQZLsaienIHp.jpg"
},
{
"original_name": "The Sopranos",
"genre_ids": [
18
],
"media_type": "tv",
"name": "The Sopranos",
"popularity": 51.79,
"origin_country": [
"US"
],
"vote_count": 1162,
"first_air_date": "1999-01-10",
"backdrop_path": "/3ltpFyIfAtGjRMRJdECFoQQCfzx.jpg",
"original_language": "en",
"id": 1398,
"vote_average": 8.4,
"overview": "The story of New Jersey-based ...",
"poster_path": "/6nNZnnUkXcI3DvdrkclulanYXzg.jpg"
}
]
},
status=200,
match_querystring=True
)
actual = testee.find(tv_media_items[0])
assert actual == tmdb_item
@responses.activate
def test_find_other_result_and_continue(mock_env, mocker, tv_media_items):
expected = TmdbItem(id=4546, name='Other title', type=MediaType.tv)
responses.add(
responses.GET,
_FIND_URL,
json={
"page": 1,
"total_results": 0,
"total_pages": 0,
"results": [
{
"id": 4546,
"name": "Other title",
"media_type": "tv"
}
]
},
status=200,
match_querystring=True
)
mocker.patch('builtins.input', return_value='y')
actual = testee.find(tv_media_items[0])
assert actual == expected
@responses.activate
def test_find_other_result_and_abort(mock_env, mocker, tv_media_items):
responses.add(
responses.GET,
_FIND_URL,
json={
"page": 1,
"total_results": 0,
"total_pages": 0,
"results": [
{
"id": 4546,
"name": "Other title",
"media_type": "tv"
}
]
},
status=200,
match_querystring=True
)
mocker.patch('builtins.input', return_value='n')
with pytest.raises(SystemExit) as pytest_wrapped_e:
testee.find(tv_media_items[0])
assert pytest_wrapped_e.type == SystemExit
@responses.activate
def test_find_no_results(mock_env, tv_media_items):
responses.add(
responses.GET,
_FIND_URL,
json={
"page": 1,
"total_results": 0,
"total_pages": 0,
"results": []
},
status=200,
match_querystring=True
)
with pytest.raises(ValueError) as error:
testee.find(tv_media_items[0])
assert (error.value.args[0] ==
'No media information found for title [Title]')
@responses.activate
def test_find_not_found(mock_env, tv_media_items):
responses.add(
responses.GET,
_FIND_URL,
json={
"success": False,
"status_code": 34,
"status_message": "The resource you requested could not be found."
},
status=404,
match_querystring=True
)
with pytest.raises(ValueError) as error:
testee.find(tv_media_items[0])
assert (error.value.args[0] ==
'Error obtaining media information. '
'Status: [404], reason: [Not Found]')
@responses.activate
def test_get_episodes_for_season(mock_env, tmdb_item, tv_episodes):
responses.add(
responses.GET,
_GET_URL,
json={
"_id": "52576f03760ee36aaa44ffcf",
"air_date": "2000-10-15",
"episodes": [
{
"air_date": "2000-10-15",
"episode_number": 1,
"id": 324160,
"name": "Episode 1",
"overview": "An innocent bunch-up in Larry's ...",
"production_code": "",
"season_number": 1,
"show_id": 4546,
"still_path": "/1bFepeqNpx940YCTh0mI2tHgZCa.jpg",
"vote_average": 7.167,
"vote_count": 12,
},
{
"air_date": "2000-10-22",
"episode_number": 2,
"id": 324162,
"name": "Episode 2",
"overview": "Larry and Cheryl's fun-filled bowling ...",
"production_code": "",
"season_number": 1,
"show_id": 4546,
"still_path": "/714jnxgQqyowldnKMmNEomzeE3j.jpg",
"vote_average": 7.667,
"vote_count": 9
},
{
"air_date": "2000-10-29",
"episode_number": 3,
"id": 324169,
"name": "Episode 3",
"overview": "Larry sets off a bizarre chain of ...",
"production_code": "",
"season_number": 1,
"show_id": 4546,
"still_path": "/3rrhO0WLNiBceTtIaVzneYbfxu0.jpg",
"vote_average": 8.0,
"vote_count": 9
}
]
},
status=200,
match_querystring=True
)
actual = testee.get_episodes_for_season(tmdb_item, '01')
assert actual == tv_episodes
@responses.activate
def test_get_episodes_for_season_not_found(mock_env, tmdb_item, tv_episodes):
responses.add(
responses.GET,
_GET_URL,
json={
"success": False,
"status_code": 34,
"status_message": "The resource you requested could not be found."
},
status=404,
match_querystring=True
)
with pytest.raises(ValueError) as error:
testee.get_episodes_for_season(tmdb_item, '01')
assert (error.value.args[0] ==
'Error obtaining media information. '
'Status: [404], reason: [Not Found]') | 0.452052 | 0.258835 |
from __future__ import absolute_import, division, print_function, unicode_literals
import math
from numbers import Integral
import numpy as np
import pickle
import spectral as spy
from ..io.spyfile import SpyFile, TransformedImage
from ..utilities.errors import has_nan, NaNValueError
from .spymath import matrix_sqrt
from .transforms import LinearTransform
class Iterator:
    '''
    Abstract base class for iterators over image pixels (spectra).

    Subclasses must provide `__iter__` (yielding one spectrum per pixel),
    `get_num_elements` (how many pixels will be yielded), and
    `get_num_bands` (length of each yielded spectrum).
    '''
    def __init__(self):
        pass

    def __iter__(self):
        # Abstract: subclasses yield pixel spectra here.
        raise NotImplementedError('Must override __iter__ in child class.')

    def get_num_elements(self):
        # Abstract: number of pixels the iterator will produce.
        raise NotImplementedError('Must override get_num_elements in child class.')

    def get_num_bands(self):
        # Abstract: number of bands in each yielded pixel.
        raise NotImplementedError('Must override get_num_bands in child class.')
class ImageIterator(Iterator):
    '''
    An iterator over all pixels (spectra) in an image, in row-major order.

    While iterating, the attributes `row` and `col` hold the coordinates
    of the pixel most recently yielded.
    '''
    def __init__(self, im):
        # `im` is an MxNxB array (or an object with the same indexing
        # interface, e.g. a SpyFile).
        self.image = im
        self.numElements = im.shape[0] * im.shape[1]

    def get_num_elements(self):
        '''Return the number of pixels that will be iterated (M * N).'''
        return self.numElements

    def get_num_bands(self):
        '''Return the number of spectral bands per pixel.'''
        return self.image.shape[2]

    def __iter__(self):
        '''Yield each pixel spectrum, updating `row`/`col` as it goes.'''
        (M, N) = self.image.shape[:2]
        # Note: removed an unused `count` local that was never read.
        for i in range(M):
            self.row = i
            for j in range(N):
                self.col = j
                yield self.image[i, j]
class ImageMaskIterator(Iterator):
    '''
    An iterator over image pixels selected by a mask.

    If `index` is given, pixels for which ``mask == index`` are visited;
    otherwise all pixels with nonzero mask values are visited.  While
    iterating, the attributes `row` and `col` hold the coordinates of the
    pixel most recently yielded.
    '''
    def __init__(self, image, mask, index=None):
        '''
        Arguments:
            `image`: MxNxB image array (or SpyFile-like object).
            `mask` (ndarray): integer mask whose leading dimensions match
                those of `image`.
            `index` (int, optional): mask value selecting which pixels
                to iterate; if None, all nonzero mask values are used.

        Raises:
            ValueError: if `mask` shape does not match the image.
        '''
        if mask.shape != image.shape[:len(mask.shape)]:
            raise ValueError('Mask shape does not match image.')
        self.image = image
        self.index = index
        # Compare against None (not truthiness) so that index=0 selects
        # pixels where mask == 0 instead of silently falling back to the
        # nonzero-mask case.
        if index is not None:
            self.mask = np.equal(mask, index)
        else:
            self.mask = np.not_equal(mask, 0)
        self.n_elements = int(np.sum(self.mask))

    def get_num_elements(self):
        '''Return the number of pixels selected by the mask.'''
        return self.n_elements

    def get_num_bands(self):
        '''Return the number of spectral bands per pixel.'''
        return self.image.shape[2]

    def __iter__(self):
        '''Yield each selected pixel spectrum, updating `row`/`col`.'''
        coords = np.argwhere(self.mask)
        for (i, j) in coords:
            (self.row, self.col) = (i, j)
            yield self.image[i, j].astype(self.image.dtype).squeeze()
def iterator(image, mask=None, index=None):
    '''
    Return an iterator over pixels in `image`.

    Arguments:

        `image` (ndarray or :class:`spectral.Image`):

            An image over whose pixels will be iterated.

        `mask` (ndarray) [default None]:

            An array of integers that specify over which pixels in `image`
            iteration should be performed.

        `index` (int) [default None]:

            Specifies which value in `mask` should be used for iteration.

    Returns (:class:`spectral.Iterator`):

        An iterator over image pixels.

    With neither `mask` nor `index`, every pixel is visited.  With `mask`
    alone, only pixels with nonzero mask values are visited.  With both,
    only pixels `image[i,j]` where `mask[i,j] == index` are visited.
    An existing Iterator is returned unchanged.
    '''
    # Guard clauses: pass through an existing iterator, otherwise pick
    # the masked or unmasked iterator implementation.
    if isinstance(image, Iterator):
        return image
    if mask is None:
        return ImageIterator(image)
    return ImageMaskIterator(image, mask, index)
def iterator_ij(mask, index=None):
    '''
    Return an iterator over image pixel coordinates for a given mask.

    Arguments:

        `mask` (ndarray):

            A 2-D array of integers that specify which coordinates should
            be returned.

        `index` (int) [default None]:

            Specifies which value in `mask` should be used for iteration.

    Returns:

        An iterator over image pixel coordinates. Each returned item is a
        2-tuple of the form (row, col).

    If `index` is None, all nonzero elements of `mask` are visited;
    otherwise, all coordinates for which ``mask[i,j] == index``.

    Raises:

        ValueError: if `mask` is not 2-dimensional.
    '''
    if mask.ndim != 2:
        raise ValueError('Invalid mask shape.')
    # Build the boolean selection once, then walk its coordinates.
    selected = (mask != 0) if index is None else (mask == index)
    for coords in np.argwhere(selected):
        yield tuple(coords)
def mean_cov(image, mask=None, index=None):
    '''
    Return the mean and covariance of a set of vectors.

    Usage::

        (mean, cov, S) = mean_cov(vectors [, mask=None [, index=None]])

    Arguments:

        `image` (ndarrray, :class:`~spectral.Image`, or :class:`spectral.Iterator`):

            If an ndarray, it should have shape `MxNxB` and the mean &
            covariance will be calculated for each band (third dimension).

        `mask` (ndarray):

            If `mask` is specified, mean & covariance will be calculated for
            all pixels indicated in the mask array.  If `index` is specified,
            all pixels in `image` for which `mask == index` will be used;
            otherwise, all nonzero elements of `mask` will be used.

        `index` (int):

            Specifies which value in `mask` to use to select pixels from
            `image`. If not specified but `mask` is, then all nonzero elements
            of `mask` will be used.

        If neither `mask` nor `index` are specified, all samples in `vectors`
        will be used.

    Returns a 3-tuple containing:

        `mean` (ndarray):

            The length-`B` mean vector

        `cov` (ndarray):

            The `BxB` unbiased estimate (dividing by N-1) of the covariance
            of the vectors.

        `S` (int):

            Number of samples used to calculate mean & cov
    '''
    status = spy._status

    # Fast path: plain ndarray -> vectorized numpy computation.
    if isinstance(image, np.ndarray):
        X = image.astype(np.float64)
        if X.ndim == 3:
            # Reshape the converted array (previously `image` was reshaped
            # here, which silently discarded the float64 cast above).
            X = X.reshape(-1, X.shape[-1]).T
        if mask is not None:
            mask = mask.ravel()
            if index is not None:
                ii = np.argwhere(mask == index)
            else:
                ii = np.argwhere(mask != 0)
            X = np.take(X, ii.squeeze(), axis=1)
        m = np.average(X, axis=1)
        C = np.cov(X)
        return (m, C, X.shape[1])

    # Slow path: accumulate first and second moments one pixel at a time.
    if not isinstance(image, Iterator):
        it = iterator(image, mask, index)
    else:
        it = image

    nSamples = it.get_num_elements()
    B = it.get_num_bands()

    sumX = np.zeros((B,), 'd')
    sumX2 = np.zeros((B, B), 'd')
    count = 0

    # Progress display updates roughly once per percent of samples.
    statusInterval = max(1, nSamples / 100)
    status.display_percentage('Covariance.....')
    for x in it:
        if not count % statusInterval:
            status.update_percentage(float(count) / nSamples * 100.)
        count += 1
        sumX += x
        x = x.astype(np.float64)[:, np.newaxis]
        sumX2 += x.dot(x.T)
    mean = (sumX / count)
    sumX = sumX[:, np.newaxis]
    # Unbiased covariance estimate from the accumulated moments.
    cov = (sumX2 - sumX.dot(sumX.T) / count) / (count - 1)
    status.end_percentage()
    return (mean, cov, count)
def cov_avg(image, mask, weighted=True):
    '''Calculates the covariance averaged over a set of classes.

    Arguments:

        `image` (ndarrray, :class:`~spectral.Image`, or :class:`spectral.Iterator`):

            If an ndarray, it should have shape `MxNxB` and the mean &
            covariance will be calculated for each band (third dimension).

        `mask` (integer-valued ndarray):

            Elements specify the classes associated with pixels in `image`.
            All pixels associated with nonzero elements of `mask` will be
            used in the covariance calculation.

        `weighted` (bool, default True):

            If True, each class covariance is weighted by the number of
            pixels in the class; otherwise a simple (unweighted) average
            of the class covariances is returned.

    Returns a class-averaged covariance matrix. The number of covariances
    used in the average equals the number of distinct nonzero values in
    `mask`.
    '''
    # Zero is the "unlabeled" value and is excluded from the class set.
    class_ids = set(mask.ravel()) - set((0,))
    class_stats = [calc_stats(image, mask, cid) for cid in class_ids]
    if not weighted:
        return np.mean([c.cov for c in class_stats], axis=0, dtype=np.float64)
    N = sum(c.nsamples for c in class_stats)
    # Weight each class by its (unbiased) share of the pooled samples.
    terms = [((c.nsamples - 1) / float(N - 1)) * c.cov for c in class_stats]
    return np.sum(terms, axis=0, dtype=np.float64)
def covariance(*args):
    '''
    Returns the covariance of the set of vectors.

    Usage::

        C = covariance(vectors [, mask=None [, index=None]])

    Arguments:

        `vectors` (ndarrray, :class:`~spectral.Image`, or :class:`spectral.Iterator`):

            If an ndarray, it should have shape `MxNxB` and the mean &
            covariance will be calculated for each band (third dimension).

        `mask` (ndarray):

            If `mask` is specified, mean & covariance will be calculated for
            all pixels indicated in the mask array.  If `index` is specified,
            all pixels in `image` for which `mask == index` will be used;
            otherwise, all nonzero elements of `mask` will be used.

        `index` (int):

            Specifies which value in `mask` to use to select pixels from
            `image`. If not specified but `mask` is, then all nonzero elements
            of `mask` will be used.

    Returns:

        `C` (ndarray):

            The `BxB` unbiased estimate (dividing by N-1) of the covariance
            of the vectors.

    To also obtain the mean vector and sample count, call
    :func:`~spectral.algorithms.algorithms.mean_cov` instead.
    '''
    # Delegate to mean_cov and keep only the covariance component of
    # its (mean, cov, nsamples) result.
    stats = mean_cov(*args)
    return stats[1]
class PrincipalComponents:
    '''
    An object for storing a data set's principal components. The
    object has the following members:

        `eigenvalues`:

            A length B array of eigenvalues sorted in descending order

        `eigenvectors`:

            A `BxB` array of normalized eigenvectors (in columns)

        `stats` (:class:`GaussianStats`):

            A statistics object containing `mean`, `cov`, and `nsamples`.

        `transform`:

            A callable function to transform data to the space of the
            principal components.

        `reduce`:

            A method to return a reduced set of principal components based
            on either a fixed number of components or a fraction of total
            variance.

        `denoise`:

            A callable function to denoise data using a reduced set of
            principal components.

        `get_denoising_transform`:

            A callable function that returns a function for denoising data.
    '''
    def __init__(self, vals, vecs, stats):
        self.eigenvalues = vals
        self.eigenvectors = vecs
        self.stats = stats
        # Center on the data mean, then project onto the eigenvector basis.
        self.transform = LinearTransform(self.eigenvectors.T, pre=-self.mean)

    @property
    def mean(self):
        '''Mean vector of the data from which the components were computed.'''
        return self.stats.mean

    @property
    def cov(self):
        '''Covariance matrix of the data.'''
        return self.stats.cov

    def reduce(self, N=0, **kwargs):
        '''Reduces the number of principal components.

        Keyword Arguments (one of the following must be specified):

            `num` (integer):

                Number of eigenvalues/eigenvectors to retain.  The top `num`
                eigenvalues will be retained.

            `eigs` (list):

                A list of indices of eigenvalues/eigenvectors to be retained.

            `fraction` (float):

                The fraction of total image variance to retain.  Eigenvalues
                will be retained (starting from greatest to smallest) until
                `fraction` of total image variance is retained.

        Returns a new :class:`PrincipalComponents` object (or `self` when no
        reduction is achieved).  Raises an Exception if none of the keywords
        is given or `fraction` is out of range.
        '''
        status = spy._status

        num = kwargs.get('num', None)
        eigs = kwargs.get('eigs', None)
        fraction = kwargs.get('fraction', None)
        if num is not None:
            return PrincipalComponents(self.eigenvalues[:num],
                                       self.eigenvectors[:, :num],
                                       self.stats)
        elif eigs is not None:
            vals = self.eigenvalues[eigs]
            vecs = self.eigenvectors[:, eigs]
            return PrincipalComponents(vals, vecs, self.stats)
        elif fraction is not None:
            if not 0 < fraction <= 1:
                raise Exception('fraction must be in range (0,1].')
            N = len(self.eigenvalues)
            cumsum = np.cumsum(self.eigenvalues)
            # `total` was previously named `sum`, shadowing the builtin.
            total = cumsum[-1]
            # Count how many values to retain.
            for i in range(N):
                if (cumsum[i] / total) >= fraction:
                    break
            if i == (N - 1):
                # No reduction
                status.write('No reduction in eigenvectors achieved.')
                return self

            vals = self.eigenvalues[:i + 1]
            vecs = self.eigenvectors[:, :i + 1]
            return PrincipalComponents(vals, vecs, self.stats)
        else:
            raise Exception('Must specify one of the following keywords:'
                            '`num`, `eigs`, `fraction`.')

    def denoise(self, X, **kwargs):
        '''Returns a de-noised version of `X`.

        Arguments:

            `X` (np.ndarray):

                Data to be de-noised. Can be a single pixel or an image.

        Keyword Arguments (one of the following must be specified):

            `num` (integer):

                Number of eigenvalues/eigenvectors to use.  The top `num`
                eigenvalues will be used.

            `eigs` (list):

                A list of indices of eigenvalues/eigenvectors to be used.

            `fraction` (float):

                The fraction of total image variance to retain.  Eigenvalues
                will be included (starting from greatest to smallest) until
                `fraction` of total image variance is retained.

        Returns denoised image data with same shape as `X`.

        Note that calling this method is equivalent to calling the
        `get_denoising_transform` method with same keyword and applying the
        returned transform to `X`. If you only intend to denoise data with the
        same parameters multiple times, then it is more efficient to get the
        denoising transform and reuse it, rather than calling this method
        multilple times.
        '''
        f = self.get_denoising_transform(**kwargs)
        return f(X)

    def get_denoising_transform(self, **kwargs):
        '''Returns a function for denoising image data.

        Keyword Arguments (one of the following must be specified):

            `num` (integer):

                Number of eigenvalues/eigenvectors to use.  The top `num`
                eigenvalues will be used.

            `eigs` (list):

                A list of indices of eigenvalues/eigenvectors to be used.

            `fraction` (float):

                The fraction of total image variance to retain.  Eigenvalues
                will be included (starting from greatest to smallest) until
                `fraction` of total image variance is retained.

        Returns a callable :class:`~spectral.algorithms.transforms.LinearTransform`
        object for denoising image data.
        '''
        # BUG FIX: previously called `self.reduce(self, **kwargs)`, passing
        # the object itself as the (ignored) `N` positional argument.
        V = self.reduce(**kwargs).eigenvectors
        # Project into the reduced PC space and back: V V^T is the
        # rank-reduced reconstruction operator about the mean.
        f = LinearTransform(V.dot(V.T), pre=-self.mean,
                            post=self.mean)
        return f
def principal_components(image):
    '''
    Calculate Principal Component eigenvalues & eigenvectors for an image.

    Usage::

        pc = principal_components(image)

    Arguments:

        `image` (ndarray, :class:`spectral.Image`, :class:`GaussianStats`):

            An `MxNxB` image (or precomputed statistics).

    Returns a :class:`~spectral.algorithms.algorithms.PrincipalComponents`
    object with the following members:

        `eigenvalues`:

            A length B array of eigenvalues

        `eigenvectors`:

            A `BxB` array of normalized eigenvectors

        `stats` (:class:`GaussianStats`):

            A statistics object containing `mean`, `cov`, and `nsamples`.

        `transform`:

            A callable function to transform data to the space of the
            principal components.

        `reduce`:

            A method to reduce the number of eigenvalues.

        `denoise`:

            A callable function to denoise data using a reduced set of
            principal components.

        `get_denoising_transform`:

            A callable function that returns a function for denoising data.
    '''
    if isinstance(image, GaussianStats):
        stats = image
    else:
        stats = calc_stats(image)

    (L, V) = np.linalg.eig(stats.cov)

    # numpy does not guarantee eigenvalue ordering, so sort into
    # descending order if needed.  `np.all` replaces `np.alltrue`,
    # which was removed in NumPy 2.0.
    if not np.all(np.diff(L) <= 0):
        ii = list(reversed(np.argsort(L)))
        L = L[ii]
        V = V[:, ii]

    return PrincipalComponents(L, V, stats)
class FisherLinearDiscriminant:
    '''
    Stores the result of a Fisher linear discriminant analysis.  For `C`
    classes of `B`-dimensional data, the object has the following members:

        `eigenvalues`:

            A length `C-1` array of eigenvalues

        `eigenvectors`:

            A `BxC` array of normalized eigenvectors

        `mean`:

            The length `B` mean vector of the image pixels (from all classes)

        `cov_b`:

            The `BxB` matrix of covariance *between* classes

        `cov_w`:

            The `BxB` matrix of average covariance *within* each class

        `transform`:

            A callable function to transform data to the space of the
            linear discriminant.
    '''
    def __init__(self, vals, vecs, mean, cov_b, cov_w):
        self.mean = mean
        self.eigenvalues = vals
        self.eigenvectors = vecs
        self.cov_b = cov_b
        self.cov_w = cov_w
        # Subtract the global mean, then project onto the discriminant
        # directions (eigenvector columns).
        self.transform = LinearTransform(vecs.T, pre=-mean)
def linear_discriminant(classes, whiten=True):
    '''
    Solve Fisher's linear discriminant for eigenvalues and eigenvectors.

    Usage: (L, V, Cb, Cw) = linear_discriminant(classes)

    Arguments:

        `classes` (:class:`~spectral.algorithms.TrainingClassSet`):

            The set of `C` classes to discriminate.

        `whiten` (bool, default True):

            If True, scale the eigenvectors so the within-class covariance
            is diagonalized (whitened) in the transformed space.

    Returns a `FisherLinearDiscriminant` object containing the within/between-
    class covariances, mean vector, and a callable transform to convert data to
    the transform's space.

    This function determines the solution to the generalized eigenvalue problem

            Cb * x = lambda * Cw * x

    Since cov_w is normally invertible, this reduces to

            (inv(Cw) * Cb) * x = lambda * x

    References:

        Richards, J.A. & Jia, X. Remote Sensing Digital Image Analysis: An
        Introduction. (Springer: Berlin, 1999).
    '''
    rank = len(classes) - 1  # Cb has rank at most C - 1.

    classes.calc_stats()

    # Calculate total # of training pixels and total mean
    N = 0
    B = classes.nbands
    K = len(classes)
    mean = np.zeros(B, dtype=np.float64)
    for s in classes:
        N += s.size()
        mean += s.size() * s.stats.mean
    mean /= N

    cov_b = np.zeros((B, B), np.float64)            # cov between classes
    cov_w = np.zeros((B, B), np.float64)            # cov within classes
    for s in classes:
        cov_w += ((s.size() - 1) / float(N - 1)) * s.stats.cov
        m = s.stats.mean - mean
        cov_b += (s.size() / float(N) / (K - 1)) * np.outer(m, m)

    inv_cov_w = np.linalg.inv(cov_w)
    (vals, vecs) = np.linalg.eig(inv_cov_w.dot(cov_b))
    vals = vals[:rank]
    vecs = vecs[:, :rank]

    if whiten:
        # Diagonalize cov_within in the new space
        v = vecs.T.dot(cov_w).dot(vecs)
        d = np.sqrt(np.diag(v) * np.diag(v).conj())
        for i in range(vecs.shape[1]):
            vecs[:, i] /= math.sqrt(d[i].real)

    return FisherLinearDiscriminant(vals.real, vecs.real, mean, cov_b, cov_w)
# Public alias: Linear Discriminant Analysis (LDA) is the common name
# for Fisher's linear discriminant computed by `linear_discriminant`.
lda = linear_discriminant
def log_det(x):
    '''Return the sum of the logs of the positive eigenvalues of `x`.

    For a symmetric positive-definite matrix this equals the log of its
    determinant; eigenvalues <= 0 are skipped.
    '''
    positive = [ev for ev in np.linalg.eigvals(x) if ev > 0]
    return sum(np.log(positive))
class GaussianStats(object):
    '''A class for storing Gaussian statistics for a data set.

    Statistics stored include:

        `mean`:
            Mean vector

        `cov`:
            Covariance matrix

        `nsamples`:
            Number of samples used in computing the statistics

    Several derived statistics are computed on-demand (and cached) and are
    available as property attributes. These include:

        `inv_cov`:
            Inverse of the covariance

        `sqrt_cov`:
            Matrix square root of covariance: sqrt_cov.dot(sqrt_cov) == cov

        `sqrt_inv_cov`:
            Matrix square root of the inverse of covariance

        `log_det_cov`:
            The log of the determinant of the covariance matrix

        `principal_components`:
            The principal components of the data, based on mean and cov.
    '''
    def __init__(self, mean=None, cov=None, nsamples=None, inv_cov=None):
        '''Create a stats object from optional precomputed values.'''
        # Assign `cov` first: its setter resets every cached derived stat
        # (including `_inv_cov`), so `_inv_cov` must be assigned afterward
        # for the provided value to take effect.
        self.cov = cov
        self._inv_cov = inv_cov
        self.mean = mean
        self.nsamples = nsamples
    @property
    def cov(self):
        '''Property method returning the covariance matrix.'''
        return self._cov
    @cov.setter
    def cov(self, C):
        '''Assigning a new covariance invalidates all cached derived stats.'''
        self.reset_derived_stats()
        self._cov = C
    @property
    def inv_cov(self):
        '''Property method returning the inverse of the covariance matrix.'''
        # Computed lazily and cached until `cov` is reassigned.
        if self._inv_cov is None:
            self._inv_cov = np.linalg.inv(self._cov)
        return self._inv_cov
    def reset_derived_stats(self):
        '''Clears the covariance and every cached derived statistic.'''
        self._cov = self._inv_cov = None
        self._sqrt_cov = self._sqrt_inv_cov = self._pcs = None
        self._log_det_cov = None
    @property
    def sqrt_cov(self):
        '''Property method returning the matrix square root of the covariance.

        If `C` is the covariance, then the returned value is a matrix `S`
        such that S.dot(S) == C.
        '''
        # Built from the cached eigen-decomposition of cov.
        if self._sqrt_cov is None:
            pcs = self.principal_components
            self._sqrt_cov = matrix_sqrt(eigs=(pcs.eigenvalues,
                                               pcs.eigenvectors),
                                         symmetric=True)
        return self._sqrt_cov
    @property
    def sqrt_inv_cov(self):
        '''Property method returning matrix square root of inverse of cov.

        If `C` is the covariance, then the returned value is a matrix `S`
        such that S.dot(S) == inv(C).
        '''
        # Reuses the same eigen-decomposition as `sqrt_cov`.
        if self._sqrt_inv_cov is None:
            pcs = self.principal_components
            self._sqrt_inv_cov = matrix_sqrt(eigs=(pcs.eigenvalues,
                                                   pcs.eigenvectors),
                                             symmetric=True,
                                             inverse=True)
        return self._sqrt_inv_cov
    @property
    def principal_components(self):
        '''Eigen-decomposition of `cov` wrapped in a PrincipalComponents
        object (computed lazily and cached).'''
        if self._pcs is None:
            (evals, evecs) = np.linalg.eigh(self._cov)
            self._pcs = PrincipalComponents(evals, evecs, self)
        return self._pcs
    @property
    def log_det_cov(self):
        '''Log of the determinant of cov, computed as the sum of logs of the
        positive eigenvalues (non-positive eigenvalues are skipped).'''
        if self._log_det_cov is None:
            evals = self.principal_components.eigenvalues
            self._log_det_cov = np.sum(np.log([v for v in evals if v > 0]))
        return self._log_det_cov
    def transform(self, xform):
        '''Returns a version of the stats transformed by a linear transform.'''
        if not isinstance(xform, LinearTransform):
            raise TypeError('Expected a LinearTransform object.')
        m = xform(self.mean)
        # Covariance transforms as A * C * A^T under a linear map A.
        C = xform._A.dot(self.cov).dot(xform._A.T)
        return GaussianStats(mean=m, cov=C, nsamples=self.nsamples)
    def get_whitening_transform(self):
        '''Returns transform that centers and whitens data for these stats.'''
        C_1 = np.linalg.inv(self.cov)
        # sqrt of the inverse covariance whitens; `pre` centers on the mean.
        return LinearTransform(matrix_sqrt(C_1, True), pre=-self.mean)
def calc_stats(image, mask=None, index=None, allow_nan=False):
    '''Computes Gaussian statistics (mean and covariance) for image data.

    Arguments:

        `image` (ndarray, :class:`~spectral.Image`, or :class:`spectral.Iterator`):

            If an ndarray, it should have shape `MxNxB` and the mean &
            covariance will be calculated for each band (third dimension).

        `mask` (ndarray):

            If given, statistics are computed only over the pixels selected
            by the mask (see `index`).

        `index` (int):

            If specified along with `mask`, only pixels where
            `mask == index` are used; otherwise all nonzero elements of
            `mask` are used.

        `allow_nan` (bool, default False):

            If False, `~spectral.algorithms.spymath.NaNValueError` is raised
            when NaN values are present in the computed mean.

    If neither `mask` nor `index` are specified, all samples are used.

    Returns:

        `GaussianStats` object with members `mean`, `cov`, and `nsamples`.
    '''
    mean, cov, nsamples = mean_cov(image, mask, index)
    if has_nan(mean) and not allow_nan:
        raise NaNValueError('NaN values present in data.')
    return GaussianStats(mean=mean, cov=cov, nsamples=nsamples)
class TrainingClass:
    '''A set of image pixels associated with a single class label.'''

    def __init__(self, image, mask, index=0, class_prob=1.0):
        '''Creates a new training class defined by applying `mask` to `image`.

        Arguments:

            `image` (:class:`spectral.Image` or :class:`numpy.ndarray`):

                The `MxNxB` image over which the training class is defined.

            `mask` (:class:`numpy.ndarray`):

                An `MxN` array of integers that specifies which pixels in
                `image` are associated with the class.

            `index` (int) [default 0]:

                if `index` == 0, all nonzero elements of `mask` are associated
                with the class. If `index` is nonzero, all elements of `mask`
                equal to `index` are associated with the class.

            `class_prob` (float) [default 1.0]:

                Defines the prior probability associated with the class, which
                is used in maximum likelihood classification. If `class_prob`
                is 1.0, prior probabilities are ignored by classifiers, giving
                all classes equal weighting.
        '''
        self.image = image
        # BUG FIX: the original unconditionally executed `self.nbands = None`
        # after the `if` (missing `else`), so `nbands` was always None here.
        if image is not None:
            self.nbands = image.shape[2]
        else:
            self.nbands = None
        self.mask = mask
        self.index = index
        self.class_prob = class_prob
        self.stats = None           # GaussianStats; populated by calc_stats()
        self._stats_valid = False

    def __iter__(self):
        '''Returns an iterator over all samples for the class.'''
        it = ImageMaskIterator(self.image, self.mask, self.index)
        for i in it:
            yield i

    def stats_valid(self, tf=None):
        '''
        Sets statistics for the TrainingClass to be valid or invalid.

        Arguments:

            `tf` (bool or None):

                A value evaluating to False indicates that statistics should be
                recalculated prior to being used. If the argument is `None`,
                a value will be returned indicating whether stats need to be
                recomputed.
        '''
        if tf is None:
            return self._stats_valid
        self._stats_valid = tf

    def size(self):
        '''Returns the number of pixels/samples in the training set.'''
        # If the stats are invalid, the number of pixels in the
        # training set may have changed, so count from the mask instead.
        if self._stats_valid:
            return self.stats.nsamples
        if self.index:
            return np.sum(np.equal(self.mask, self.index).ravel())
        else:
            return np.sum(np.not_equal(self.mask, 0).ravel())

    def calc_stats(self):
        '''
        Calculates statistics for the class.

        This function causes the :attr:`stats` attribute of the class to be
        updated, where `stats` will have the following attributes:

        =============  ======================  ===================================
        Attribute      Type                    Description
        =============  ======================  ===================================
        `mean`         :class:`numpy.ndarray`  length-`B` mean vector
        `cov`          :class:`numpy.ndarray`  `BxB` covariance matrix
        `inv_cov`      :class:`numpy.ndarray`  Inverse of `cov`
        `log_det_cov`  float                   Natural log of determinant of `cov`
        =============  ======================  ===================================
        '''
        self.stats = calc_stats(self.image, self.mask, self.index)
        self.nbands = self.image.shape[-1]
        self._stats_valid = True

    def transform(self, transform):
        '''
        Perform a linear transformation on the statistics of the training set.

        Arguments:

            `transform` (:class:numpy.ndarray or LinearTransform):

                The linear transform array. If the class has `B` bands, then
                `transform` must have shape `(C,B)`.

        After `transform` is applied, the class statistics will have `C` bands.
        '''
        if isinstance(transform, np.ndarray):
            transform = LinearTransform(transform)
        self.stats.mean = transform(self.stats.mean)
        self.stats.cov = np.dot(
            transform._A, self.stats.cov).dot(transform._A.T)
        self.nbands = transform.dim_out
class SampleIterator:
    '''Iterates over every sample of every class in a TrainingClassSet.'''
    def __init__(self, trainingData):
        self.classes = trainingData

    def __iter__(self):
        for training_class in self.classes:
            yield from training_class
class TrainingClassSet:
    '''A class to manage a set of :class:`~spectral.TrainingClass` objects.'''
    def __init__(self):
        self.classes = {}   # maps class index -> TrainingClass
        self.nbands = None

    def __getitem__(self, i):
        '''Returns the training class having ID i.'''
        return self.classes[i]

    def __len__(self):
        '''Returns number of training classes in the set.'''
        return len(self.classes)

    def add_class(self, cl):
        '''Adds a new class to the training set.

        Arguments:

            `cl` (:class:`spectral.TrainingClass`):

                `cl.index` must not duplicate a class already in the set.
        '''
        if cl.index in self.classes:
            raise Exception('Attempting to add class with duplicate index.')
        self.classes[cl.index] = cl
        if not self.nbands:
            self.nbands = cl.nbands

    def transform(self, X):
        '''Applies linear transform, M, to all training classes.

        Arguments:

            `X` (:class:numpy.ndarray):

                The linear transform array. If the classes have `B` bands,
                then `X` must have shape `(C,B)`.

        After the transform is applied, all classes will have `C` bands.
        '''
        for cl in self.classes.values():
            cl.transform(X)
        self.nbands = list(self.classes.values())[0].nbands

    def __iter__(self):
        '''An iterator over all training classes in the set.'''
        for cl in self.classes.values():
            yield cl

    def all_samples(self):
        '''An iterator over all samples in all classes.'''
        return SampleIterator(self)

    def calc_stats(self):
        '''Computes statistics for each class, if not already computed.'''
        for c in self.classes.values():
            if not c.stats_valid():
                c.calc_stats()
        self.nbands = list(self.classes.values())[0].nbands

    def save(self, filename, calc_stats=False):
        '''Pickles the shared mask and per-class statistics to `filename`.

        If any class lacks statistics, they are computed first when
        `calc_stats` is True; otherwise an Exception is raised.
        '''
        for c in self.classes.values():
            if c.stats is None:
                if not calc_stats:
                    msg = 'Class statistics are missing from at least one ' \
                          'class and are required to save the training class ' \
                          'data. Call the `save` method with keyword ' \
                          '`calc_stats=True` if you want to compute them and ' \
                          'then save the class data.'
                    raise Exception(msg)
                else:
                    c.calc_stats()
        # `with` guarantees the file is closed even if pickling fails.
        with open(filename, 'wb') as f:
            ids = sorted(self.classes.keys())
            pickle.dump(self.classes[ids[0]].mask, f)
            pickle.dump(len(self), f)
            for id in ids:
                c = self.classes[id]
                pickle.dump(c.index, f)
                pickle.dump(c.stats.cov, f)
                pickle.dump(c.stats.mean, f)
                pickle.dump(c.stats.nsamples, f)
                pickle.dump(c.class_prob, f)

    def load(self, filename, image):
        '''Loads class statistics pickled by `save` and attaches `image`.

        BUG FIX: the original ended with `f.close` (no parentheses), which
        never closed the file; a `with` block now handles closing.
        '''
        with open(filename, 'rb') as f:
            mask = pickle.load(f)
            nclasses = pickle.load(f)
            for i in range(nclasses):
                index = pickle.load(f)
                cov = pickle.load(f)
                mean = pickle.load(f)
                nsamples = pickle.load(f)
                class_prob = pickle.load(f)
                c = TrainingClass(image, mask, index, class_prob)
                c.stats = GaussianStats(mean=mean, cov=cov, nsamples=nsamples)
                if not (cov is None or mean is None or nsamples is None):
                    c.stats_valid(True)
                    c.nbands = len(mean)
                self.add_class(c)
def create_training_classes(image, class_mask, calc_stats=False, indices=None):
    '''
    Creates a :class:spectral.algorithms.TrainingClassSet: from an indexed array.

    USAGE:  sets = createTrainingClasses(classMask)

    Arguments:

        `image` (:class:`spectral.Image` or :class:`numpy.ndarray`):

            The image data for which the training classes will be defined.
            `image` has shape `MxNxB`.

        `class_mask` (:class:`numpy.ndarray`):

            A rank-2 array whose elements are indices of various spectral
            classes. If `class_mask[i,j]` == `k`, then `image[i,j]` is
            assumed to belong to class `k`.

        `calc_stats` (bool):

            An optional parameter which, if True, causes statistics to be
            calculated for all training classes.

        `indices` (iterable of int, optional):

            If given, restricts the classes created to these label values.

    Returns:

        A :class:`spectral.algorithms.TrainingClassSet` object.

    The dimensions of classMask should be the same as the first two dimensions
    of the corresponding image. Values of zero in classMask are considered
    unlabeled and are not added to a training set.
    '''
    if indices is not None:
        labels = set(indices)
    else:
        labels = set(class_mask.ravel())
    labels.discard(0)   # zero marks unlabeled pixels

    training_sets = TrainingClassSet()
    training_sets.nbands = image.shape[-1]
    for label in labels:
        new_class = TrainingClass(image, class_mask, label)
        if calc_stats:
            new_class.calc_stats()
        training_sets.add_class(new_class)
    return training_sets
def ndvi(data, red, nir):
    '''Calculates Normalized Difference Vegetation Index (NDVI).

    Arguments:

        `data` (ndarray or :class:`spectral.Image`):

            The array or SpyFile for which to calculate the index.

        `red` (int or int range):

            Index of the red band or an index range for multiple bands.

        `nir` (int or int range):

            An integer index of the near infrared band or an index range for
            multiple bands.

    Returns an ndarray:

        An array containing NDVI values in the range [-1.0, 1.0] for each
        corresponding element of data.
    '''
    # BUG FIX: the original used `sum(r, 2)`; with the builtin `sum` that
    # iterates over axis 0 with a start value of 2, not the band axis.
    # Averaging over the band axis is what was intended and is what
    # `np.sum(r, 2) / r.shape[2]` computes; `mean(axis=2)` is equivalent.
    r = data[:, :, red].astype(float)
    if r.ndim == 3 and r.shape[2] > 1:
        r = r.mean(axis=2)
    n = data[:, :, nir].astype(float)
    if n.ndim == 3 and n.shape[2] > 1:
        n = n.mean(axis=2)
    # NOTE(review): pixels where n + r == 0 produce division warnings/NaN,
    # preserved from the original behavior.
    return (n - r) / (n + r)
def bdist(class1, class2):
    '''
    Calulates the Bhattacharyya distance between two classes.

    USAGE:  bd = bdist(class1, class2)

    Arguments:

        `class1`, `class2` (:class:`~spectral.algorithms.algorithms.TrainingClass`)

    Returns:

        A float value for the Bhattacharyya Distance between the classes. This
        function is aliased to :func:`~spectral.algorithms.algorithms.bDistance`.

    References:

        <NAME>. & <NAME>. Remote Sensing Digital Image Analysis: An
        Introduction. (Springer: Berlin, 1999).
    '''
    lin_term, quad_term = bdist_terms(class1, class2)
    return lin_term + quad_term

bDistance = bdist
def bdist_terms(a, b):
    '''
    Calulate the linear and quadratic terms of the Bhattacharyya distance
    between two classes.

    USAGE:  (linTerm, quadTerm) = bDistanceTerms(a, b)

    ARGUMENTS:

        (a, b)              The classes (with `stats` attributes) for which
                            to determine the B-distance.

    RETURN VALUE:

        A 2-tuple of the linear and quadratic terms
    '''
    m = a.stats.mean - b.stats.mean
    avg_cov = (a.stats.cov + b.stats.cov) / 2
    # BUG FIX: `np.inv` does not exist in numpy; the matrix inverse lives in
    # the linalg submodule as `np.linalg.inv`.
    lin_term = (1 / 8.) * np.dot(np.transpose(m),
                                 np.dot(np.linalg.inv(avg_cov), m))
    quad_term = 0.5 * (log_det(avg_cov)
                       - 0.5 * a.stats.log_det_cov
                       - 0.5 * b.stats.log_det_cov)
    return (lin_term, float(quad_term))
def transform_image(matrix, image):
    '''
    Performs linear transformation on all pixels in an image.

    Arguments:

        matrix (:class:`numpy.ndarray`):

            A `CxB` linear transform to apply.

        image  (:class:`numpy.ndarray` or :class:`spectral.Image`):

            Image data to transform

    Returns:

        If `image` is an `MxNxB` :class:`numpy.ndarray`, the return will be a
        transformed :class:`numpy.ndarray` with shape `MxNxC`.  If `image` is
        :class:`spectral.Image`, the returned object will be a
        :class:`spectral.TransformedImage` object and no transformation of data
        will occur until elements of the object are accessed.

    Raises:

        TypeError if `image` is neither an ndarray nor a SpyFile.
    '''
    if isinstance(image, np.ndarray):
        # Vectorized: contracting the band axis against matrix.T applies
        # `matrix` to every pixel at once (replaces the per-pixel loop).
        return np.dot(image.astype(float), matrix.T)
    elif isinstance(image, SpyFile):
        return TransformedImage(matrix, image)
    else:
        # BUG FIX: the original raised a string literal, which is a TypeError
        # ("exceptions must derive from BaseException") on Python 3.
        raise TypeError('Unrecognized image type passed to transform_image.')
def orthogonalize(vecs, start=0):
    '''
    Performs Gram-Schmidt Orthogonalization on a set of vectors.

    Arguments:

        `vecs` (:class:`numpy.ndarray`):

            The set of vectors for which an orthonormal basis will be created.
            If there are `C` vectors of length `B`, `vecs` should be `CxB`.

        `start` (int) [default 0]:

            If `start` > 0, then `vecs[start]` will be assumed to already be
            orthonormal.

    Returns:

        A new `CxB` array containing an orthonormal basis for the given
        vectors.
    '''
    nvecs, dim = vecs.shape
    # Work on a copy with vectors as columns.
    basis = np.array(np.transpose(vecs))
    identity = np.identity(dim).astype(float)
    for col in range(start, nvecs):
        if col == 0:
            # First vector only needs normalization.
            basis[:, 0] /= np.linalg.norm(basis[:, 0])
            continue
        candidate = basis[:, col] / np.linalg.norm(basis[:, col])
        prior = basis[:, :col]
        # Projector onto the orthogonal complement of the prior columns.
        projector = identity - prior.dot(
            np.linalg.inv(prior.T.dot(prior)).dot(prior.T))
        basis[:, col] = projector.dot(candidate)
        basis[:, col] /= np.linalg.norm(basis[:, col])
    return np.transpose(basis)
def unmix(data, members):
    '''
    Perform linear unmixing on image data.

    USAGE: mix = unmix(data, members)

    ARGUMENTS:

        data                The MxNxB image data to be unmixed

        members             An CxB array of C endmembers

    RETURN VALUE:

        mix                 An MxNxC array of endmember fractions.

    unmix performs linear unmixing on the image data.  After calling the
    function, mix[:,:,i] will then represent the fractional abundances
    for the i'th endmember. If the result of unmix is returned into 'mix',
    then an array of indices of greatest fractional endmembers is obtained
    by argmax(mix).

    Note that depending on endmembers given, fractional abundances for
    endmembers may be negative.
    '''
    assert members.shape[1] == data.shape[2], \
        'Matrix dimensions are not aligned.'
    members = members.astype(float)
    # Pseudo-inverse: inv(M M^T) M.
    # BUG FIX: `np.inv` does not exist; the inverse is `np.linalg.inv`.
    pi = np.dot(members, np.transpose(members))
    pi = np.dot(np.linalg.inv(pi), members)
    # Vectorized equivalent of applying `pi` to each pixel in a double loop.
    return np.dot(data.astype(float), pi.T)
def spectral_angles(data, members):
    '''Calculates spectral angles with respect to given set of spectra.

    Arguments:

        `data` (:class:`numpy.ndarray` or :class:`spectral.Image`):

            An `MxNxB` image for which spectral angles will be calculated.

        `members` (:class:`numpy.ndarray`):

            `CxB` array of spectral endmembers.

    Returns:

        `MxNxC` array of spectral angles.

    Calculates the spectral angles between each vector in data and each of the
    endmembers.  The output of this function (angles) can be used to classify
    the data by minimum spectral angle by calling argmin(angles).
    '''
    assert members.shape[1] == data.shape[2], \
        'Matrix dimensions are not aligned.'
    # Unit-normalize each endmember row.
    endmembers = np.array(members, np.float64)
    endmembers /= np.linalg.norm(endmembers, axis=1)[:, np.newaxis]
    # Cosine of the angle between every pixel and every endmember.
    pixel_norms = np.linalg.norm(data, axis=2)
    cosines = np.dot(data, endmembers.T) / pixel_norms[:, :, np.newaxis]
    return np.arccos(np.clip(cosines, -1, 1))
def msam(data, members):
    '''Modified SAM scores according to Oshigami, et al [1]. Endmembers are
    mean-subtracted prior to spectral angle calculation. Results are
    normalized such that the maximum value of 1 corresponds to a perfect match
    (zero spectral angle).

    Arguments:

        `data` (:class:`numpy.ndarray` or :class:`spectral.Image`):

            An `MxNxB` image for which spectral angles will be calculated.

        `members` (:class:`numpy.ndarray`):

            `CxB` array of spectral endmembers.

    Returns:

        `MxNxC` array of MSAM scores with maximum value of 1 corresponding
        to a perfect match (zero spectral angle).

    Calculates the spectral angles between each vector in data and each of the
    endmembers.  The output of this function (angles) can be used to classify
    the data by minimum spectral angle by calling argmax(angles).

    References:

        [1] <NAME>, <NAME>, <NAME>, <NAME>,
        <NAME>, <NAME>, <NAME>, <NAME>, and
        <NAME>. 2013. Mineralogical mapping of southern Namibia by application
        of continuum-removal MSAM method to the HyMap data. Int. J. Remote Sens.
        34, 15 (August 2013), 5282-5295.
    '''
    # The modifications to the `spectral_angles` function were contributed by
    # <NAME>.
    assert members.shape[1] == data.shape[2], \
        'Matrix dimensions are not aligned.'
    (M, N, B) = data.shape
    m = np.array(members, np.float64)
    C = m.shape[0]
    # Normalize endmembers: mean-subtract, then scale each to unit length.
    for i in range(C):
        # Fisher z trafo type operation
        m[i] -= np.mean(m[i])
        m[i] /= np.sqrt(m[i].dot(m[i]))
    angles = np.zeros((M, N, C), np.float64)
    # Per-pixel: mean-subtract and unit-normalize, then score against each
    # endmember as 1 - angle / (pi/2).
    for i in range(M):
        for j in range(N):
            #Fisher z trafo type operation
            v = data[i, j] - np.mean(data[i, j])
            v /= np.sqrt(v.dot(v))
            # NOTE(review): clipping the *components* of the normalized pixel
            # vector to [-1, 1] before the dot product looks unusual (the dot
            # product itself is clipped below) — confirm against the reference
            # implementation.
            v = np.clip(v, -1, 1)
            for k in range(C):
                # Calculate Mineral Index according to Oshigami et al.
                # (Intnl. J. of Remote Sens. 2013)
                a = np.clip(v.dot(m[k]), -1, 1)
                angles[i,j,k]= 1.0 - np.arccos(a) / (math.pi / 2)
    return angles
def noise_from_diffs(X, direction='lowerright'):
    '''Estimates noise statistics by taking differences of adjacent pixels.

    Arguments:

        `X` (np.ndarray):

            The data from which to estimate noise statistics. `X` should have
            shape `(nrows, ncols, nbands)`.

        `direction` (str, default "lowerright"):

            The pixel direction along which to calculate pixel differences.
            Case-insensitive. Must be one of the following:

                'lowerright':
                    Take difference with pixel diagonally to lower right
                'lowerleft':
                    Take difference with pixel diagonally to lower left
                'right':
                    Take difference with pixel to the right
                'lower':
                    Take difference with pixel below

    Returns a :class:`~spectral.algorithms.algorithms.GaussianStats` object.
    '''
    # BUG FIX: the original validated `direction.lower()` but then dispatched
    # on the raw string, so e.g. 'LowerRight' passed validation yet silently
    # fell through to the 'lower' branch. Normalize once up front.
    direction = direction.lower()
    if direction not in ('lowerright', 'lowerleft', 'right', 'lower'):
        raise ValueError('Invalid `direction` value.')
    if direction == 'lowerright':
        deltas = X[:-1, :-1, :] - X[1:, 1:, :]
    elif direction == 'lowerleft':
        deltas = X[:-1, 1:, :] - X[1:, :-1, :]
    elif direction == 'right':
        deltas = X[:, :-1, :] - X[:, 1:, :]
    else:
        deltas = X[:-1, :, :] - X[1:, :, :]
    stats = calc_stats(deltas)
    # Differencing two i.i.d. noise samples doubles the variance, so halve
    # the covariance to recover the single-pixel noise estimate.
    stats.cov /= 2.0
    return stats
class MNFResult(object):
    '''Result object returned by :func:`~spectral.algorithms.algorithms.mnf`.

    This object contains data associated with a Minimum Noise Fraction
    calculation, including signal and noise statistics, as well as the
    Noise-Adjusted Principal Components (NAPC). This object can be used to
    denoise image data or to reduce its dimensionality.
    '''
    def __init__(self, signal, noise, napc):
        '''
        Arguments:

            `signal` (:class:`~spectral.GaussianStats`):

                Signal statistics

            `noise` (:class:`~spectral.GaussianStats`):

                Noise statistics

            `napc` (:class:`~spectral.PrincipalComponents`):

                Noise-Adjusted Principal Components
        '''
        self.signal = signal
        self.noise = noise
        self.napc = napc

    def _num_from_kwargs(self, **kwargs):
        '''Returns number of components to retain for the given kwargs.

        Exactly one of the `num` or `snr` keywords must be provided.
        '''
        for key in kwargs:
            if key not in ('num', 'snr'):
                raise Exception('Keyword not recognized.')
        num = kwargs.get('num', None)
        snr = kwargs.get('snr', None)
        # Identity checks (`is None`) replace the original's `num == snr == None`.
        if num is None and snr is None:
            raise Exception('Must specify either `num` or `snr` keyword.')
        if num is not None and snr is not None:
            raise Exception('Can not specify both `num` and `snr` keywords.')
        if snr is not None:
            num = self.num_with_snr(snr)
        return num

    def denoise(self, X, **kwargs):
        '''Returns a de-noised version of `X`.

        Arguments:

            `X` (np.ndarray):

                Data to be de-noised. Can be a single pixel or an image.

        One (and only one) of the following keywords must be specified:

            `num` (int):

                Number of Noise-Adjusted Principal Components to retain.

            `snr` (float):

                Threshold signal-to-noise ratio (SNR) to retain.

        Returns denoised image data with same shape as `X`.

        Note that calling this method is equivalent to calling the
        `get_denoising_transform` method with same keyword and applying the
        returned transform to `X`. If you only intend to denoise data with the
        same parameters multiple times, then it is more efficient to get the
        denoising transform and reuse it, rather than calling this method
        multiple times.
        '''
        f = self.get_denoising_transform(**kwargs)
        return f(X)

    def get_denoising_transform(self, **kwargs):
        '''Returns a function for denoising image data.

        One (and only one) of the following keywords must be specified:

            `num` (int):

                Number of Noise-Adjusted Principal Components to retain.

            `snr` (float):

                Threshold signal-to-noise ratio (SNR) to retain.

        Returns a callable :class:`~spectral.algorithms.transforms.LinearTransform`
        object for denoising image data.
        '''
        N = self._num_from_kwargs(**kwargs)
        V = self.napc.eigenvectors
        # Zero out the trailing (noisiest) components, then map back to the
        # original coordinate space, re-centering on the signal mean.
        Vr = np.array(V)
        Vr[:, N:] = 0.
        f = LinearTransform(self.noise.sqrt_cov.dot(Vr).dot(V.T)
                            .dot(self.noise.sqrt_inv_cov),
                            pre=-self.signal.mean,
                            post=self.signal.mean)
        return f

    def reduce(self, X, **kwargs):
        '''Reduces dimensionality of image data.

        Arguments:

            `X` (np.ndarray):

                Data to be reduced. Can be a single pixel or an image.

        One (and only one) of the following keywords must be specified:

            `num` (int):

                Number of Noise-Adjusted Principal Components to retain.

            `snr` (float):

                Threshold signal-to-noise ratio (SNR) to retain.

        Returns a version of `X` with reduced dimensionality.

        Note that calling this method is equivalent to calling the
        `get_reduction_transform` method with same keyword and applying the
        returned transform to `X`. If you intend to denoise data with the
        same parameters multiple times, then it is more efficient to get the
        reduction transform and reuse it, rather than calling this method
        multiple times.
        '''
        f = self.get_reduction_transform(**kwargs)
        return f(X)

    def get_reduction_transform(self, **kwargs):
        '''Reduces dimensionality of image data.

        One (and only one) of the following keywords must be specified:

            `num` (int):

                Number of Noise-Adjusted Principal Components to retain.

            `snr` (float):

                Threshold signal-to-noise ratio (SNR) to retain.

        Returns a callable :class:`~spectral.algorithms.transforms.LinearTransform`
        object for reducing the dimensionality of image data.
        '''
        N = self._num_from_kwargs(**kwargs)
        V = self.napc.eigenvectors
        f = LinearTransform(V[:, :N].T.dot(self.noise.sqrt_inv_cov),
                            pre=-self.signal.mean)
        return f

    def num_with_snr(self, snr):
        '''Returns the number of components with SNR >= `snr`.'''
        # NAPC eigenvalues equal SNR + 1 in the noise-whitened space.
        return np.sum(self.napc.eigenvalues >= (snr + 1))
def mnf(signal, noise):
    '''Computes Minimum Noise Fraction / Noise-Adjusted Principal Components.

    Arguments:

        `signal` (:class:`~spectral.algorithms.algorithms.GaussianStats`):

            Estimated signal statistics

        `noise` (:class:`~spectral.algorithms.algorithms.GaussianStats`):

            Estimated noise statistics

    Returns an :class:`~spectral.algorithms.algorithms.MNFResult` object,
    containing the Noise-Adjusted Principal Components (NAPC) and methods for
    denoising or reducing dimensionality of associated data.

    The Minimum Noise Fraction (MNF) is similar to the Principal Components
    transformation with the difference that the Principal Components associated
    with the MNF are ordered by descending signal-to-noise ratio (SNR) rather
    than overall image variance.  Note that the eigenvalues of the NAPC are
    equal to one plus the SNR in the transformed space (since noise has
    whitened unit variance in the NAPC coordinate space).

    Example:

        >>> data = open_image('92AV3C.lan').load()
        >>> signal = calc_stats(data)
        >>> noise = noise_from_diffs(data[117: 137, 85: 122, :])
        >>> mnfr = mnf(signal, noise)

        >>> # De-noise the data by eliminating NAPC components where SNR < 10.
        >>> # The de-noised data will be in the original coordinate space (at
        >>> # full dimensionality).
        >>> denoised = mnfr.denoise(data, snr=10)

        >>> # Reduce dimensionality, retaining NAPC components where SNR >= 10.
        >>> reduced = mnfr.reduce(data, snr=10)

        >>> # Reduce dimensionality, retaining top 50 NAPC components.
        >>> reduced = mnfr.reduce(data, num=50)

    References:

        Lee, <NAME>., <NAME>, and <NAME>. "Enhancement of
        high spectral resolution remote-sensing data by a noise-adjusted
        principal components transform." Geoscience and Remote Sensing, IEEE
        Transactions on 28.3 (1990): 295-304.
    '''
    # Whiten the signal covariance with the noise statistics.
    C = noise.sqrt_inv_cov.dot(signal.cov).dot(noise.sqrt_inv_cov)
    (L, V) = np.linalg.eig(C)
    # numpy says eigenvalues may not be sorted so we'll sort them, if needed.
    # BUG FIX: `np.alltrue` was removed in NumPy 2.0; `np.all` is the
    # supported equivalent.
    if not np.all(np.diff(L) <= 0):
        ii = list(reversed(np.argsort(L)))
        L = L[ii]
        V = V[:, ii]
    wstats = GaussianStats(mean=np.zeros_like(L), cov=C)
    napc = PrincipalComponents(L, V, wstats)
    return MNFResult(signal, noise, napc)
def ppi(X, niters, threshold=0, centered=False, start=None, display=0,
        **imshow_kwargs):
    '''Returns pixel purity indices for an image.

    Arguments:

        `X` (ndarray):

            Image data for which to calculate pixel purity indices

        `niters` (int):

            Number of iterations to perform. Each iteration corresponds to a
            projection of the image data onto a random unit vector.

        `threshold` (numeric):

            If this value is zero, only the two most extreme pixels will have
            their indices incremented for each random vector. If the value is
            greater than zero, then all pixels whose projections onto the
            random vector are within `threshold` data units of either of the
            two extreme pixels will also have their indices incremented.

        `centered` (bool):

            If True, then the pixels in X are assumed to have their mean
            already subtracted; otherwise, the mean of `X` will be computed
            and subtracted prior to computing the purity indices.

        `start` (ndarray):

            An optional array of initial purity indices. This can be used to
            continue computing PPI values after a previous call to `ppi` (i.e.,
            set `start` equal to the return value from a previous call to
            `ppi`). This should be an integer-valued array whose dimensions
            are equal to the first two dimensions of `X`.

        `display` (integer):

            If set to a positive integer, a :class:`~spectral.graphics.spypylab.ImageView`
            window will be opened and dynamically display PPI values as the
            function iterates. The value specifies the number of PPI iterations
            between display updates. It is recommended to use a value around
            100 or higher. If the `stretch` keyword (see :func:`~spectral.graphics.graphics.get_rgb`
            for meaning) is not provided, a default stretch of (0.99, 0.999)
            is used.

    Return value:

        An ndarray of integers that represent the pixel purity indices of the
        input image. The return array will have dimensions equal to the first
        two dimensions of the input image.

    Keyword Arguments:

        Any keyword accepted by :func:`~spectral.graphics.spypylab.imshow`.
        These keywords will be passed to the image display and only have an
        effect if the `display` argument is nonzero.

    This function can be interrupted with a KeyboardInterrupt (ctrl-C), in which
    case, the most recent value of the PPI array will be returned. This can be
    used in conjunction with the `display` argument to view the progression of
    the PPI values until they appear stable, then terminate iteration using
    ctrl-C.

    References:

        <NAME>., <NAME>, and <NAME>., "Mapping Target Signatures via
        Partial Unmixing of AVIRIS Data," Pasadena, California, USA, 23 Jan 1995,
        URI: http://hdl.handle.net/2014/33635
    '''
    # Validate `display`: must be a non-negative integer (bool is excluded
    # even though it subclasses int).
    # NOTE(review): passing display=None skips this validation, but the
    # `display > 0` comparison below raises TypeError on Python 3 — confirm
    # whether None was ever meant to be supported.
    if display is not None:
        if not isinstance(display, Integral) or isinstance(display, bool) or \
                display < 0:
            msg = '`display` argument must be a non-negative integer.'
            raise ValueError(msg)
    # Center the data unless the caller says it already is.
    if not centered:
        stats = calc_stats(X)
        X = X - stats.mean
    # Flatten image to (npixels, nbands) for projection; remember the
    # original shape to reshape the counts on return.
    shape = X.shape
    X = X.reshape(-1, X.shape[-1])
    nbands = X.shape[-1]
    fig = None
    # `updating` marks the window during which `counts` is being modified so
    # a KeyboardInterrupt mid-update can be reported as potentially corrupt.
    updating = False
    if start is not None:
        # Copy so the caller's `start` array is not modified in place.
        counts = np.array(start.ravel())
    else:
        counts = np.zeros(X.shape[0], dtype=np.uint32)
    if 'stretch' not in imshow_kwargs:
        imshow_kwargs['stretch'] = (0.99, 0.999)
    msg = 'Running {0} pixel purity iterations...'.format(niters)
    spy._status.display_percentage(msg)
    try:
        for i in range(niters):
            # Random unit vector (uniform components in [-0.5, 0.5), then
            # normalized) and projection of every pixel onto it.
            r = np.random.rand(nbands) - 0.5
            r /= np.sqrt(np.sum(r * r))
            s = X.dot(r)
            imin = np.argmin(s)
            imax = np.argmax(s)
            updating = True
            if threshold == 0:
                # Only the two extreme pixels are incremented
                counts[imin] += 1
                counts[imax] += 1
            else:
                # All pixels within threshold distance from the two extremes
                counts[s >= (s[imax] - threshold)] += 1
                counts[s <= (s[imin] + threshold)] += 1
            updating = False
            # Refresh the interactive display every `display` iterations.
            if display > 0 and (i + 1) % display == 0:
                if fig is not None:
                    fig.set_data(counts.reshape(shape[:2]), **imshow_kwargs)
                else:
                    fig = spy.imshow(counts.reshape(shape[:2]), **imshow_kwargs)
                fig.set_title('PPI ({} iterations)'.format(i + 1))
            if not (i + 1) % 10:
                spy._status.update_percentage(100 * (i + 1) / niters)
    except KeyboardInterrupt:
        # Graceful interruption: return partial results if the counts array
        # was not mid-update when the interrupt arrived.
        spy._status.end_percentage('interrupted')
        if not updating:
            msg = 'KeyboardInterrupt received. Returning pixel purity ' \
                  'values after {0} iterations.'.format(i)
            spy._status.write(msg)
            return counts.reshape(shape[:2])
        else:
            msg = 'KeyboardInterrupt received during array update. PPI ' \
                  'values may be corrupt. Returning None'
            spy._status.write(msg)
            return None
    spy._status.end_percentage()
    return counts.reshape(shape[:2])
def smacc(spectra, min_endmembers=None, max_residual_norm=float('Inf')):
    '''Returns SMACC decomposition (H = F * S + R) matrices for an image or
    array of spectra.

    Let `H` be a matrix of shape NxB, where B is the number of bands and N the
    number of spectra. If `spectra` already has that shape, `H` will be equal
    to `spectra`. Otherwise, `spectra` is assumed to be a 3D spectral image,
    and it is reshaped to match the shape of `H`.

    Arguments:

        `spectra` (ndarray):

            Image data for which to calculate SMACC decomposition matrices.

        `min_endmembers` (int):

            Minimal number of endmembers to find. Defaults to rank of `H`,
            computed numerically with `numpy.linalg.matrix_rank`.

        `max_residual_norm`:

            Maximum value of residual vectors' norms. Algorithm will keep
            finding new endmembers until max value of residual norms is less
            than this argument. Defaults to float('Inf').

    Returns:

        3 matrices, S, F and R, such that H = F * S + R (but it might not
        always hold exactly).

        F is matrix of expansion coefficients of shape N x num_endmembers.
        All values of F are equal to, or greater than zero.

        S is matrix of endmember spectra, extreme vectors, of shape
        num_endmembers x B.

        R is matrix of residuals of same shape as H (N x B).

        If values of H are large (few thousands), H = F * S + R might not
        hold, because of numeric errors. It is advisable to scale numbers,
        by dividing by 10000, for example. Depending on how accurate you want
        it to be, you can check if H is really strictly equal to F * S + R,
        and adjust R: R = H - np.matmul(F, S).

    References:

        <NAME>, <NAME>, and <NAME> "The sequential
        maximum angle convex cone (SMACC) endmember model", Proc. SPIE 5425,
        Algorithms and Technologies for Multispectral, Hyperspectral, and
        Ultraspectral Imagery X, (12 August 2004);
        https://doi.org/10.1117/12.543794
    '''
    # Indices (into H) of the extreme vectors (endmembers) found so far.
    q = []
    H = spectra if len(spectra.shape) == 2 else spectra.reshape(
        (spectra.shape[0] * spectra.shape[1], spectra.shape[2]))
    # Residual matrix. Rebound (never mutated in place), so H is preserved.
    R = H
    Fs = []  # One expansion-coefficient vector (length N) per endmember.
    F = None
    S = None

    if min_endmembers is None:
        min_endmembers = np.linalg.matrix_rank(H)

    # Seed the search with the longest spectrum vector.
    residual_norms = np.sqrt(np.einsum('ij,ij->i', H, H))
    current_max_residual_norm = np.max(residual_norms)
    if max_residual_norm is None:
        max_residual_norm = current_max_residual_norm / min_endmembers

    while len(q) < min_endmembers or current_max_residual_norm > max_residual_norm:
        q.append(np.argmax(residual_norms))
        n = len(q) - 1
        # Current basis vector.
        w = R[q[n]]
        # Temporary to be used for projection calculation.
        wt = w / (np.dot(w, w))
        # Calculate projection coefficients.
        On = np.dot(R, wt)
        alpha = np.ones(On.shape, dtype=np.float64)

        # Make corrections to satisfy convex cone conditions.
        # First correct alphas for oblique projection when needed.
        for k in range(len(Fs)):
            t = On * Fs[k][q[n]]
            # These values correspond to values where On == 0.0, and they
            # will be zeroed out below. But to avoid a divide-by-zero
            # warning we set small values instead of zero.
            t[t == 0.0] = 1e-10
            np.minimum(Fs[k]/t, alpha, out=alpha)

        # Clip negative projection coefficients.
        alpha[On <= 0.0] = 0.0
        # Current extreme vector should always be removed completely.
        alpha[q[n]] = 1.0

        # Calculate oblique projection coefficients.
        Fn = alpha * On
        # Correction for numerical stability.
        Fn[Fn <= 0.0] = 0.0
        # Remove projection to current basis from R.
        R = R - np.outer(Fn, w)

        # Update projection coefficients of previously found endmembers.
        for k in range(len(Fs)):
            Fs[k] -= Fs[k][q[n]] * Fn
            # Correction because of numerical problems.
            Fs[k][Fs[k] <= 0.0] = 0.0

        # Add new Fn.
        Fs.append(Fn)

        residual_norms[:] = np.sqrt(np.einsum('ij,ij->i', R, R))
        current_max_residual_norm = np.max(residual_norms)
        print('Found {0} endmembers, current max residual norm is {1:.4f}\r'
              .format(len(q), current_max_residual_norm), end='')

    # Correction as suggested in the SMACC paper: endmember pixels receive
    # pure (one-hot) expansion coefficients.
    for k, s in enumerate(q):
        Fs[k][q] = 0.0
        Fs[k][s] = 1.0

    F = np.array(Fs).T
    S = H[q]

    # H = F * S + R
    return S, F, R
import math
from numbers import Integral
import numpy as np
import pickle
import spectral as spy
from ..io.spyfile import SpyFile, TransformedImage
from ..utilities.errors import has_nan, NaNValueError
from .spymath import matrix_sqrt
from .transforms import LinearTransform
class Iterator:
    '''
    Base class for iterators over pixels (spectra).

    Subclasses must implement `__iter__` (yielding individual pixel vectors),
    `get_num_elements`, and `get_num_bands`.
    '''
    def __init__(self):
        pass
    def __iter__(self):
        # Abstract: subclasses yield individual pixel (spectrum) vectors.
        raise NotImplementedError('Must override __iter__ in child class.')
    def get_num_elements(self):
        # Abstract: total number of pixels the iterator will yield.
        raise NotImplementedError(
            'Must override get_num_elements in child class.')
    def get_num_bands(self):
        # Abstract: number of spectral bands per yielded pixel.
        raise NotImplementedError(
            'Must override get_num_bands in child class.')
class ImageIterator(Iterator):
    '''
    An iterator over every pixel of an image, traversed in row-major order.

    The most recently visited location is exposed through the `row` and
    `col` attributes while iterating.
    '''
    def __init__(self, im):
        self.image = im
        self.numElements = im.shape[0] * im.shape[1]

    def get_num_elements(self):
        return self.numElements

    def get_num_bands(self):
        return self.image.shape[2]

    def __iter__(self):
        nrows, ncols = self.image.shape[:2]
        for r in range(nrows):
            self.row = r
            for c in range(ncols):
                self.col = c
                yield self.image[r, c]
class ImageMaskIterator(Iterator):
    '''
    An iterator over the image pixels selected by a mask.

    If `index` is given (and nonzero), only pixels whose mask value equals
    `index` are visited; otherwise all pixels with a nonzero mask value are.
    The current location is exposed via the `row` and `col` attributes.
    '''
    def __init__(self, image, mask, index=None):
        if mask.shape != image.shape[:len(mask.shape)]:
            raise ValueError('Mask shape does not match image.')
        self.image = image
        self.index = index
        # Boolean selection mask for the training set.
        self.mask = np.equal(mask, index) if index else np.not_equal(mask, 0)
        self.n_elements = sum(self.mask.ravel())

    def get_num_elements(self):
        return self.n_elements

    def get_num_bands(self):
        return self.image.shape[2]

    def __iter__(self):
        for i, j in np.argwhere(self.mask):
            (self.row, self.col) = (i, j)
            yield self.image[i, j].astype(self.image.dtype).squeeze()
def iterator(image, mask=None, index=None):
    '''
    Returns an iterator over pixels in the image.

    Arguments:

        `image` (ndarray or :class:`spectral.Image`):

            An image over whose pixels will be iterated.

        `mask` (ndarray) [default None]:

            An array of integers that specify over which pixels in `image`
            iteration should be performed.

        `index` (int) [default None]:

            Specifies which value in `mask` should be used for iteration.

    Returns (:class:`spectral.Iterator`):

        An iterator over image pixels.

    If neither `mask` nor `index` are defined, iteration is performed over all
    pixels. If `mask` (but not `index`) is defined, iteration is performed
    over all pixels for which `mask` is nonzero. If both `mask` and `index`
    are defined, iteration is performed over all pixels `image[i,j]` for which
    `mask[i,j] == index`.
    '''
    # An existing Iterator is passed through untouched.
    if isinstance(image, Iterator):
        return image
    if mask is None:
        return ImageIterator(image)
    return ImageMaskIterator(image, mask, index)
def iterator_ij(mask, index=None):
    '''
    Returns an iterator over image pixel coordinates for a given mask.

    Arguments:

        `mask` (ndarray) [default None]:

            An array of integers that specify which coordinates should
            be returned.

        `index` (int) [default None]:

            Specifies which value in `mask` should be used for iteration.

    Returns:

        An iterator over image pixel coordinates. Each returned item is a
        2-tuple of the form (row, col).

    If `index` is not defined, iteration is performed over all non-zero
    elements. If `index` is defined, iteration is performed over all
    coordinates for which `mask[i,j] == index`.
    '''
    if mask.ndim != 2:
        raise ValueError('Invalid mask shape.')
    selected = (mask != 0) if index is None else (mask == index)
    for coords in np.argwhere(selected):
        yield tuple(coords)
def mean_cov(image, mask=None, index=None):
    '''
    Return the mean and covariance of the set of vectors.

    Usage::

        (mean, cov, S) = mean_cov(vectors [, mask=None [, index=None]])

    Arguments:

        `image` (ndarrray, :class:`~spectral.Image`, or :class:`spectral.Iterator`):

            If an ndarray, it should have shape `MxNxB` and the mean &
            covariance will be calculated for each band (third dimension).

        `mask` (ndarray):

            If `mask` is specified, mean & covariance will be calculated for
            all pixels indicated in the mask array. If `index` is specified,
            all pixels in `image` for which `mask == index` will be used;
            otherwise, all nonzero elements of `mask` will be used.

        `index` (int):

            Specifies which value in `mask` to use to select pixels from
            `image`. If not specified but `mask` is, then all nonzero elements
            of `mask` will be used.

        If neither `mask` nor `index` are specified, all samples in `vectors`
        will be used.

    Returns a 3-tuple containing:

        `mean` (ndarray):

            The length-`B` mean vectors

        `cov` (ndarray):

            The `BxB` unbiased estimate (dividing by N-1) of the covariance
            of the vectors.

        `S` (int):

            Number of samples used to calculate mean & cov

    Calculate the mean and covariance of the given vectors. The argument
    can be an Iterator, a SpyFile object, or an `MxNxB` array.
    '''
    status = spy._status
    # Fast path: ndarray input is handled with vectorized numpy calls.
    if isinstance(image, np.ndarray):
        X = image.astype(np.float64)
        if X.ndim == 3:
            # Flatten to a B x (M*N) matrix (bands in rows, samples in
            # columns).
            # NOTE(review): this reshapes `image` (original dtype), not the
            # float64 copy `X`, so the astype above is discarded for 3D
            # input. numpy's average/cov still compute in floating point.
            X = image.reshape(-1, image.shape[-1]).T
        if mask is not None:
            mask = mask.ravel()
            if index is not None:
                ii = np.argwhere(mask == index)
            else:
                ii = np.argwhere(mask != 0)
            # Keep only the selected sample columns.
            X = np.take(X, ii.squeeze(), axis=1)
        m = np.average(X, axis=1)
        C = np.cov(X)
        return (m, C, X.shape[1])
    # General path: stream pixels through an iterator and accumulate the
    # first and second moments.
    if not isinstance(image, Iterator):
        it = iterator(image, mask, index)
    else:
        it = image
    nSamples = it.get_num_elements()
    B = it.get_num_bands()
    sumX = np.zeros((B,), 'd')
    sumX2 = np.zeros((B, B), 'd')
    count = 0
    # Progress display is updated roughly every 1% of the samples.
    statusInterval = max(1, nSamples / 100)
    status.display_percentage('Covariance.....')
    for x in it:
        if not count % statusInterval:
            status.update_percentage(float(count) / nSamples * 100.)
        count += 1
        sumX += x
        x = x.astype(np.float64)[:, np.newaxis]
        # Accumulate the outer product for the second moment.
        sumX2 += x.dot(x.T)
    mean = (sumX / count)
    sumX = sumX[:, np.newaxis]
    # Unbiased covariance: (sum(x x^T) - n * mean mean^T) / (n - 1).
    cov = (sumX2 - sumX.dot(sumX.T) / count) / (count - 1)
    status.end_percentage()
    return (mean, cov, count)
def cov_avg(image, mask, weighted=True):
    '''Calculates the covariance averaged over a set of classes.

    Arguments:

        `image` (ndarrray, :class:`~spectral.Image`, or :class:`spectral.Iterator`):

            If an ndarray, it should have shape `MxNxB` and the mean &
            covariance will be calculated for each band (third dimension).

        `mask` (integer-valued ndarray):

            Elements specify the classes associated with pixels in `image`.
            All pixels associated with non-zero elements of `mask` will be
            used in the covariance calculation.

        `weighted` (bool, default True):

            Specifies whether the individual class covariances should be
            weighted when computing the average. If True, each class will
            be weighted by the number of pixels provided for the class;
            otherwise, a simple average of the class covariances is performed.

    Returns a class-averaged covariance matrix. The number of covariances used
    in the average is equal to the number of non-zero elements of `mask`.
    '''
    # Class 0 is reserved for "unlabeled" and is excluded.
    class_ids = set(mask.ravel()) - {0}
    class_stats = [calc_stats(image, mask, i) for i in class_ids]
    if weighted:
        N = sum(c.nsamples for c in class_stats)
        return np.sum([((c.nsamples - 1) / float(N - 1)) * c.cov
                       for c in class_stats], axis=0, dtype=np.float64)
    return np.mean([c.cov for c in class_stats], axis=0, dtype=np.float64)
def covariance(*args):
    '''
    Returns the covariance of the set of vectors.

    Usage::

        C = covariance(vectors [, mask=None [, index=None]])

    Arguments:

        `vectors` (ndarrray, :class:`~spectral.Image`, or :class:`spectral.Iterator`):

            If an ndarray, it should have shape `MxNxB` and the mean &
            covariance will be calculated for each band (third dimension).

        `mask` (ndarray):

            If `mask` is specified, mean & covariance will be calculated for
            all pixels indicated in the mask array. If `index` is specified,
            all pixels in `image` for which `mask == index` will be used;
            otherwise, all nonzero elements of `mask` will be used.

        `index` (int):

            Specifies which value in `mask` to use to select pixels from
            `image`. If not specified but `mask` is, then all nonzero elements
            of `mask` will be used.

        If neither `mask` nor `index` are specified, all samples in `vectors`
        will be used.

    Returns:

        `C` (ndarray):

            The `BxB` unbiased estimate (dividing by N-1) of the covariance
            of the vectors.

    To also return the mean vector and number of samples, call
    :func:`~spectral.algorithms.algorithms.mean_cov` instead.
    '''
    # Delegate to mean_cov and discard everything but the covariance.
    (_, cov, _) = mean_cov(*args)
    return cov
class PrincipalComponents:
    '''
    An object for storing a data set's principal components. The
    object has the following members:

        `eigenvalues`:

            A length B array of eigenvalues sorted in descending order

        `eigenvectors`:

            A `BxB` array of normalized eigenvectors (in columns)

        `stats` (:class:`GaussianStats`):

            A statistics object containing `mean`, `cov`, and `nsamples`.

        `transform`:

            A callable function to transform data to the space of the
            principal components.

        `reduce`:

            A method to return a reduced set of principal components based
            on either a fixed number of components or a fraction of total
            variance.

        `denoise`:

            A callable function to denoise data using a reduced set of
            principal components.

        `get_denoising_transform`:

            A callable function that returns a function for denoising data.
    '''
    def __init__(self, vals, vecs, stats):
        self.eigenvalues = vals
        self.eigenvectors = vecs
        self.stats = stats
        # Projects mean-centered data onto the principal axes.
        self.transform = LinearTransform(self.eigenvectors.T, pre=-self.mean)

    @property
    def mean(self):
        # Mean vector is delegated to the associated statistics object.
        return self.stats.mean

    @property
    def cov(self):
        # Covariance matrix is delegated to the associated statistics object.
        return self.stats.cov

    def reduce(self, N=0, **kwargs):
        '''Reduces the number of principal components.

        Keyword Arguments (one of the following must be specified):

            `num` (integer):

                Number of eigenvalues/eigenvectors to retain. The top `num`
                eigenvalues will be retained.

            `eigs` (list):

                A list of indices of eigenvalues/eigenvectors to be retained.

            `fraction` (float):

                The fraction of total image variance to retain. Eigenvalues
                will be retained (starting from greatest to smallest) until
                `fraction` of total image variance is retained.
        '''
        status = spy._status

        num = kwargs.get('num', None)
        eigs = kwargs.get('eigs', None)
        fraction = kwargs.get('fraction', None)
        if num is not None:
            return PrincipalComponents(self.eigenvalues[:num],
                                       self.eigenvectors[:, :num],
                                       self.stats)
        elif eigs is not None:
            vals = self.eigenvalues[eigs]
            vecs = self.eigenvectors[:, eigs]
            return PrincipalComponents(vals, vecs, self.stats)
        elif fraction is not None:
            if not 0 < fraction <= 1:
                raise Exception('fraction must be in range (0,1].')
            N = len(self.eigenvalues)
            cumsum = np.cumsum(self.eigenvalues)
            # `total` avoids shadowing the builtin `sum`.
            total = cumsum[-1]
            # Count how many eigenvalues are needed to reach `fraction` of
            # the total variance.
            for i in range(N):
                if (cumsum[i] / total) >= fraction:
                    break
            if i == (N - 1):
                # No reduction
                status.write('No reduction in eigenvectors achieved.')
                return self

            vals = self.eigenvalues[:i + 1]
            vecs = self.eigenvectors[:, :i + 1]
            return PrincipalComponents(vals, vecs, self.stats)
        else:
            raise Exception('Must specify one of the following keywords:'
                            '`num`, `eigs`, `fraction`.')

    def denoise(self, X, **kwargs):
        '''Returns a de-noised version of `X`.

        Arguments:

            `X` (np.ndarray):

                Data to be de-noised. Can be a single pixel or an image.

        Keyword Arguments (one of the following must be specified):

            `num` (integer):

                Number of eigenvalues/eigenvectors to use. The top `num`
                eigenvalues will be used.

            `eigs` (list):

                A list of indices of eigenvalues/eigenvectors to be used.

            `fraction` (float):

                The fraction of total image variance to retain. Eigenvalues
                will be included (starting from greatest to smallest) until
                `fraction` of total image variance is retained.

        Returns denoised image data with same shape as `X`.

        Note that calling this method is equivalent to calling the
        `get_denoising_transform` method with same keyword and applying the
        returned transform to `X`. If you intend to denoise data with the
        same parameters multiple times, then it is more efficient to get the
        denoising transform and reuse it, rather than calling this method
        multiple times.
        '''
        f = self.get_denoising_transform(**kwargs)
        return f(X)

    def get_denoising_transform(self, **kwargs):
        '''Returns a function for denoising image data.

        Keyword Arguments (one of the following must be specified):

            `num` (integer):

                Number of eigenvalues/eigenvectors to use. The top `num`
                eigenvalues will be used.

            `eigs` (list):

                A list of indices of eigenvalues/eigenvectors to be used.

            `fraction` (float):

                The fraction of total image variance to retain. Eigenvalues
                will be included (starting from greatest to smallest) until
                `fraction` of total image variance is retained.

        Returns a callable :class:`~spectral.algorithms.transforms.LinearTransform`
        object for denoising image data.
        '''
        # BUG FIX: this previously called `self.reduce(self, **kwargs)`,
        # passing the object itself as the (unused) positional `N` argument.
        V = self.reduce(**kwargs).eigenvectors
        # Project onto the retained subspace, then back: x -> V V^T (x - m) + m
        f = LinearTransform(V.dot(V.T), pre=-self.mean,
                            post=self.mean)
        return f
def principal_components(image):
    '''
    Calculate Principal Component eigenvalues & eigenvectors for an image.

    Usage::

        pc = principal_components(image)

    Arguments:

        `image` (ndarray, :class:`spectral.Image`, :class:`GaussianStats`):

            An `MxNxB` image

    Returns a :class:`~spectral.algorithms.algorithms.PrincipalComponents`
    object with the following members:

        `eigenvalues`:

            A length B array of eigenvalues

        `eigenvectors`:

            A `BxB` array of normalized eigenvectors

        `stats` (:class:`GaussianStats`):

            A statistics object containing `mean`, `cov`, and `nsamples`.

        `transform`:

            A callable function to transform data to the space of the
            principal components.

        `reduce`:

            A method to reduce the number of eigenvalues.

        `denoise`:

            A callable function to denoise data using a reduced set of
            principal components.

        `get_denoising_transform`:

            A callable function that returns a function for denoising data.
    '''
    if isinstance(image, GaussianStats):
        stats = image
    else:
        stats = calc_stats(image)

    (L, V) = np.linalg.eig(stats.cov)

    # numpy says eigenvalues may not be sorted, so sort them (descending) if
    # needed. BUG FIX: `np.alltrue` was removed in NumPy 2.0; `np.all` is the
    # equivalent replacement.
    if not np.all(np.diff(L) <= 0):
        ii = list(reversed(np.argsort(L)))
        L = L[ii]
        V = V[:, ii]

    return PrincipalComponents(L, V, stats)
class FisherLinearDiscriminant:
    '''
    An object for storing a data set's linear discriminant data. For `C`
    classes with `B`-dimensional data, the object has the following members:

        `eigenvalues`:

            A length `C-1` array of eigenvalues

        `eigenvectors`:

            A `BxC` array of normalized eigenvectors

        `mean`:

            The length `B` mean vector of the image pixels (from all classes)

        `cov_b`:

            The `BxB` matrix of covariance *between* classes

        `cov_w`:

            The `BxB` matrix of average covariance *within* each class

        `transform`:

            A callable function to transform data to the space of the
            linear discriminant.
    '''
    def __init__(self, vals, vecs, mean, cov_b, cov_w):
        self.eigenvalues = vals
        self.eigenvectors = vecs
        self.mean = mean
        self.cov_b = cov_b
        self.cov_w = cov_w
        # Projects mean-centered data onto the discriminant axes.
        self.transform = LinearTransform(self.eigenvectors.T, pre=-self.mean)
def linear_discriminant(classes, whiten=True):
    '''
    Solve Fisher's linear discriminant for eigenvalues and eigenvectors.

    Usage: (L, V, Cb, Cw) = linear_discriminant(classes)

    Arguments:

        `classes` (:class:`~spectral.algorithms.TrainingClassSet`):

            The set of `C` classes to discriminate.

    Returns a `FisherLinearDiscriminant` object containing the within/between-
    class covariances, mean vector, and a callable transform to convert data to
    the transform's space.

    This function determines the solution to the generalized eigenvalue problem

            Cb * x = lambda * Cw * x

    Since cov_w is normally invertible, this reduces to

            (inv(Cw) * Cb) * x = lambda * x

    References:

        <NAME>. & <NAME>. Remote Sensing Digital Image Analysis: An
        Introduction. (Springer: Berlin, 1999).
    '''
    C = len(classes)  # Number of training sets
                      # NOTE(review): `C` is unused below (`K` duplicates it).
    # At most C-1 non-trivial discriminant axes exist for C classes.
    rank = len(classes) - 1

    # Ensure per-class mean/cov are available.
    classes.calc_stats()

    # Calculate total # of training pixels and total mean
    N = 0
    B = classes.nbands
    K = len(classes)
    mean = np.zeros(B, dtype=np.float64)
    for s in classes:
        N += s.size()
        mean += s.size() * s.stats.mean
    mean /= N

    cov_b = np.zeros((B, B), np.float64)            # cov between classes
    cov_w = np.zeros((B, B), np.float64)            # cov within classes
    for s in classes:
        # Within-class scatter: sample-size-weighted average of class covs.
        cov_w += ((s.size() - 1) / float(N - 1)) * s.stats.cov
        m = s.stats.mean - mean
        # Between-class scatter: weighted outer products of mean deviations.
        cov_b += (s.size() / float(N) / (K - 1)) * np.outer(m, m)

    inv_cov_w = np.linalg.inv(cov_w)
    (vals, vecs) = np.linalg.eig(inv_cov_w.dot(cov_b))
    # Keep only the `rank` leading eigenpairs.
    vals = vals[:rank]
    vecs = vecs[:, :rank]

    if whiten:
        # Diagonalize cov_within in the new space
        v = vecs.T.dot(cov_w).dot(vecs)
        d = np.sqrt(np.diag(v) * np.diag(v).conj())
        for i in range(vecs.shape[1]):
            vecs[:, i] /= math.sqrt(d[i].real)

    return FisherLinearDiscriminant(vals.real, vecs.real, mean, cov_b, cov_w)

# Alias for Linear Discriminant Analysis (LDA)
lda = linear_discriminant
def log_det(x):
    '''Return the sum of logs of the positive eigenvalues of matrix `x`.

    Non-positive eigenvalues are skipped, so for a positive-definite matrix
    this equals the log of its determinant.
    '''
    positive_eigvals = [v for v in np.linalg.eigvals(x) if v > 0]
    return sum(np.log(positive_eigvals))
class GaussianStats(object):
    '''A class for storing Gaussian statistics for a data set.

    Statistics stored include:

        `mean`:

            Mean vector

        `cov`:

            Covariance matrix

        `nsamples`:

            Number of samples used in computing the statistics

    Several derived statistics are computed on-demand (and cached) and are
    available as property attributes. These include:

        `inv_cov`:

            Inverse of the covariance

        `sqrt_cov`:

            Matrix square root of covariance: sqrt_cov.dot(sqrt_cov) == cov

        `sqrt_inv_cov`:

            Matrix square root of the inverse of covariance

        `log_det_cov`:

            The log of the determinant of the covariance matrix

        `principal_components`:

            The principal components of the data, based on mean and cov.
    '''
    def __init__(self, mean=None, cov=None, nsamples=None, inv_cov=None):
        # NOTE: assignment order matters here. Setting `cov` goes through
        # the property setter below, which resets *all* cached derived
        # statistics (including `_inv_cov`), so a caller-supplied `inv_cov`
        # must be stored afterwards.
        self.cov = cov
        self._inv_cov = inv_cov
        self.mean = mean
        self.nsamples = nsamples
    @property
    def cov(self):
        '''Property method returning the covariance matrix.'''
        return self._cov
    @cov.setter
    def cov(self, C):
        # Changing the covariance invalidates everything derived from it.
        self.reset_derived_stats()
        self._cov = C
    @property
    def inv_cov(self):
        '''Property method returning the inverse of the covariance matrix.'''
        if self._inv_cov is None:
            # Computed lazily and cached until `cov` changes.
            self._inv_cov = np.linalg.inv(self._cov)
        return self._inv_cov
    def reset_derived_stats(self):
        # Drop the covariance and all cached values derived from it.
        self._cov = self._inv_cov = None
        self._sqrt_cov = self._sqrt_inv_cov = self._pcs = None
        self._log_det_cov = None
    @property
    def sqrt_cov(self):
        '''Property method returning the matrix square root of the covariance.

        If `C` is the covariance, then the returned value is a matrix `S`
        such that S.dot(S) == C.
        '''
        if self._sqrt_cov is None:
            # Built from the cached eigendecomposition of the covariance.
            pcs = self.principal_components
            self._sqrt_cov = matrix_sqrt(eigs=(pcs.eigenvalues,
                                               pcs.eigenvectors),
                                         symmetric=True)
        return self._sqrt_cov
    @property
    def sqrt_inv_cov(self):
        '''Property method returning matrix square root of inverse of cov.

        If `C` is the covariance, then the returned value is a matrix `S`
        such that S.dot(S) == inv(C).
        '''
        if self._sqrt_inv_cov is None:
            # Built from the cached eigendecomposition of the covariance.
            pcs = self.principal_components
            self._sqrt_inv_cov = matrix_sqrt(eigs=(pcs.eigenvalues,
                                                   pcs.eigenvectors),
                                             symmetric=True,
                                             inverse=True)
        return self._sqrt_inv_cov
    @property
    def principal_components(self):
        # Lazily computed eigendecomposition of the (symmetric) covariance.
        if self._pcs is None:
            (evals, evecs) = np.linalg.eigh(self._cov)
            self._pcs = PrincipalComponents(evals, evecs, self)
        return self._pcs
    @property
    def log_det_cov(self):
        # Log-determinant computed from positive eigenvalues only
        # (non-positive eigenvalues are skipped).
        if self._log_det_cov is None:
            evals = self.principal_components.eigenvalues
            self._log_det_cov = np.sum(np.log([v for v in evals if v > 0]))
        return self._log_det_cov
    def transform(self, xform):
        '''Returns a version of the stats transformed by a linear transform.'''
        if not isinstance(xform, LinearTransform):
            raise TypeError('Expected a LinearTransform object.')
        m = xform(self.mean)
        # Covariance transforms as A C A^T for a linear transform A.
        C = xform._A.dot(self.cov).dot(xform._A.T)
        return GaussianStats(mean=m, cov=C, nsamples=self.nsamples)
    def get_whitening_transform(self):
        '''Returns transform that centers and whitens data for these stats.'''
        C_1 = np.linalg.inv(self.cov)
        # sqrt(inv(C)) maps the distribution to unit covariance.
        return LinearTransform(matrix_sqrt(C_1, True), pre=-self.mean)
def calc_stats(image, mask=None, index=None, allow_nan=False):
    '''Computes Gaussian stats for image data.

    Arguments:

        `image` (ndarrray, :class:`~spectral.Image`, or :class:`spectral.Iterator`):

            If an ndarray, it should have shape `MxNxB` and the mean &
            covariance will be calculated for each band (third dimension).

        `mask` (ndarray):

            If `mask` is specified, mean & covariance will be calculated for
            all pixels indicated in the mask array. If `index` is specified,
            all pixels in `image` for which `mask == index` will be used;
            otherwise, all nonzero elements of `mask` will be used.

        `index` (int):

            Specifies which value in `mask` to use to select pixels from
            `image`. If not specified but `mask` is, then all nonzero elements
            of `mask` will be used.

        `allow_nan` (bool, default False):

            If True, statistics will be computed even if `np.nan` values are
            present in the data; otherwise, `~spectral.algorithms.spymath.NaNValueError`
            is raised.

    If neither `mask` nor `index` are specified, all samples in `vectors`
    will be used.

    Returns:

        `GaussianStats` object:

            This object will have members `mean`, `cov`, and `nsamples`.
    '''
    (mean, cov, nsamples) = mean_cov(image, mask, index)
    if has_nan(mean) and not allow_nan:
        raise NaNValueError('NaN values present in data.')
    return GaussianStats(mean=mean, cov=cov, nsamples=nsamples)
class TrainingClass:
    '''A set of pixels from an image, defined by a mask, used as a training
    class for classification.'''
    def __init__(self, image, mask, index=0, class_prob=1.0):
        '''Creates a new training class defined by applying `mask` to `image`.

        Arguments:

            `image` (:class:`spectral.Image` or :class:`numpy.ndarray`):

                The `MxNxB` image over which the training class is defined.

            `mask` (:class:`numpy.ndarray`):

                An `MxN` array of integers that specifies which pixels in
                `image` are associated with the class.

            `index` (int) [default 0]:

                if `index` == 0, all nonzero elements of `mask` are associated
                with the class. If `index` is nonzero, all elements of `mask`
                equal to `index` are associated with the class.

            `class_prob` (float) [default 1.0]:

                Defines the prior probability associated with the class, which
                is used in maximum likelihood classification. If `classProb`
                is 1.0, prior probabilities are ignored by classifiers, giving
                all class equal weighting.
        '''
        self.image = image
        # BUG FIX: previously `self.nbands` was unconditionally overwritten
        # with None (missing `else`), discarding the band count for non-None
        # images.
        if image is not None:
            self.nbands = image.shape[2]
        else:
            self.nbands = None
        self.mask = mask
        self.index = index
        self.class_prob = class_prob
        self.stats = None           # GaussianStats, filled by calc_stats()

        self._stats_valid = False

    def __iter__(self):
        '''Returns an iterator over all samples for the class.'''
        it = ImageMaskIterator(self.image, self.mask, self.index)
        for i in it:
            yield i

    def stats_valid(self, tf=None):
        '''
        Sets statistics for the TrainingClass to be valid or invalid.

        Arguments:

            `tf` (bool or None):

                A value evaluating to False indicates that statistics should be
                recalculated prior to being used. If the argument is `None`,
                a value will be returned indicating whether stats need to be
                recomputed.
        '''
        if tf is None:
            return self._stats_valid
        self._stats_valid = tf

    def size(self):
        '''Returns the number of pixels/samples in the training set.'''
        # If the stats are invalid, the number of pixels in the
        # training set may have changed, so recount from the mask.
        if self._stats_valid:
            return self.stats.nsamples

        if self.index:
            return np.sum(np.equal(self.mask, self.index).ravel())
        else:
            return np.sum(np.not_equal(self.mask, 0).ravel())

    def calc_stats(self):
        '''
        Calculates statistics for the class.

        This function causes the :attr:`stats` attribute of the class to be
        updated, where `stats` will have the following attributes:

        =============  ======================  ===================================
        Attribute      Type                    Description
        =============  ======================  ===================================
        `mean`         :class:`numpy.ndarray`  length-`B` mean vector
        `cov`          :class:`numpy.ndarray`  `BxB` covariance matrix
        `inv_cov`      :class:`numpy.ndarray`  Inverse of `cov`
        `log_det_cov`  float                   Natural log of determinant of `cov`
        =============  ======================  ===================================
        '''
        self.stats = calc_stats(self.image, self.mask, self.index)
        self.nbands = self.image.shape[-1]
        self._stats_valid = True

    def transform(self, transform):
        '''
        Perform a linear transformation on the statistics of the training set.

        Arguments:

            `transform` (:class:numpy.ndarray or LinearTransform):

                The linear transform array. If the class has `B` bands, then
                `transform` must have shape `(C,B)`.

        After `transform` is applied, the class statistics will have `C` bands.
        '''
        if isinstance(transform, np.ndarray):
            transform = LinearTransform(transform)
        self.stats.mean = transform(self.stats.mean)
        # Covariance transforms as A C A^T.
        self.stats.cov = np.dot(
            transform._A, self.stats.cov).dot(transform._A.T)
        self.nbands = transform.dim_out
class SampleIterator:
    '''Iterator over all classes and samples in a TrainingClassSet object.'''
    def __init__(self, trainingData):
        self.classes = trainingData

    def __iter__(self):
        # Flatten: yield every sample of every class, class by class.
        for training_class in self.classes:
            yield from training_class
class TrainingClassSet:
    '''A class to manage a set of :class:`~spectral.TrainingClass` objects.'''
    def __init__(self):
        self.classes = {}       # Maps class index -> TrainingClass object.
        self.nbands = None

    def __getitem__(self, i):
        '''Returns the training class having ID i.'''
        return self.classes[i]

    def __len__(self):
        '''Returns number of training classes in the set.'''
        return len(self.classes)

    def add_class(self, cl):
        '''Adds a new class to the training set.

        Arguments:

            `cl` (:class:`spectral.TrainingClass`):

                `cl.index` must not duplicate a class already in the set.
        '''
        if cl.index in self.classes:
            raise Exception('Attempting to add class with duplicate index.')
        self.classes[cl.index] = cl
        # First class added determines the band count for the set.
        if not self.nbands:
            self.nbands = cl.nbands

    def transform(self, X):
        '''Applies linear transform, M, to all training classes.

        Arguments:

            `X` (:class:numpy.ndarray):

                The linear transform array. If the classes have `B` bands,
                then `X` must have shape `(C,B)`.

        After the transform is applied, all classes will have `C` bands.
        '''
        for cl in list(self.classes.values()):
            cl.transform(X)
        self.nbands = list(self.classes.values())[0].nbands

    def __iter__(self):
        '''An iterator over all training classes in the set.'''
        for cl in list(self.classes.values()):
            yield cl

    def all_samples(self):
        '''An iterator over all samples in all classes.'''
        return SampleIterator(self)

    def calc_stats(self):
        '''Computes statistics for each class, if not already computed.'''
        for c in list(self.classes.values()):
            if not c.stats_valid():
                c.calc_stats()
        self.nbands = list(self.classes.values())[0].nbands

    def save(self, filename, calc_stats=False):
        '''Pickles the set's class statistics to `filename`.

        If any class is missing statistics, they are computed first when
        `calc_stats` is True; otherwise an exception is raised.
        '''
        for c in list(self.classes.values()):
            if c.stats is None:
                if not calc_stats:
                    msg = 'Class statistics are missing from at least one ' \
                          'class and are required to save the training class ' \
                          'data. Call the `save` method with keyword ' \
                          '`calc_stats=True` if you want to compute them and ' \
                          'then save the class data.'
                    raise Exception(msg)
                else:
                    c.calc_stats()
        # Context manager guarantees the file is closed even on error.
        with open(filename, 'wb') as f:
            ids = sorted(self.classes.keys())
            pickle.dump(self.classes[ids[0]].mask, f)
            pickle.dump(len(self), f)
            for id in ids:
                c = self.classes[id]
                pickle.dump(c.index, f)
                pickle.dump(c.stats.cov, f)
                pickle.dump(c.stats.mean, f)
                pickle.dump(c.stats.nsamples, f)
                pickle.dump(c.class_prob, f)

    def load(self, filename, image):
        '''Loads class statistics pickled by `save` and attaches `image`.'''
        # BUG FIX: the original ended with `f.close` (attribute access, not a
        # call), so the file was never closed. A `with` block closes it
        # reliably.
        with open(filename, 'rb') as f:
            mask = pickle.load(f)
            nclasses = pickle.load(f)
            for i in range(nclasses):
                index = pickle.load(f)
                cov = pickle.load(f)
                mean = pickle.load(f)
                nsamples = pickle.load(f)
                class_prob = pickle.load(f)
                c = TrainingClass(image, mask, index, class_prob)
                c.stats = GaussianStats(mean=mean, cov=cov, nsamples=nsamples)
                if not (cov is None or mean is None or nsamples is None):
                    c.stats_valid(True)
                    c.nbands = len(mean)
                self.add_class(c)
def create_training_classes(image, class_mask, calc_stats=False, indices=None):
    '''
    Creates a :class:spectral.algorithms.TrainingClassSet: from an indexed array.

    USAGE:  sets = createTrainingClasses(classMask)

    Arguments:

        `image` (:class:`spectral.Image` or :class:`numpy.ndarray`):

            The image data for which the training classes will be defined.
            `image` has shape `MxNxB`.

        `class_mask` (:class:`numpy.ndarray`):

            A rank-2 array whose elements are indices of various spectral
            classes.  if `class_mask[i,j]` == `k`, then `image[i,j]` is
            assumed to belong to class `k`.

        `calc_stats` (bool):

            An optional parameter which, if True, causes statistics to be
            calculated for all training classes.

    Returns:

        A :class:`spectral.algorithms.TrainingClassSet` object.

    The dimensions of classMask should be the same as the first two dimensions
    of the corresponding image. Values of zero in classMask are considered
    unlabeled and are not added to a training set.
    '''
    # Index 0 means "unlabeled" and is never turned into a class.
    if indices is not None:
        class_indices = set(indices) - {0}
    else:
        class_indices = set(class_mask.ravel()) - {0}
    classes = TrainingClassSet()
    classes.nbands = image.shape[-1]
    for idx in class_indices:
        new_class = TrainingClass(image, class_mask, idx)
        if calc_stats:
            new_class.calc_stats()
        classes.add_class(new_class)
    return classes
def ndvi(data, red, nir):
'''Calculates Normalized Difference Vegetation Index (NDVI).
Arguments:
`data` (ndarray or :class:`spectral.Image`):
The array or SpyFile for which to calculate the index.
`red` (int or int range):
Index of the red band or an index range for multiple bands.
`nir` (int or int range):
An integer index of the near infrared band or an index range for
multiple bands.
Returns an ndarray:
An array containing NDVI values in the range [-1.0, 1.0] for each
corresponding element of data.
'''
r = data[:, :, red].astype(float)
if len(r.shape) == 3 and r.shape[2] > 1:
r = sum(r, 2) / r.shape[2]
n = data[:, :, nir].astype(float)
if len(n.shape) == 3 and n.shape[2] > 1:
n = sum(n, 2) / n.shape[2]
return (n - r) / (n + r)
def bdist(class1, class2):
'''
Calulates the Bhattacharyya distance between two classes.
USAGE: bd = bdist(class1, class2)
Arguments:
`class1`, `class2` (:class:`~spectral.algorithms.algorithms.TrainingClass`)
Returns:
A float value for the Bhattacharyya Distance between the classes. This
function is aliased to :func:`~spectral.algorithms.algorithms.bDistance`.
References:
<NAME>. & <NAME>. Remote Sensing Digital Image Analysis: An
Introduction. (Springer: Berlin, 1999).
'''
terms = bdist_terms(class1, class2)
return terms[0] + terms[1]
bDistance = bdist
def bdist_terms(a, b):
'''
Calulate the linear and quadratic terms of the Bhattacharyya distance
between two classes.
USAGE: (linTerm, quadTerm) = bDistanceTerms(a, b)
ARGUMENTS:
(a, b) The classes for which to determine the
B-distance.
RETURN VALUE:
A 2-tuple of the linear and quadratic terms
'''
m = a.stats.mean - b.stats.mean
avgCov = (a.stats.cov + b.stats.cov) / 2
lin_term = (1 / 8.) * np.dot(np.transpose(m), np.dot(np.inv(avgCov), m))
quad_term = 0.5 * (log_det(avgCov)
- 0.5 * a.stats.log_det_cov
- 0.5 * b.stats.log_det_cov)
return (lin_term, float(quad_term))
def transform_image(matrix, image):
'''
Performs linear transformation on all pixels in an image.
Arguments:
matrix (:class:`numpy.ndarray`):
A `CxB` linear transform to apply.
image (:class:`numpy.ndarray` or :class:`spectral.Image`):
Image data to transform
Returns:
If `image` is an `MxNxB` :class:`numpy.ndarray`, the return will be a
transformed :class:`numpy.ndarray` with shape `MxNxC`. If `image` is
:class:`spectral.Image`, the returned object will be a
:class:`spectral.TransformedImage` object and no transformation of data
will occur until elements of the object are accessed.
'''
if isinstance(image, SpyFile):
return TransformedImage(matrix, image)
elif isinstance(image, np.ndarray):
(M, N, B) = image.shape
ximage = np.zeros((M, N, matrix.shape[0]), float)
for i in range(M):
for j in range(N):
ximage[i, j] = np.dot(matrix, image[i, j].astype(float))
return ximage
else:
raise 'Unrecognized image type passed to transform_image.'
def orthogonalize(vecs, start=0):
'''
Performs Gram-Schmidt Orthogonalization on a set of vectors.
Arguments:
`vecs` (:class:`numpy.ndarray`):
The set of vectors for which an orthonormal basis will be created.
If there are `C` vectors of length `B`, `vecs` should be `CxB`.
`start` (int) [default 0]:
If `start` > 0, then `vecs[start]` will be assumed to already be
orthonormal.
Returns:
A new `CxB` containing an orthonormal basis for the given vectors.
'''
(M, N) = vecs.shape
basis = np.array(np.transpose(vecs))
eye = np.identity(N).astype(float)
for i in range(start, M):
if i == 0:
basis[:, 0] /= np.linalg.norm(basis[:, 0])
continue
v = basis[:, i] / np.linalg.norm(basis[:, i])
U = basis[:, :i]
P = eye - U.dot(np.linalg.inv(U.T.dot(U)).dot(U.T))
basis[:, i] = P.dot(v)
basis[:, i] /= np.linalg.norm(basis[:, i])
return np.transpose(basis)
def unmix(data, members):
'''
Perform linear unmixing on image data.
USAGE: mix = unmix(data, members)
ARGUMENTS:
data The MxNxB image data to be unmixed
members An CxB array of C endmembers
RETURN VALUE:
mix An MxNxC array of endmember fractions.
unmix performs linear unmixing on the image data. After calling the
function, mix[:,:,i] will then represent the fractional abundances
for the i'th endmember. If the result of unmix is returned into 'mix',
then an array of indices of greatest fractional endmembers is obtained
by argmax(mix).
Note that depending on endmembers given, fractional abundances for
endmembers may be negative.
'''
assert members.shape[1] == data.shape[2], \
'Matrix dimensions are not aligned.'
members = members.astype(float)
# Calculate the pseudo inverse
pi = np.dot(members, np.transpose(members))
pi = np.dot(np.inv(pi), members)
(M, N, B) = data.shape
unmixed = np.zeros((M, N, members.shape[0]), float)
for i in range(M):
for j in range(N):
unmixed[i, j] = np.dot(pi, data[i, j].astype(float))
return unmixed
def spectral_angles(data, members):
'''Calculates spectral angles with respect to given set of spectra.
Arguments:
`data` (:class:`numpy.ndarray` or :class:`spectral.Image`):
An `MxNxB` image for which spectral angles will be calculated.
`members` (:class:`numpy.ndarray`):
`CxB` array of spectral endmembers.
Returns:
`MxNxC` array of spectral angles.
Calculates the spectral angles between each vector in data and each of the
endmembers. The output of this function (angles) can be used to classify
the data by minimum spectral angle by calling argmin(angles).
'''
assert members.shape[1] == data.shape[2], \
'Matrix dimensions are not aligned.'
m = np.array(members, np.float64)
m /= np.sqrt(np.einsum('ij,ij->i', m, m))[:, np.newaxis]
norms = np.sqrt(np.einsum('ijk,ijk->ij', data, data))
dots = np.einsum('ijk,mk->ijm', data, m)
dots = np.clip(dots / norms[:, :, np.newaxis], -1, 1)
return np.arccos(dots)
def msam(data, members):
'''Modified SAM scores according to Oshigami, et al [1]. Endmembers are
mean-subtracted prior to spectral angle calculation. Results are
normalized such that the maximum value of 1 corresponds to a perfect match
(zero spectral angle).
Arguments:
`data` (:class:`numpy.ndarray` or :class:`spectral.Image`):
An `MxNxB` image for which spectral angles will be calculated.
`members` (:class:`numpy.ndarray`):
`CxB` array of spectral endmembers.
Returns:
`MxNxC` array of MSAM scores with maximum value of 1 corresponding
to a perfect match (zero spectral angle).
Calculates the spectral angles between each vector in data and each of the
endmembers. The output of this function (angles) can be used to classify
the data by minimum spectral angle by calling argmax(angles).
References:
[1] <NAME>, <NAME>, <NAME>, <NAME>,
<NAME>, <NAME>, <NAME>, <NAME>, and
<NAME>. 2013. Mineralogical mapping of southern Namibia by application
of continuum-removal MSAM method to the HyMap data. Int. J. Remote Sens.
34, 15 (August 2013), 5282-5295.
'''
# The modifications to the `spectral_angles` function were contributed by
# <NAME>.
assert members.shape[1] == data.shape[2], \
'Matrix dimensions are not aligned.'
(M, N, B) = data.shape
m = np.array(members, np.float64)
C = m.shape[0]
# Normalize endmembers
for i in range(C):
# Fisher z trafo type operation
m[i] -= np.mean(m[i])
m[i] /= np.sqrt(m[i].dot(m[i]))
angles = np.zeros((M, N, C), np.float64)
for i in range(M):
for j in range(N):
#Fisher z trafo type operation
v = data[i, j] - np.mean(data[i, j])
v /= np.sqrt(v.dot(v))
v = np.clip(v, -1, 1)
for k in range(C):
# Calculate Mineral Index according to Oshigami et al.
# (Intnl. J. of Remote Sens. 2013)
a = np.clip(v.dot(m[k]), -1, 1)
angles[i,j,k]= 1.0 - np.arccos(a) / (math.pi / 2)
return angles
def noise_from_diffs(X, direction='lowerright'):
'''Estimates noise statistcs by taking differences of adjacent pixels.
Arguments:
`X` (np.ndarray):
The data from which to estimage noise statistics. `X` should have
shape `(nrows, ncols, nbands`).
`direction` (str, default "lowerright"):
The pixel direction along which to calculate pixel differences.
Must be one of the following:
'lowerright':
Take difference with pixel diagonally to lower right
'lowerleft':
Take difference with pixel diagonally to lower right
'right':
Take difference with pixel to the right
'lower':
Take differenece with pixel below
Returns a :class:`~spectral.algorithms.algorithms.GaussianStats` object.
'''
if direction.lower() not in ['lowerright', 'lowerleft', 'right', 'lower']:
raise ValueError('Invalid `direction` value.')
if direction == 'lowerright':
deltas = X[:-1, :-1, :] - X[1:, 1:, :]
elif direction == 'lowerleft':
deltas = X[:-1, 1:, :] - X[1:, :-1, :]
elif direction == 'right':
deltas = X[:, :-1, :] - X[:, 1:, :]
else:
deltas = X[:-1, :, :] - X[1:, :, :]
stats = calc_stats(deltas)
stats.cov /= 2.0
return stats
class MNFResult(object):
'''Result object returned by :func:`~spectral.algorithms.algorithms.mnf`.
This object contains data associates with a Minimum Noise Fraction
calculation, including signal and noise statistics, as well as the
Noise-Adjusted Principal Components (NAPC). This object can be used to
denoise image data or to reduce its dimensionality.
'''
def __init__(self, signal, noise, napc):
'''
Arguments:
`signal` (:class:`~spectral.GaussianStats`):
Signal statistics
`noise` (:class:`~spectral.GaussianStats`):
Noise statistics
`napc` (:class:`~spectral.PrincipalComponents`):
Noise-Adjusted Pricipal Components
'''
self.signal = signal
self.noise = noise
self.napc = napc
def _num_from_kwargs(self, **kwargs):
'''Returns number of components to retain for the given kwargs.'''
for key in kwargs:
if key not in ('num', 'snr'):
raise Exception('Keyword not recognized.')
num = kwargs.get('num', None)
snr = kwargs.get('snr', None)
if num == snr == None:
raise Exception('Must specify either `num` or `snr` keyword.')
if None not in (num, snr):
raise Exception('Can not specify both `num` and `snr` keywords.')
if snr is not None:
num = self.num_with_snr(snr)
return num
def denoise(self, X, **kwargs):
'''Returns a de-noised version of `X`.
Arguments:
`X` (np.ndarray):
Data to be de-noised. Can be a single pixel or an image.
One (and only one) of the following keywords must be specified:
`num` (int):
Number of Noise-Adjusted Principal Components to retain.
`snr` (float):
Threshold signal-to-noise ratio (SNR) to retain.
Returns denoised image data with same shape as `X`.
Note that calling this method is equivalent to calling the
`get_denoising_transform` method with same keyword and applying the
returned transform to `X`. If you only intend to denoise data with the
same parameters multiple times, then it is more efficient to get the
denoising transform and reuse it, rather than calling this method
multilple times.
'''
f = self.get_denoising_transform(**kwargs)
return f(X)
def get_denoising_transform(self, **kwargs):
'''Returns a function for denoising image data.
One (and only one) of the following keywords must be specified:
`num` (int):
Number of Noise-Adjusted Principal Components to retain.
`snr` (float):
Threshold signal-to-noise ratio (SNR) to retain.
Returns a callable :class:`~spectral.algorithms.transforms.LinearTransform`
object for denoising image data.
'''
N = self._num_from_kwargs(**kwargs)
V = self.napc.eigenvectors
Vr = np.array(V)
Vr[:, N:] = 0.
f = LinearTransform(self.noise.sqrt_cov.dot(Vr).dot(V.T) \
.dot(self.noise.sqrt_inv_cov),
pre=-self.signal.mean,
post=self.signal.mean)
return f
def reduce(self, X, **kwargs):
'''Reduces dimensionality of image data.
Arguments:
`X` (np.ndarray):
Data to be reduced. Can be a single pixel or an image.
One (and only one) of the following keywords must be specified:
`num` (int):
Number of Noise-Adjusted Principal Components to retain.
`snr` (float):
Threshold signal-to-noise ratio (SNR) to retain.
Returns a verions of `X` with reduced dimensionality.
Note that calling this method is equivalent to calling the
`get_reduction_transform` method with same keyword and applying the
returned transform to `X`. If you intend to denoise data with the
same parameters multiple times, then it is more efficient to get the
reduction transform and reuse it, rather than calling this method
multilple times.
'''
f = self.get_reduction_transform(**kwargs)
return f(X)
def get_reduction_transform(self, **kwargs):
'''Reduces dimensionality of image data.
One (and only one) of the following keywords must be specified:
`num` (int):
Number of Noise-Adjusted Principal Components to retain.
`snr` (float):
Threshold signal-to-noise ratio (SNR) to retain.
Returns a callable :class:`~spectral.algorithms.transforms.LinearTransform`
object for reducing the dimensionality of image data.
'''
N = self._num_from_kwargs(**kwargs)
V = self.napc.eigenvectors
f = LinearTransform(V[:, :N].T.dot(self.noise.sqrt_inv_cov),
pre=-self.signal.mean)
return f
def num_with_snr(self, snr):
'''Returns the number of components with SNR >= `snr`.'''
return np.sum(self.napc.eigenvalues >= (snr + 1))
def mnf(signal, noise):
'''Computes Minimum Noise Fraction / Noise-Adjusted Principal Components.
Arguments:
`signal` (:class:`~spectral.algorithms.algorithms.GaussianStats`):
Estimated signal statistics
`noise` (:class:`~spectral.algorithms.algorithms.GaussianStats`):
Estimated noise statistics
Returns an :class:`~spectral.algorithms.algorithms.MNFResult` object,
containing the Noise-Adjusted Principal Components (NAPC) and methods for
denoising or reducing dimensionality of associated data.
The Minimum Noise Fraction (MNF) is similar to the Principal Components
transformation with the difference that the Principal Components associated
with the MNF are ordered by descending signal-to-noise ratio (SNR) rather
than overall image variance. Note that the eigenvalues of the NAPC are
equal to one plus the SNR in the transformed space (since noise has
whitened unit variance in the NAPC coordinate space).
Example:
>>> data = open_image('92AV3C.lan').load()
>>> signal = calc_stats(data)
>>> noise = noise_from_diffs(data[117: 137, 85: 122, :])
>>> mnfr = mnf(signal, noise)
>>> # De-noise the data by eliminating NAPC components where SNR < 10.
>>> # The de-noised data will be in the original coordinate space (at
>>> # full dimensionality).
>>> denoised = mnfr.denoise(data, snr=10)
>>> # Reduce dimensionality, retaining NAPC components where SNR >= 10.
>>> reduced = mnfr.reduce(data, snr=10)
>>> # Reduce dimensionality, retaining top 50 NAPC components.
>>> reduced = mnfr.reduce(data, num=50)
References:
Lee, <NAME>., <NAME>, and <NAME>. "Enhancement of
high spectral resolution remote-sensing data by a noise-adjusted
principal components transform." Geoscience and Remote Sensing, IEEE
Transactions on 28.3 (1990): 295-304.
'''
C = noise.sqrt_inv_cov.dot(signal.cov).dot(noise.sqrt_inv_cov)
(L, V) = np.linalg.eig(C)
# numpy says eigenvalues may not be sorted so we'll sort them, if needed.
if not np.alltrue(np.diff(L) <= 0):
ii = list(reversed(np.argsort(L)))
L = L[ii]
V = V[:, ii]
wstats = GaussianStats(mean=np.zeros_like(L), cov=C)
napc = PrincipalComponents(L, V, wstats)
return MNFResult(signal, noise, napc)
def ppi(X, niters, threshold=0, centered=False, start=None, display=0,
**imshow_kwargs):
'''Returns pixel purity indices for an image.
Arguments:
`X` (ndarray):
Image data for which to calculate pixel purity indices
`niters` (int):
Number of iterations to perform. Each iteration corresponds to a
projection of the image data onto a random unit vector.
`threshold` (numeric):
If this value is zero, only the two most extreme pixels will have
their indices incremented for each random vector. If the value is
greater than zero, then all pixels whose projections onto the
random vector are with `threshold` data units of either of the two
extreme pixels will also have their indices incremented.
`centered` (bool):
If True, then the pixels in X are assumed to have their mean
already subtracted; otherwise, the mean of `X` will be computed
and subtracted prior to computing the purity indices.
`start` (ndarray):
An optional array of initial purity indices. This can be used to
continue computing PPI values after a previous call to `ppi` (i.e.,
set `start` equal to the return value from a previou call to `ppi`.
This should be an integer-valued array whose dimensions are equal
to the first two dimensions of `X`.
`display` (integer):
If set to a postive integer, a :class:`~spectral.graphics.spypylab.ImageView`
window will be opened and dynamically display PPI values as the
function iterates. The value specifies the number of PPI iterations
between display updates. It is recommended to use a value around
100 or higher. If the `stretch` keyword (see :func:`~spectral.graphics.graphics.get_rgb`
for meaning) is not provided, a default stretch of (0.99, 0.999)
is used.
Return value:
An ndarray of integers that represent the pixel purity indices of the
input image. The return array will have dimensions equal to the first
two dimensions of the input image.
Keyword Arguments:
Any keyword accepted by :func:`~spectral.graphics.spypylab.imshow`.
These keywords will be passed to the image display and only have an
effect if the `display` argument is nonzero.
This function can be interruped with a KeyboardInterrupt (ctrl-C), in which
case, the most recent value of the PPI array will be returned. This can be
used in conjunction with the `display` argument to view the progression of
the PPI values until they appear stable, then terminate iteration using
ctrl-C.
References:
<NAME>., <NAME>, and <NAME>., "Mapping Target Signatures via
Partial Unmixing of AVIRIS Data," Pasadena, California, USA, 23 Jan 1995,
URI: http://hdl.handle.net/2014/33635
'''
if display is not None:
if not isinstance(display, Integral) or isinstance(display, bool) or \
display < 0:
msg = '`display` argument must be a non-negative integer.'
raise ValueError(msg)
if not centered:
stats = calc_stats(X)
X = X - stats.mean
shape = X.shape
X = X.reshape(-1, X.shape[-1])
nbands = X.shape[-1]
fig = None
updating = False
if start is not None:
counts = np.array(start.ravel())
else:
counts = np.zeros(X.shape[0], dtype=np.uint32)
if 'stretch' not in imshow_kwargs:
imshow_kwargs['stretch'] = (0.99, 0.999)
msg = 'Running {0} pixel purity iterations...'.format(niters)
spy._status.display_percentage(msg)
try:
for i in range(niters):
r = np.random.rand(nbands) - 0.5
r /= np.sqrt(np.sum(r * r))
s = X.dot(r)
imin = np.argmin(s)
imax = np.argmax(s)
updating = True
if threshold == 0:
# Only the two extreme pixels are incremented
counts[imin] += 1
counts[imax] += 1
else:
# All pixels within threshold distance from the two extremes
counts[s >= (s[imax] - threshold)] += 1
counts[s <= (s[imin] + threshold)] += 1
updating = False
if display > 0 and (i + 1) % display == 0:
if fig is not None:
fig.set_data(counts.reshape(shape[:2]), **imshow_kwargs)
else:
fig = spy.imshow(counts.reshape(shape[:2]), **imshow_kwargs)
fig.set_title('PPI ({} iterations)'.format(i + 1))
if not (i + 1) % 10:
spy._status.update_percentage(100 * (i + 1) / niters)
except KeyboardInterrupt:
spy._status.end_percentage('interrupted')
if not updating:
msg = 'KeyboardInterrupt received. Returning pixel purity ' \
'values after {0} iterations.'.format(i)
spy._status.write(msg)
return counts.reshape(shape[:2])
else:
msg = 'KeyboardInterrupt received during array update. PPI ' \
'values may be corrupt. Returning None'
spy._status.write(msg)
return None
spy._status.end_percentage()
return counts.reshape(shape[:2])
def smacc(spectra, min_endmembers=None, max_residual_norm=float('Inf')):
'''Returns SMACC decomposition (H = F * S + R) matrices for an image or
array of spectra.
Let `H` be matrix of shape NxB, where B is number of bands, and N number of
spectra, then if `spectra` is of the same shape, `H` will be equal to `spectra`.
Otherwise, `spectra` is assumed to be 3D spectral image, and it is reshaped
to match shape of `H`.
Arguments:
`spectra` (ndarray):
Image data for which to calculate SMACC decomposition matrices.
`min_endmembers` (int):
Minimal number of endmembers to find. Defaults to rank of `H`,
computed numerically with `numpy.linalg.matrix_rank`.
`max_residual_norm`:
Maximum value of residual vectors' norms. Algorithm will keep finding
new endmembers until max value of residual norms is less than this
argument. Defaults to float('Inf')
Returns:
3 matrices, S, F and R, such that H = F * S + R (but it might not always hold).
F is matrix of expansion coefficients of shape N x num_endmembers.
All values of F are equal to, or greater than zero.
S is matrix of endmember spectra, extreme vectors, of shape num_endmembers x B.
R is matrix of residuals of same shape as H (N x B).
If values of H are large (few tousands), H = F * S + R, might not hold,
because of numeric errors. It is advisable to scale numbers, by dividing
by 10000, for example. Depending on how accurate you want it to be,
you can check if H is really strictly equal to F * S + R,
and adjust R: R = H - np.matmul(F, S).
References:
<NAME>, <NAME>, and <NAME> "The sequential
maximum angle convex cone (SMACC) endmember model", Proc. SPIE 5425, Algorithms
and Technologies for Multispectral, Hyperspectral, and Ultraspectral Imagery X,
(12 August 2004); https://doi.org/10.1117/12.543794
'''
# Indices of vectors in S.
q = []
H = spectra if len(spectra.shape) == 2 else spectra.reshape(
(spectra.shape[0] * spectra.shape[1], spectra.shape[2]))
R = H
Fs = []
F = None
S = None
if min_endmembers is None:
min_endmembers = np.linalg.matrix_rank(H)
# Add the longest vector to q.
residual_norms = np.sqrt(np.einsum('ij,ij->i', H, H))
current_max_residual_norm = np.max(residual_norms)
if max_residual_norm is None:
max_residual_norm = current_max_residual_norm / min_endmembers
while len(q) < min_endmembers or current_max_residual_norm > max_residual_norm:
q.append(np.argmax(residual_norms))
n = len(q) - 1
# Current basis vector.
w = R[q[n]]
# Temporary to be used for projection calculation.
wt = w / (np.dot(w, w))
# Calculate projection coefficients.
On = np.dot(R, wt)
alpha = np.ones(On.shape, dtype=np.float64)
# Make corrections to satisfy convex cone conditions.
# First correct alphas for oblique projection when needed.
for k in range(len(Fs)):
t = On * Fs[k][q[n]]
# This is not so important for the algorithm itself.
# These values correpond to values where On == 0.0, and these
# will be zeroed out below. But to avoid divide-by-zero warning
# we set small values instead of zero.
t[t == 0.0] = 1e-10
np.minimum(Fs[k]/t, alpha, out=alpha)
# Clip negative projection coefficients.
alpha[On <= 0.0] = 0.0
# Current extreme vector should always be removed completely.
alpha[q[n]] = 1.0
# Calculate oblique projection coefficients.
Fn = alpha * On
# Correction for numerical stability.
Fn[Fn <= 0.0] = 0.0
# Remove projection to current basis from R.
R = R - np.outer(Fn, w)
# Update projection coefficients.
for k in range(len(Fs)):
Fs[k] -= Fs[k][q[n]] * Fn
# Correction because of numerical problems.
Fs[k][Fs[k] <= 0.0] = 0.0
# Add new Fn.
Fs.append(Fn)
residual_norms[:] = np.sqrt(np.einsum('ij,ij->i', R, R))
current_max_residual_norm = np.max(residual_norms)
print('Found {0} endmembers, current max residual norm is {1:.4f}\r'
.format(len(q), current_max_residual_norm), end='')
# Correction as suggested in the SMACC paper.
for k, s in enumerate(q):
Fs[k][q] = 0.0
Fs[k][s] = 1.0
F = np.array(Fs).T
S = H[q]
# H = F * S + R
return S, F, R | 0.869382 | 0.450359 |
import boto3
from botocore.exceptions import ClientError
import pytest
from websocket_chat import ApiGatewayWebsocket
@pytest.mark.parametrize('error_code', [None, 'TestException'])
def test_create_api(make_stubber, error_code):
    """Verify create_api returns the new API's ID and records both the ID and
    endpoint on the wrapper, or propagates the stubbed client error."""
    client = boto3.client('apigatewayv2')
    stubber = make_stubber(client)
    api_name = 'test-api_name'
    sock_gate = ApiGatewayWebsocket(api_name, client)
    route_selection = 'test-route_selection'
    api_id = 'test-api_id'
    api_endpoint = 'test-api_endpoint'
    stubber.stub_create_api(
        api_name, 'WEBSOCKET', route_selection, api_id, api_endpoint,
        error_code=error_code)
    if error_code is not None:
        # The stubbed error must surface to the caller unchanged.
        with pytest.raises(ClientError) as exc_info:
            sock_gate.create_api(route_selection)
        assert exc_info.value.response['Error']['Code'] == error_code
    else:
        assert sock_gate.create_api(route_selection) == api_id
        assert sock_gate.api_id == api_id
        assert sock_gate.api_endpoint == api_endpoint
@pytest.mark.parametrize('error_code,stop_on_method', [
    (None, None),
    ('TestException', 'stub_create_policy'),
    ('TestException', 'stub_attach_role_policy')])
def test_add_connection_permissions(
        make_stubber, stub_runner, error_code, stop_on_method):
    """Verify add_connection_permissions creates and attaches the connection
    policy, and deletes the policy again when attaching it fails."""
    apigatewayv2_client = boto3.client('apigatewayv2')
    iam_resource = boto3.resource('iam')
    iam_stubber = make_stubber(iam_resource.meta.client)
    sock_gate = ApiGatewayWebsocket('test-api', apigatewayv2_client)
    sock_gate.api_id = 'test-api_id'
    account = 'test-account'
    role_name = 'test-lambda-role'
    policy_name = f'{role_name}-{sock_gate.permission_policy_suffix}'
    # Fixed: removed the stray leading ':' that made this test ARN malformed
    # (all other ARNs in this file start with 'arn:').
    policy_arn = f'arn:aws:iam:REGION:123456789012:policy/{policy_name}'
    with stub_runner(error_code, stop_on_method) as runner:
        runner.add(iam_stubber.stub_create_policy, policy_name, policy_arn)
        runner.add(iam_stubber.stub_attach_role_policy, role_name, policy_arn)
        runner.add(iam_stubber.stub_get_policy, policy_arn)
    if error_code is not None and stop_on_method != 'stub_create_policy':
        # The policy was created before the failure, so it must be rolled back.
        iam_stubber.stub_delete_policy(policy_arn)
    if error_code is None:
        sock_gate.add_connection_permissions(account, role_name, iam_resource)
    else:
        with pytest.raises(ClientError) as exc_info:
            sock_gate.add_connection_permissions(account, role_name, iam_resource)
        assert exc_info.value.response['Error']['Code'] == error_code
@pytest.mark.parametrize('error_code,stop_on_method', [
    (None, None),
    ('TestException', 'stub_list_attached_role_policies'),
    ('TestException', 'stub_get_policy'),
    ('TestException', 'stub_detach_role_policy'),
    ('TestException', 'stub_delete_policy')])
def test_remove_connection_permissions(
        make_stubber, stub_runner, error_code, stop_on_method):
    """Verify remove_connection_permissions finds the attached connection
    policy, detaches it from the role, and deletes it, or propagates the
    stubbed client error from any step."""
    apigatewayv2_client = boto3.client('apigatewayv2')
    iam_resource = boto3.resource('iam')
    iam_stubber = make_stubber(iam_resource.meta.client)
    sock_gate = ApiGatewayWebsocket('test-api', apigatewayv2_client)
    role_name = 'test-role_name'
    policy_name = f'{role_name}-{sock_gate.permission_policy_suffix}'
    # Fixed: added the 'policy/' resource-path segment so this ARN matches the
    # format used by the sibling test_add_connection_permissions test.
    policy_arn = f'arn:aws:iam:REGION:123456789012:policy/{policy_name}'
    with stub_runner(error_code, stop_on_method) as runner:
        runner.add(
            iam_stubber.stub_list_attached_role_policies, role_name,
            {policy_name: policy_arn})
        runner.add(iam_stubber.stub_get_policy, policy_arn)
        runner.add(iam_stubber.stub_detach_role_policy, role_name, policy_arn)
        runner.add(iam_stubber.stub_delete_policy, policy_arn)
    if error_code is None:
        sock_gate.remove_connection_permissions(iam_resource.Role(role_name))
    else:
        with pytest.raises(ClientError) as exc_info:
            sock_gate.remove_connection_permissions(iam_resource.Role(role_name))
        assert exc_info.value.response['Error']['Code'] == error_code
@pytest.mark.parametrize('error_code,stop_on_method', [
    (None, None),
    ('TestException', 'stub_create_integration'),
    ('TestException', 'stub_create_route'),
    ('TestException', 'stub_add_permission')])
def test_add_route_and_integration(
        make_stubber, stub_runner, error_code, stop_on_method):
    """Verify add_route_and_integration creates a Lambda integration, a route
    that targets it, and an invoke permission for API Gateway, returning the
    new route's ID, or propagates the stubbed client error."""
    apigatewayv2_client = boto3.client('apigatewayv2')
    apigatewayv2_stubber = make_stubber(apigatewayv2_client)
    lambda_client = boto3.client('lambda')
    lambda_stubber = make_stubber(lambda_client)
    sock_gate = ApiGatewayWebsocket('test-api', apigatewayv2_client)
    sock_gate.api_id = 'test-api_id'
    integration_id = 'test-integration_id'
    route_name = 'test-route_name'
    lambda_func = {
        'FunctionName': 'test-function-name',
        # Fixed: account ID had 11 digits (12345679012); use the 12-digit
        # 123456789012 test account used by the other ARNs in this file.
        'FunctionArn': 'arn:aws:lambda:REGION:123456789012:function/test'}
    route_id = 'test-route_id'
    with stub_runner(error_code, stop_on_method) as runner:
        runner.add(
            apigatewayv2_stubber.stub_create_integration, sock_gate.api_id,
            integration_id)
        runner.add(
            apigatewayv2_stubber.stub_create_route, sock_gate.api_id, route_name,
            f'integrations/{integration_id}', route_id)
        runner.add(
            lambda_stubber.stub_add_permission, lambda_func['FunctionName'],
            'lambda:InvokeFunction', 'apigateway.amazonaws.com')
    if error_code is None:
        got_route_id = sock_gate.add_route_and_integration(
            route_name, lambda_func, lambda_client)
        assert got_route_id == route_id
    else:
        with pytest.raises(ClientError) as exc_info:
            sock_gate.add_route_and_integration(
                route_name, lambda_func, lambda_client)
        assert exc_info.value.response['Error']['Code'] == error_code
@pytest.mark.parametrize('error_code', [None, 'TestException'])
def test_deploy_api(make_stubber, error_code):
    """Verify deploy_api creates a stage and returns the stage's full URI,
    or propagates the stubbed client error."""
    client = boto3.client('apigatewayv2')
    stubber = make_stubber(client)
    sock_gate = ApiGatewayWebsocket('test-api', client)
    sock_gate.api_id = 'test-api_id'
    sock_gate.api_endpoint = 'test-api_endpoint'
    stage = 'test-stage'
    expected_uri = f'{sock_gate.api_endpoint}/{stage}'
    stubber.stub_create_stage(sock_gate.api_id, stage, error_code=error_code)
    if error_code is not None:
        with pytest.raises(ClientError) as exc_info:
            sock_gate.deploy_api(stage)
        assert exc_info.value.response['Error']['Code'] == error_code
    else:
        assert sock_gate.deploy_api(stage) == expected_uri
@pytest.mark.parametrize('error_code', [None, 'TestException'])
def test_get_websocket_api_info(make_stubber, error_code):
    """Verify get_websocket_api_info looks up the API by name, returns its ID
    and endpoint, and caches them on the wrapper, or propagates the stubbed
    client error."""
    client = boto3.client('apigatewayv2')
    stubber = make_stubber(client)
    sock_gate = ApiGatewayWebsocket('test-api', client)
    expected_id = 'test-api_id'
    expected_endpoint = 'test-api_endpoint'
    stubber.stub_get_apis(
        [{'Name': sock_gate.api_name,
          'ApiId': expected_id,
          'ApiEndpoint': expected_endpoint}],
        error_code=error_code)
    if error_code is not None:
        with pytest.raises(ClientError) as exc_info:
            sock_gate.get_websocket_api_info()
        assert exc_info.value.response['Error']['Code'] == error_code
    else:
        assert sock_gate.get_websocket_api_info() == (
            expected_id, expected_endpoint)
        assert sock_gate.api_id == expected_id
        assert sock_gate.api_endpoint == expected_endpoint
@pytest.mark.parametrize('error_code', [None, 'TestException'])
def test_delete_api(make_stubber, error_code):
    """Verify delete_api resolves the API's ID by name and deletes it,
    or propagates the stubbed client error from the delete call."""
    client = boto3.client('apigatewayv2')
    stubber = make_stubber(client)
    sock_gate = ApiGatewayWebsocket('test-api', client)
    api_id = 'test-api_id'
    # The lookup succeeds in both cases; only the delete can fail here.
    stubber.stub_get_apis(
        [{'Name': sock_gate.api_name, 'ApiId': api_id, 'ApiEndpoint': ''}])
    stubber.stub_delete_api(api_id, error_code=error_code)
    if error_code is not None:
        with pytest.raises(ClientError) as exc_info:
            sock_gate.delete_api()
        assert exc_info.value.response['Error']['Code'] == error_code
    else:
        sock_gate.delete_api()
from botocore.exceptions import ClientError
import pytest
from websocket_chat import ApiGatewayWebsocket
@pytest.mark.parametrize('error_code', [None, 'TestException'])
def test_create_api(make_stubber, error_code):
    """Verify create_api returns the new API's ID and records both the ID and
    endpoint on the wrapper, or propagates the stubbed client error."""
    client = boto3.client('apigatewayv2')
    stubber = make_stubber(client)
    api_name = 'test-api_name'
    sock_gate = ApiGatewayWebsocket(api_name, client)
    route_selection = 'test-route_selection'
    api_id = 'test-api_id'
    api_endpoint = 'test-api_endpoint'
    stubber.stub_create_api(
        api_name, 'WEBSOCKET', route_selection, api_id, api_endpoint,
        error_code=error_code)
    if error_code is not None:
        # The stubbed error must surface to the caller unchanged.
        with pytest.raises(ClientError) as exc_info:
            sock_gate.create_api(route_selection)
        assert exc_info.value.response['Error']['Code'] == error_code
    else:
        assert sock_gate.create_api(route_selection) == api_id
        assert sock_gate.api_id == api_id
        assert sock_gate.api_endpoint == api_endpoint
@pytest.mark.parametrize('error_code,stop_on_method', [
    (None, None),
    ('TestException', 'stub_create_policy'),
    ('TestException', 'stub_attach_role_policy')])
def test_add_connection_permissions(
        make_stubber, stub_runner, error_code, stop_on_method):
    """Verify add_connection_permissions creates and attaches the connection
    policy, and deletes the policy again when attaching it fails."""
    apigatewayv2_client = boto3.client('apigatewayv2')
    iam_resource = boto3.resource('iam')
    iam_stubber = make_stubber(iam_resource.meta.client)
    sock_gate = ApiGatewayWebsocket('test-api', apigatewayv2_client)
    sock_gate.api_id = 'test-api_id'
    account = 'test-account'
    role_name = 'test-lambda-role'
    policy_name = f'{role_name}-{sock_gate.permission_policy_suffix}'
    # Fixed: removed the stray leading ':' that made this test ARN malformed
    # (all other ARNs in this file start with 'arn:').
    policy_arn = f'arn:aws:iam:REGION:123456789012:policy/{policy_name}'
    with stub_runner(error_code, stop_on_method) as runner:
        runner.add(iam_stubber.stub_create_policy, policy_name, policy_arn)
        runner.add(iam_stubber.stub_attach_role_policy, role_name, policy_arn)
        runner.add(iam_stubber.stub_get_policy, policy_arn)
    if error_code is not None and stop_on_method != 'stub_create_policy':
        # The policy was created before the failure, so it must be rolled back.
        iam_stubber.stub_delete_policy(policy_arn)
    if error_code is None:
        sock_gate.add_connection_permissions(account, role_name, iam_resource)
    else:
        with pytest.raises(ClientError) as exc_info:
            sock_gate.add_connection_permissions(account, role_name, iam_resource)
        assert exc_info.value.response['Error']['Code'] == error_code
@pytest.mark.parametrize('error_code,stop_on_method', [
    (None, None),
    ('TestException', 'stub_list_attached_role_policies'),
    ('TestException', 'stub_get_policy'),
    ('TestException', 'stub_detach_role_policy'),
    ('TestException', 'stub_delete_policy')])
def test_remove_connection_permissions(
        make_stubber, stub_runner, error_code, stop_on_method):
    """Verify remove_connection_permissions detaches and deletes the IAM policy.

    Walks the expected IAM call sequence (list -> get -> detach -> delete),
    optionally injecting an error at each step; the stubbed error code must
    propagate in the raised ClientError.
    """
    apigatewayv2_client = boto3.client('apigatewayv2')
    iam_resource = boto3.resource('iam')
    iam_stubber = make_stubber(iam_resource.meta.client)
    sock_gate = ApiGatewayWebsocket('test-api', apigatewayv2_client)
    role_name = 'test-role_name'
    policy_name = f'{role_name}-{sock_gate.permission_policy_suffix}'
    policy_arn = f'arn:aws:iam:REGION:123456789012:{policy_name}'
    with stub_runner(error_code, stop_on_method) as runner:
        runner.add(
            iam_stubber.stub_list_attached_role_policies, role_name,
            {policy_name: policy_arn})
        runner.add(iam_stubber.stub_get_policy, policy_arn)
        runner.add(iam_stubber.stub_detach_role_policy, role_name, policy_arn)
        runner.add(iam_stubber.stub_delete_policy, policy_arn)
    if error_code is None:
        sock_gate.remove_connection_permissions(iam_resource.Role(role_name))
    else:
        with pytest.raises(ClientError) as exc_info:
            sock_gate.remove_connection_permissions(iam_resource.Role(role_name))
        assert exc_info.value.response['Error']['Code'] == error_code
@pytest.mark.parametrize('error_code,stop_on_method', [
    (None, None),
    ('TestException', 'stub_create_integration'),
    ('TestException', 'stub_create_route'),
    ('TestException', 'stub_add_permission')])
def test_add_route_and_integration(
        make_stubber, stub_runner, error_code, stop_on_method):
    """Verify add_route_and_integration wires a route to a Lambda integration.

    Expected call sequence: create the API Gateway integration, create the
    route pointing at it, then grant API Gateway permission to invoke the
    Lambda function.  On success the new route ID is returned.
    """
    apigatewayv2_client = boto3.client('apigatewayv2')
    apigatewayv2_stubber = make_stubber(apigatewayv2_client)
    lambda_client = boto3.client('lambda')
    lambda_stubber = make_stubber(lambda_client)
    sock_gate = ApiGatewayWebsocket('test-api', apigatewayv2_client)
    sock_gate.api_id = 'test-api_id'
    integration_id = 'test-integration_id'
    route_name = 'test-route_name'
    lambda_func = {
        'FunctionName': 'test-function-name',
        'FunctionArn': 'arn:aws:lambda:REGION:12345679012:function/test'}
    route_id = 'test-route_id'
    with stub_runner(error_code, stop_on_method) as runner:
        runner.add(
            apigatewayv2_stubber.stub_create_integration, sock_gate.api_id,
            integration_id)
        runner.add(
            apigatewayv2_stubber.stub_create_route, sock_gate.api_id, route_name,
            f'integrations/{integration_id}', route_id)
        runner.add(
            lambda_stubber.stub_add_permission, lambda_func['FunctionName'],
            'lambda:InvokeFunction', 'apigateway.amazonaws.com')
    if error_code is None:
        got_route_id = sock_gate.add_route_and_integration(
            route_name, lambda_func, lambda_client)
        assert got_route_id == route_id
    else:
        with pytest.raises(ClientError) as exc_info:
            sock_gate.add_route_and_integration(
                route_name, lambda_func, lambda_client)
        assert exc_info.value.response['Error']['Code'] == error_code
@pytest.mark.parametrize('error_code', [None, 'TestException'])
def test_deploy_api(make_stubber, error_code):
    """Verify deploy_api creates a stage and returns the stage-qualified URI."""
    apigatewayv2_client = boto3.client('apigatewayv2')
    apigatewayv2_stubber = make_stubber(apigatewayv2_client)
    sock_gate = ApiGatewayWebsocket('test-api', apigatewayv2_client)
    sock_gate.api_id = 'test-api_id'
    sock_gate.api_endpoint = 'test-api_endpoint'
    stage = 'test-stage'
    # The deployed URI is the API endpoint with the stage name appended.
    uri = f'{sock_gate.api_endpoint}/{stage}'
    apigatewayv2_stubber.stub_create_stage(
        sock_gate.api_id, stage, error_code=error_code)
    if error_code is None:
        got_uri = sock_gate.deploy_api(stage)
        assert got_uri == uri
    else:
        with pytest.raises(ClientError) as exc_info:
            sock_gate.deploy_api(stage)
        assert exc_info.value.response['Error']['Code'] == error_code
@pytest.mark.parametrize('error_code', [None, 'TestException'])
def test_get_websocket_api_info(make_stubber, error_code):
    """Verify get_websocket_api_info finds the API by name via GetApis.

    On success the (api_id, api_endpoint) pair is returned and also cached on
    the wrapper; on failure the stubbed error code must propagate.
    """
    apigatewayv2_client = boto3.client('apigatewayv2')
    apigatewayv2_stubber = make_stubber(apigatewayv2_client)
    sock_gate = ApiGatewayWebsocket('test-api', apigatewayv2_client)
    api_id = 'test-api_id'
    api_endpoint = 'test-api_endpoint'
    apigatewayv2_stubber.stub_get_apis([
        {'Name': sock_gate.api_name, 'ApiId': api_id, 'ApiEndpoint': api_endpoint}],
        error_code=error_code)
    if error_code is None:
        got_api_id, got_api_endpoint = sock_gate.get_websocket_api_info()
        assert got_api_id == api_id
        assert got_api_endpoint == api_endpoint
        assert sock_gate.api_id == api_id
        assert sock_gate.api_endpoint == api_endpoint
    else:
        with pytest.raises(ClientError) as exc_info:
            sock_gate.get_websocket_api_info()
        assert exc_info.value.response['Error']['Code'] == error_code
@pytest.mark.parametrize('error_code', [None, 'TestException'])
def test_delete_api(make_stubber, error_code):
apigatewayv2_client = boto3.client('apigatewayv2')
apigatewayv2_stubber = make_stubber(apigatewayv2_client)
sock_gate = ApiGatewayWebsocket('test-api', apigatewayv2_client)
api_id = 'test-api_id'
apigatewayv2_stubber.stub_get_apis([
{'Name': sock_gate.api_name, 'ApiId': api_id, 'ApiEndpoint': ''}])
apigatewayv2_stubber.stub_delete_api(api_id, error_code=error_code)
if error_code is None:
sock_gate.delete_api()
else:
with pytest.raises(ClientError) as exc_info:
sock_gate.delete_api()
assert exc_info.value.response['Error']['Code'] == error_code | 0.324771 | 0.216146 |
from urllib import request, parse
import time
import random
import hashlib
import json
import requests
def get_ts():  # timestamp: current time * 1000, truncated to an integer
    """Return the current Unix time in milliseconds, as a string."""
    ts = str(int(1000 * time.time()))
    return ts
def get_salt(ts):
    """Return *ts* with a random integer appended (the request salt)."""
    # NOTE: randint(0, 10) is inclusive on both ends, so the suffix is a
    # digit in 0..10 -- not 1..10 as the original comment claimed.
    salt = ts + str(random.randint(0, 10))
    return salt
def get_sign(words, salt):
    """Return the MD5 hex signature the Youdao endpoint expects.

    The signed payload is 'fanyideskweb' + words + salt + a constant token
    (presumably lifted from Youdao's web-client JavaScript -- unverified).
    """
    content = 'fanyideskweb' + words + salt + 'n%A-rKaT5fb[Gy?;N5@Tj'
    sign = hashlib.md5(content.encode()).hexdigest()
    return sign
def translate(words, ts, salt, sign):
url = "http://fanyi.youdao.com/translate_o?smartresult=dict&smartresult=rule"
data = {
"i": words,
"from": "AUTO",
"to": "AUTO",
"smartresult": "dict",
"client": "fanyideskweb",
"salt": salt,
"sign": sign,
'ts': ts,
'bv': 'bbb3ed55971873051bc2ff740579bb49',
"doctype": "json",
"version": "2.1",
"keyfrom": "fanyi.web",
"action": "FY_BY_REALTIME",
"typoResult": "false"
}
data = parse.urlencode(data).encode()
headers = {
'Accept': 'application/json, text/javascript, */*; q=0.01',
# 'Accept-Encoding': 'gzip, deflate',
'Accept-Language': 'zh-CN,zh;q=0.9',
'Connection': 'keep-alive',
'Content-Length': len(data),
'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
'Cookie': '__guid=204659719.2422785200799945700.1554675512727.244; OUTFOX_SEARCH_USER_ID=-1327275086@10.169.0.82; OUTFOX_SEARCH_USER_ID_NCOO=378292303.3354687; JSESSIONID=aaaLYwaICIOxi6ofRh8Nw; monitor_count=8; ___rl__test__cookies=1554693830913',
'Host': 'fanyi.youdao.com',
'Origin': 'http://fanyi.youdao.com',
'Referer': 'http://fanyi.youdao.com/',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36',
'X-Requested-With': 'XMLHttpRequest'
}
req = request.Request(url=url, data=data, headers=headers)
rsp = request.urlopen(req)
print(rsp.getcode())
html = rsp.read().decode('utf-8')
json_data = json.loads(html)
print(json_data)
print('翻译的结果为:' + json_data['translateResult'][0][0]['tgt'])
if __name__ == '__main__':
words = input('请输入要翻译的内容:')
ts = get_ts()
salt = get_salt(ts)
sign = get_sign(words, salt)
translate(words, ts, salt, sign) | translate2.py | from urllib import request, parse
import time
import random
import hashlib
import json
import requests
def get_ts():
    """Return the current Unix time in milliseconds, as a string."""
    return str(int(time.time() * 1000))
def get_salt(ts):
    """Return *ts* suffixed with a random integer in [0, 10] (the request salt)."""
    suffix = random.randint(0, 10)
    return '{}{}'.format(ts, suffix)
def get_sign(words, salt):
    """Return the MD5 hex signature the Youdao endpoint expects.

    The signed payload is 'fanyideskweb' + words + salt + a constant token
    (presumably lifted from Youdao's web-client JavaScript -- unverified).
    """
    payload = ''.join(('fanyideskweb', words, salt, 'n%A-rKaT5fb[Gy?;N5@Tj'))
    return hashlib.md5(payload.encode()).hexdigest()
def translate(words, ts, salt, sign):
    """POST *words* to Youdao's web translate endpoint and print the result.

    words: text to translate; ts: millisecond timestamp string (get_ts);
    salt: ts plus a random digit (get_salt); sign: MD5 signature (get_sign).

    Prints the HTTP status code, the raw JSON reply, and the first
    translation.  Raises KeyError if the reply has no 'translateResult'
    (e.g. when the endpoint rejects the signature).
    """
    url = "http://fanyi.youdao.com/translate_o?smartresult=dict&smartresult=rule"
    # Form fields mirroring the browser's request to the endpoint.
    data = {
        "i": words,
        "from": "AUTO",
        "to": "AUTO",
        "smartresult": "dict",
        "client": "fanyideskweb",
        "salt": salt,
        "sign": sign,
        'ts': ts,
        # NOTE(review): 'bv' looks like a hard-coded browser-version
        # fingerprint; confirm the endpoint still accepts this value.
        'bv': 'bbb3ed55971873051bc2ff740579bb49',
        "doctype": "json",
        "version": "2.1",
        "keyfrom": "fanyi.web",
        "action": "FY_BY_REALTIME",
        "typoResult": "false"
    }
    data = parse.urlencode(data).encode()
    # Browser-like headers copied from a captured request.
    headers = {
        'Accept': 'application/json, text/javascript, */*; q=0.01',
        # Left disabled in the original -- presumably so the reply is not
        # gzip-compressed and can be decoded as plain UTF-8 below.
        # 'Accept-Encoding': 'gzip, deflate',
        'Accept-Language': 'zh-CN,zh;q=0.9',
        'Connection': 'keep-alive',
        # NOTE(review): an int header value is accepted by http.client, but
        # str(len(data)) would be the conventional form -- confirm.
        'Content-Length': len(data),
        'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
        'Cookie': '__guid=204659719.2422785200799945700.1554675512727.244; OUTFOX_SEARCH_USER_ID=-1327275086@10.169.0.82; OUTFOX_SEARCH_USER_ID_NCOO=378292303.3354687; JSESSIONID=aaaLYwaICIOxi6ofRh8Nw; monitor_count=8; ___rl__test__cookies=1554693830913',
        'Host': 'fanyi.youdao.com',
        'Origin': 'http://fanyi.youdao.com',
        'Referer': 'http://fanyi.youdao.com/',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36',
        'X-Requested-With': 'XMLHttpRequest'
    }
    req = request.Request(url=url, data=data, headers=headers)
    rsp = request.urlopen(req)
    print(rsp.getcode())
    html = rsp.read().decode('utf-8')
    json_data = json.loads(html)
    print(json_data)
    print('翻译的结果为:' + json_data['translateResult'][0][0]['tgt'])
if __name__ == '__main__':
words = input('请输入要翻译的内容:')
ts = get_ts()
salt = get_salt(ts)
sign = get_sign(words, salt)
translate(words, ts, salt, sign) | 0.268845 | 0.11158 |
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0012_alter_user_first_name_max_length'),
]
operations = [
migrations.CreateModel(
name='Cohort',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100, unique=True)),
],
),
migrations.CreateModel(
name='Source',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=30, unique=True)),
],
),
migrations.CreateModel(
name='Team',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100, unique=True)),
],
),
migrations.CreateModel(
name='Student',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('candidate_number', models.CharField(max_length=10, unique=True)),
('status', models.CharField(choices=[('act', 'Active'), ('dor', 'DOR'), ('med', 'Medical Drop'), ('perf', 'Performance Drop')], default='act', max_length=20)),
('status_comment', models.CharField(blank=True, default='', max_length=240, null=True)),
('cohort', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.DO_NOTHING, to='stucon.cohort')),
('source', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.DO_NOTHING, to='stucon.source')),
('team', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.DO_NOTHING, to='stucon.team')),
('user_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='auth.user')),
],
),
] | stucon/migrations/0001_initial.py |
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0012_alter_user_first_name_max_length'),
]
operations = [
migrations.CreateModel(
name='Cohort',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100, unique=True)),
],
),
migrations.CreateModel(
name='Source',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=30, unique=True)),
],
),
migrations.CreateModel(
name='Team',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100, unique=True)),
],
),
migrations.CreateModel(
name='Student',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('candidate_number', models.CharField(max_length=10, unique=True)),
('status', models.CharField(choices=[('act', 'Active'), ('dor', 'DOR'), ('med', 'Medical Drop'), ('perf', 'Performance Drop')], default='act', max_length=20)),
('status_comment', models.CharField(blank=True, default='', max_length=240, null=True)),
('cohort', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.DO_NOTHING, to='stucon.cohort')),
('source', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.DO_NOTHING, to='stucon.source')),
('team', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.DO_NOTHING, to='stucon.team')),
('user_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='auth.user')),
],
),
] | 0.557123 | 0.144722 |
import sys
sys.path.append('lib/tweepy')
import boto3
import json
import io
import urllib.request
import tweepy
import datetime
#自作関数
import functions
#Twitterの認証
twitter = json.loads(functions.get_secret())
CK = twitter["TWITTER_CK"]
CS = twitter["TWITTER_CS"]
AT = twitter["TWITTER_AT"]
AS = twitter["TWITTER_AS"]
#検索設定
SEARCH_TEXT = "(#VRC OR #VRChat OR #VRCSnap OR #VRCSnap! OR #今日のVRChat)"
SEARCH_COUNT = 100
#自撮り判定設定
PERSON_THRESHOLD = 75
#AWS設定
try:
#DynamoDB設定
dynamoDB = boto3.resource('dynamodb', 'ap-northeast-1')
table = dynamoDB.Table("tweet2rekognition")
history_table = dynamoDB.Table("tweet2rekognition_history")
user_table = dynamoDB.Table("tweet2rekognition_user")
#Rekognition設定
rekognition = boto3.client('rekognition', 'ap-northeast-1')
except Exception as e:
raise('AWS Setup Error: ' + str(e))
finally:
print('Finish AWS Setup')
#Twitterのスクレイピング
class TweetScraper:
def __init__(self):
self.tweet_data = []
self.set_twitter_api()
#Twitterオブジェクトの生成
def set_twitter_api(self):
try:
auth = tweepy.OAuthHandler(CK, CS)
auth.set_access_token(AT, AS)
self.api = tweepy.API(auth)
except Exception as e:
raise('Twitter API Setup Error: ' + str(e))
finally:
print('Set Twitter API Object')
#画像ありツイートをSEARCH_TEXTでSEARCHCOUNTだけ検索
def search(self):
try:
for result in self.api.search(q='{} -filter:retweets filter:images'.format(SEARCH_TEXT), result_type='recent', count=SEARCH_COUNT):
url = 'https://twitter.com/{}/status/{}'.format(result.user.screen_name, result.id)
text = result.text.replace('\n', '')
#画像ありツイートのみ抽出
if 'media' in result.entities.keys():
#画像の個数
num_media = len(result.extended_entities["media"])
#データ入力
self.tweet_data.append({"id": functions.return_decimal(result.id),
"user_name": result.user.name,
"user_screen_name": result.user.screen_name,
"user_profile_image": result.user.profile_image_url_https,
"user_profile_banner": result.user._json.get("profile_banner_url"),
"user_profile_description": result.user.description,
"user_profile_url": result.user.url,
"user_profile_follow_count": result.user.friends_count,
"user_profile_follower_count": result.user.followers_count,
"text": result.text,
"hour_count": 0,
"favorite_count": functions.return_decimal(result.favorite_count),
"past_favorite": 0,
"d_fav": 0,
"retweet_count": functions.return_decimal(result.retweet_count),
"past_retweet": 0,
"d_RT": 0,
"created_at": functions.return_decimal(result.created_at.timestamp()),
"url": url,
"img": []
})
for i in range(num_media):
self.tweet_data[-1]["img"].append({
"id": result.extended_entities["media"][i]["id_str"],
"url": result.extended_entities["media"][i]["media_url_https"],
"labels": [],
"bounding_box": []
})
except Exception as e:
print('Twitter Search Error: ' + str(e))
finally:
print('Finish Twitter Search')
#Rekognitionでラベル付け
class SendRekognition:
def __init__(self, data):
self.data = data
def send(self, img):
try:
return rekognition.detect_labels(Image={"Bytes": img}, MaxLabels=10)
except Exception as e:
print('Rekognition Error: ' + str(e))
def checking_img(self, i, j, labels):
label_names = [l["Name"] for l in labels] #ラベル名(Key)のリスト
keyword_set = ["Clothing", "Fashion", "Apparel", "Accessories", "Accessory"]
# labelが"Name": "Person"を持ち、keyword_setのうちどれかをラベルに持つ場合
if "Person" in label_names and (len(set(label_names) & set(keyword_set)) != 0):
for label in labels:
# Nameが"Person"でかつ、ConfidenceがPERSON_THRESHOLDを超え、BoundingBoxを持つ場合
if label["Name"] == "Person" and label["Confidence"] > PERSON_THRESHOLD and len(label["Instances"]) > 0 and "BoundingBox" in [b for b in label["Instances"][0]]:
self.data[i]["img"][j]["labels"] = label_names
for b in range(len(label["Instances"])):
if label["Instances"][b]["Confidence"] > PERSON_THRESHOLD:
#BoundingBoxをDecimal変換
label["Instances"][b]["BoundingBox"]["Width"] = functions.return_decimal(label["Instances"][b]["BoundingBox"]["Width"])
label["Instances"][b]["BoundingBox"]["Height"] = functions.return_decimal(label["Instances"][b]["BoundingBox"]["Height"])
label["Instances"][b]["BoundingBox"]["Left"] = functions.return_decimal(label["Instances"][b]["BoundingBox"]["Left"])
label["Instances"][b]["BoundingBox"]["Top"] = functions.return_decimal(label["Instances"][b]["BoundingBox"]["Top"])
self.data[i]["img"][j]["bounding_box"].append((label["Instances"][b]["BoundingBox"]))
break
def add_labels(self):
try:
#各ツイート
new_tweet_count = 0
update_tweet_count = 0
#DynamoDBで最新のツイートIDを取得
latest_tweet = functions.get_latest_tweet_id(history_table)
latest_tweet_id = int(latest_tweet[0]["id"]) if len(latest_tweet) > 0 else 0
print("latest_tweet_id: {}".format(latest_tweet_id))
for i in range(len(self.data)):
if self.data[i]["id"] > latest_tweet_id:
#各ツイート内の各画像
for j in range(len(self.data[i]["img"])):
img_in = urllib.request.urlopen(self.data[i]["img"][j]["url"]).read()
img_bin = io.BytesIO(img_in)
result = self.send(img_in)
self.checking_img(i, j, result["Labels"])
if len(self.data[i]["img"]) > 0:
new_tweet_count += 1
else:
#取得済みツイートの場合
scanned_tweet = functions.get_tweet_id(table, self.data[i]["id"])
if len(scanned_tweet) > 0:
self.data[i]["img"] = json.loads(scanned_tweet[0]["img"])
#d_fav, d_RTの計算
self.data[i]["hour_count"] = scanned_tweet[0].get("hour_count", 0)
self.data[i]["past_favorite"] = scanned_tweet[0].get("past_favorite", 0)
self.data[i]["d_fav"] = scanned_tweet[0].get("d_fav", 0)
self.data[i]["past_retweet"] = scanned_tweet[0].get("past_retweet", 0)
self.data[i]["d_RT"] = scanned_tweet[0].get("d_RT", 0)
#毎時の判定(このlambda functionは10分おきに起動)
if self.data[i]["hour_count"] == 6:
self.data[i]["d_fav"] = self.data[i]["favorite_count"] - self.data[i]["past_favorite"]
self.data[i]["past_favorite"] = self.data[i]["favorite_count"]
self.data[i]["d_RT"] = self.data[i]["retweet_count"] - self.data[i]["past_retweet"]
self.data[i]["past_retweet"] = self.data[i]["retweet_count"]
self.data[i]["hour_count"] = 0
else:
#hour_countのカウントアップ
self.data[i]["hour_count"] += 1
update_tweet_count +=1
except Exception as e:
print("Add Labels Error: " +str(e))
finally:
print("Finish Labeling, Add {} Tweet, Update {} Tweet".format(new_tweet_count, update_tweet_count))
#DynamoDBにデータを送信
class SendDynamoDB:
def __init__(self, data):
self.data = data
def put(self):
try:
count = 0 # NewTweetCount
updated_at = functions.get_update_at()
three_days_after = functions.get_three_days_after()
next_month = functions.get_one_month_later()
for i in range(len(self.data)):
#取得したツイートで最新のものをHistoryテーブルに保存
if i == 0:
history_table.put_item(
Item = {
"last_tweet": functions.return_decimal(1),
"id": self.data[i]["id"],
"timestamp": self.data[i]["created_at"],
"updated_at_str": updated_at["datetime_str"],
"updated_at_date": updated_at["updated_at_date"],
"updated_at_time": updated_at["updated_at_time"],
"time_to_live": three_days_after,
}
)
#SendRekognition.checking_imgで除外された画像を除く画像セット(各ツイート)
img_set = [img for img in self.data[i]["img"] if img["bounding_box"] != []]
if img_set != []:
img_set = json.dumps(img_set, default=functions.decimal_default_proc)
#ツイート情報をDynamoDBにput
table.put_item(
Item = {
"id": self.data[i]["id"],
"user_name": self.data[i]["user_name"],
"user_screen_name": self.data[i]["user_screen_name"],
"user_profile_image": self.data[i]["user_profile_image"],
"text": self.data[i]["text"],
"hour_count": self.data[i]["hour_count"],
"favorite": self.data[i]["favorite_count"],
"past_favorite": self.data[i]["past_favorite"],
"d_fav": self.data[i]["d_fav"],
"retweet": self.data[i]["retweet_count"],
"past_retweet": self.data[i]["past_retweet"],
"d_RT": self.data[i]["d_RT"],
"timestamp": self.data[i]["created_at"],
"updated_at_str": updated_at["datetime_str"],
"updated_at_date": updated_at["updated_at_date"],
"updated_at_time": updated_at["updated_at_time"],
"time_to_live": next_month,
"url": self.data[i]["url"],
"img": img_set
}
)
#ユーザー情報をDynamoDBにput
user_table.put_item(
Item = {
"div": 1,
"user_name": self.data[i]["user_name"],
"user_screen_name": self.data[i]["user_screen_name"],
"user_profile_image": self.data[i]["user_profile_image"],
"user_profile_banner": self.data[i]["user_profile_banner"],
"user_profile_description": self.data[i]["user_profile_description"],
"user_profile_url": self.data[i]["user_profile_url"],
"user_profile_follow_count": self.data[i]["user_profile_follow_count"],
"user_profile_follower_count": self.data[i]["user_profile_follower_count"],
"time_to_live": next_month
}
)
count += 1
except Exception as e:
print('DynamoDB Error: ' + str(e))
finally:
print('Finish putting DynamoDB, put {} Tweet'.format(count))
def handler(event, context):
scraper = TweetScraper()
scraper.search()
rekognition = SendRekognition(scraper.tweet_data)
rekognition.add_labels()
send_dynamoDB = SendDynamoDB(rekognition.data)
send_dynamoDB.put()
#Appear rekognition.data
#return{
# 'isBase64Encoded': False,
# 'statusCode': 200,
# 'headers': {},
# 'body': rekognition.data
#} | index.py | import sys
import sys

# Vendored dependencies (tweepy) live under lib/ in the deployment bundle.
sys.path.append('lib/tweepy')

import boto3
import json
import io
import urllib.request
import tweepy
import datetime

# Project-local helpers (secret lookup, Decimal conversion, DynamoDB queries).
import functions

# Twitter credentials, loaded from the secret store.
twitter = json.loads(functions.get_secret())
CK = twitter["TWITTER_CK"]
CS = twitter["TWITTER_CS"]
AT = twitter["TWITTER_AT"]
AS = twitter["TWITTER_AS"]

# Search settings.
SEARCH_TEXT = "(#VRC OR #VRChat OR #VRCSnap OR #VRCSnap! OR #今日のVRChat)"
SEARCH_COUNT = 100

# Minimum Rekognition confidence (%) for "Person" detections.
PERSON_THRESHOLD = 75

# AWS clients and tables.
try:
    # DynamoDB tables.
    dynamoDB = boto3.resource('dynamodb', 'ap-northeast-1')
    table = dynamoDB.Table("tweet2rekognition")
    history_table = dynamoDB.Table("tweet2rekognition_history")
    user_table = dynamoDB.Table("tweet2rekognition_user")
    # Rekognition client.
    rekognition = boto3.client('rekognition', 'ap-northeast-1')
except Exception as e:
    # BUG FIX: the original `raise('AWS Setup Error: ...')` raised a plain
    # string, which itself fails with "exceptions must derive from
    # BaseException" and masks the real error; wrap in a real exception.
    raise RuntimeError('AWS Setup Error: ' + str(e)) from e
finally:
    print('Finish AWS Setup')
#Twitterのスクレイピング
class TweetScraper:
    """Collects recent image tweets matching SEARCH_TEXT into self.tweet_data."""

    def __init__(self):
        self.tweet_data = []  # one dict per scraped tweet (filled by search())
        self.set_twitter_api()

    def set_twitter_api(self):
        """Create the authenticated tweepy API object on self.api.

        Raises:
            RuntimeError: when OAuth setup fails.
        """
        try:
            auth = tweepy.OAuthHandler(CK, CS)
            auth.set_access_token(AT, AS)
            self.api = tweepy.API(auth)
        except Exception as e:
            # BUG FIX: the original `raise('Twitter API Setup Error: ...')`
            # raised a plain string, which itself fails with TypeError;
            # wrap the message in a real exception instead.
            raise RuntimeError('Twitter API Setup Error: ' + str(e)) from e
        finally:
            print('Set Twitter API Object')

    def search(self):
        """Search up to SEARCH_COUNT recent, non-retweet image tweets.

        Each hit carrying media is appended to self.tweet_data with its
        engagement counters initialised to zero and one entry per attached
        image.  Errors are printed and swallowed (best effort).
        """
        try:
            for result in self.api.search(q='{} -filter:retweets filter:images'.format(SEARCH_TEXT), result_type='recent', count=SEARCH_COUNT):
                url = 'https://twitter.com/{}/status/{}'.format(result.user.screen_name, result.id)
                # Keep only tweets that actually carry media.
                if 'media' in result.entities.keys():
                    # NOTE(review): assumes extended_entities is present whenever
                    # entities has 'media' -- confirm for edge cases.
                    num_media = len(result.extended_entities["media"])
                    self.tweet_data.append({"id": functions.return_decimal(result.id),
                                            "user_name": result.user.name,
                                            "user_screen_name": result.user.screen_name,
                                            "user_profile_image": result.user.profile_image_url_https,
                                            "user_profile_banner": result.user._json.get("profile_banner_url"),
                                            "user_profile_description": result.user.description,
                                            "user_profile_url": result.user.url,
                                            "user_profile_follow_count": result.user.friends_count,
                                            "user_profile_follower_count": result.user.followers_count,
                                            "text": result.text,
                                            "hour_count": 0,
                                            "favorite_count": functions.return_decimal(result.favorite_count),
                                            "past_favorite": 0,
                                            "d_fav": 0,
                                            "retweet_count": functions.return_decimal(result.retweet_count),
                                            "past_retweet": 0,
                                            "d_RT": 0,
                                            "created_at": functions.return_decimal(result.created_at.timestamp()),
                                            "url": url,
                                            "img": []
                                            })
                    for i in range(num_media):
                        self.tweet_data[-1]["img"].append({
                            "id": result.extended_entities["media"][i]["id_str"],
                            "url": result.extended_entities["media"][i]["media_url_https"],
                            "labels": [],
                            "bounding_box": []
                        })
        except Exception as e:
            print('Twitter Search Error: ' + str(e))
        finally:
            print('Finish Twitter Search')
#Rekognitionでラベル付け
class SendRekognition:
    """Labels tweet images with Rekognition and merges stored DynamoDB state."""

    def __init__(self, data):
        self.data = data  # list of tweet dicts produced by TweetScraper.search

    def send(self, img):
        """Run Rekognition label detection on raw image bytes.

        Returns the detect_labels response dict, or None when the call fails
        (the error is printed; the caller will then fail on result["Labels"]).
        """
        try:
            return rekognition.detect_labels(Image={"Bytes": img}, MaxLabels=10)
        except Exception as e:
            print('Rekognition Error: ' + str(e))

    def checking_img(self, i, j, labels):
        """Keep image *j* of tweet *i* only when it appears to show a clothed person.

        Writes the detected label names and every sufficiently confident
        "Person" bounding box (converted to Decimal for DynamoDB) onto the
        image dict; images that do not qualify keep an empty bounding_box
        list and are filtered out later by SendDynamoDB.put.
        """
        label_names = [l["Name"] for l in labels]  # list of label names ("Name" keys)
        keyword_set = ["Clothing", "Fashion", "Apparel", "Accessories", "Accessory"]
        # Require a "Person" label plus at least one clothing/accessory label.
        if "Person" in label_names and (len(set(label_names) & set(keyword_set)) != 0):
            for label in labels:
                # "Person" above PERSON_THRESHOLD confidence with at least one instance
                # whose first instance carries a BoundingBox.
                if label["Name"] == "Person" and label["Confidence"] > PERSON_THRESHOLD and len(label["Instances"]) > 0 and "BoundingBox" in [b for b in label["Instances"][0]]:
                    self.data[i]["img"][j]["labels"] = label_names
                    for b in range(len(label["Instances"])):
                        if label["Instances"][b]["Confidence"] > PERSON_THRESHOLD:
                            # Convert BoundingBox floats to Decimal for DynamoDB storage.
                            label["Instances"][b]["BoundingBox"]["Width"] = functions.return_decimal(label["Instances"][b]["BoundingBox"]["Width"])
                            label["Instances"][b]["BoundingBox"]["Height"] = functions.return_decimal(label["Instances"][b]["BoundingBox"]["Height"])
                            label["Instances"][b]["BoundingBox"]["Left"] = functions.return_decimal(label["Instances"][b]["BoundingBox"]["Left"])
                            label["Instances"][b]["BoundingBox"]["Top"] = functions.return_decimal(label["Instances"][b]["BoundingBox"]["Top"])
                            self.data[i]["img"][j]["bounding_box"].append((label["Instances"][b]["BoundingBox"]))
                    break

    def add_labels(self):
        """Label unseen tweets via Rekognition; refresh stats for known ones.

        Tweets newer than the last recorded ID are downloaded and labelled;
        already-stored tweets get their cached image data reloaded and their
        hourly favourite/retweet deltas advanced.  Errors are printed and
        swallowed; a summary line is always printed.
        """
        try:
            # Counters reported in the finally block below.
            new_tweet_count = 0
            update_tweet_count = 0
            # Newest tweet ID already processed (from the DynamoDB history table).
            latest_tweet = functions.get_latest_tweet_id(history_table)
            latest_tweet_id = int(latest_tweet[0]["id"]) if len(latest_tweet) > 0 else 0
            print("latest_tweet_id: {}".format(latest_tweet_id))
            for i in range(len(self.data)):
                if self.data[i]["id"] > latest_tweet_id:
                    # Unseen tweet: run every attached image through Rekognition.
                    for j in range(len(self.data[i]["img"])):
                        img_in = urllib.request.urlopen(self.data[i]["img"][j]["url"]).read()
                        img_bin = io.BytesIO(img_in)  # NOTE(review): unused; send() takes the raw bytes
                        result = self.send(img_in)
                        # NOTE(review): result is None when send() failed; the next
                        # line then raises TypeError (caught below, aborting the
                        # whole loop) -- confirm this is intended.
                        self.checking_img(i, j, result["Labels"])
                    if len(self.data[i]["img"]) > 0:
                        new_tweet_count += 1
                else:
                    # Already-stored tweet: pull cached image data and counters.
                    scanned_tweet = functions.get_tweet_id(table, self.data[i]["id"])
                    if len(scanned_tweet) > 0:
                        self.data[i]["img"] = json.loads(scanned_tweet[0]["img"])
                        # Carry over delta-favourite / delta-retweet bookkeeping.
                        self.data[i]["hour_count"] = scanned_tweet[0].get("hour_count", 0)
                        self.data[i]["past_favorite"] = scanned_tweet[0].get("past_favorite", 0)
                        self.data[i]["d_fav"] = scanned_tweet[0].get("d_fav", 0)
                        self.data[i]["past_retweet"] = scanned_tweet[0].get("past_retweet", 0)
                        self.data[i]["d_RT"] = scanned_tweet[0].get("d_RT", 0)
                        # Hourly rollover: this Lambda runs every 10 minutes, so
                        # 6 ticks make one hour; recompute the deltas then reset.
                        if self.data[i]["hour_count"] == 6:
                            self.data[i]["d_fav"] = self.data[i]["favorite_count"] - self.data[i]["past_favorite"]
                            self.data[i]["past_favorite"] = self.data[i]["favorite_count"]
                            self.data[i]["d_RT"] = self.data[i]["retweet_count"] - self.data[i]["past_retweet"]
                            self.data[i]["past_retweet"] = self.data[i]["retweet_count"]
                            self.data[i]["hour_count"] = 0
                        else:
                            # Not a full hour yet: just advance the tick counter.
                            self.data[i]["hour_count"] += 1
                        update_tweet_count += 1
        except Exception as e:
            print("Add Labels Error: " + str(e))
        finally:
            print("Finish Labeling, Add {} Tweet, Update {} Tweet".format(new_tweet_count, update_tweet_count))
#DynamoDBにデータを送信
class SendDynamoDB:
    """Persists labelled tweet, history, and user records to DynamoDB."""

    def __init__(self, data):
        self.data = data  # tweet dicts, post-Rekognition labelling

    def put(self):
        """Store each qualifying tweet plus its author; track the newest tweet ID.

        Only tweets with at least one image that received a person bounding
        box are written.  Items carry TTLs: 3 days for the history marker,
        1 month for tweet and user records.  Errors are printed and
        swallowed; a summary line is always printed.
        """
        try:
            count = 0  # number of tweets actually written (NewTweetCount)
            updated_at = functions.get_update_at()
            three_days_after = functions.get_three_days_after()
            next_month = functions.get_one_month_later()
            for i in range(len(self.data)):
                # Record the first (newest) search hit in the history table so the
                # next run can skip older tweets.
                # NOTE(review): assumes self.data is ordered newest-first -- confirm
                # against the Twitter 'recent' search ordering.
                if i == 0:
                    history_table.put_item(
                        Item = {
                            "last_tweet": functions.return_decimal(1),
                            "id": self.data[i]["id"],
                            "timestamp": self.data[i]["created_at"],
                            "updated_at_str": updated_at["datetime_str"],
                            "updated_at_date": updated_at["updated_at_date"],
                            "updated_at_time": updated_at["updated_at_time"],
                            "time_to_live": three_days_after,
                        }
                    )
                # Images that survived SendRekognition.checking_img (have bounding boxes).
                img_set = [img for img in self.data[i]["img"] if img["bounding_box"] != []]
                if img_set != []:
                    img_set = json.dumps(img_set, default=functions.decimal_default_proc)
                    # Tweet record.
                    table.put_item(
                        Item = {
                            "id": self.data[i]["id"],
                            "user_name": self.data[i]["user_name"],
                            "user_screen_name": self.data[i]["user_screen_name"],
                            "user_profile_image": self.data[i]["user_profile_image"],
                            "text": self.data[i]["text"],
                            "hour_count": self.data[i]["hour_count"],
                            "favorite": self.data[i]["favorite_count"],
                            "past_favorite": self.data[i]["past_favorite"],
                            "d_fav": self.data[i]["d_fav"],
                            "retweet": self.data[i]["retweet_count"],
                            "past_retweet": self.data[i]["past_retweet"],
                            "d_RT": self.data[i]["d_RT"],
                            "timestamp": self.data[i]["created_at"],
                            "updated_at_str": updated_at["datetime_str"],
                            "updated_at_date": updated_at["updated_at_date"],
                            "updated_at_time": updated_at["updated_at_time"],
                            "time_to_live": next_month,
                            "url": self.data[i]["url"],
                            "img": img_set
                        }
                    )
                    # Author record.
                    user_table.put_item(
                        Item = {
                            "div": 1,
                            "user_name": self.data[i]["user_name"],
                            "user_screen_name": self.data[i]["user_screen_name"],
                            "user_profile_image": self.data[i]["user_profile_image"],
                            "user_profile_banner": self.data[i]["user_profile_banner"],
                            "user_profile_description": self.data[i]["user_profile_description"],
                            "user_profile_url": self.data[i]["user_profile_url"],
                            "user_profile_follow_count": self.data[i]["user_profile_follow_count"],
                            "user_profile_follower_count": self.data[i]["user_profile_follower_count"],
                            "time_to_live": next_month
                        }
                    )
                    count += 1
        except Exception as e:
            print('DynamoDB Error: ' + str(e))
        finally:
            print('Finish putting DynamoDB, put {} Tweet'.format(count))
def handler(event, context):
    """AWS Lambda entry point: scrape tweets, label images, persist results."""
    scraper = TweetScraper()
    scraper.search()
    # Local renamed from `rekognition` to avoid shadowing the module-level
    # Rekognition client of the same name inside this function.
    labeler = SendRekognition(scraper.tweet_data)
    labeler.add_labels()
    writer = SendDynamoDB(labeler.data)
    writer.put()
#Appear rekognition.data
#return{
# 'isBase64Encoded': False,
# 'statusCode': 200,
# 'headers': {},
# 'body': rekognition.data
#} | 0.16529 | 0.15633 |
import unittest
import spydrnet as sdn
from spydrnet.parsers.edif.parser import EdifParser
from spydrnet import base_dir
import os
import tempfile
import glob
import shutil
class TestEdifTokenizer(unittest.TestCase):
def test_multi_bit_add_out_of_order(self):
definition = sdn.Definition()
cable0 = sdn.Cable()
cable0.name = "net[0]"
cable0["EDIF.identifier"] = "net_0_"
cable1 = sdn.Cable()
cable1.name = "net[1]"
cable1["EDIF.identifier"] = "net_1_"
cable2 = sdn.Cable()
cable2.name = "net[2]"
cable2["EDIF.identifier"] = "net_2_"
cable0.create_wire()
cable1.create_wire()
cable2.create_wire()
p0 = sdn.InnerPin()
p1 = sdn.InnerPin()
p2 = sdn.InnerPin()
cable1.wires[0].connect_pin(p0)
cable1.wires[0].connect_pin(p1)
cable1.wires[0].connect_pin(p2)
ep = EdifParser()
ep.multibit_add_cable(definition, cable0)
ep.multibit_add_cable(definition, cable2)
ep.multibit_add_cable(definition, cable1)
assert len(definition.cables) == 1
assert len (definition.cables[0].wires) == 3
assert len(definition.cables[0].wires[0].pins) == 0
assert len(definition.cables[0].wires[1].pins) == 3
assert len(definition.cables[0].wires[2].pins) == 0
assert p0 in definition.cables[0].wires[1].pins
assert p1 in definition.cables[0].wires[1].pins
assert p2 in definition.cables[0].wires[1].pins
@classmethod
def setUpClass(cls) -> None:
cls.dir_of_edif_netlists = os.path.join(sdn.base_dir, "support_files", "EDIF_netlists")
cls.edif_files = sorted(glob.glob(os.path.join(cls.dir_of_edif_netlists, "*.edf.zip")), key=os.path.getsize)
@unittest.skip("Test takes a long time right now.")
def test_large_edif(self):
for ii, filename in enumerate(self.edif_files):
if os.path.getsize(filename) <= 1024 * 10:
continue
self.ensure_cable_consistency(filename, ii, "edf")
def test_small_edif_cables(self):
for ii, filename in enumerate(self.edif_files):
if os.path.getsize(filename) > 1024 * 10:
continue
self.ensure_cable_consistency(filename, ii, "edf")
def ensure_cable_consistency(self,filename, ii, target_format_extension = None):
with self.subTest(i=ii):
if os.path.exists("temp"):
shutil.rmtree("temp")
print(filename)
with tempfile.TemporaryDirectory() as tempdirname:
netlist = sdn.parse(filename)
for l in netlist.libraries:
for d in l.definitions:
for c in d.cables:
assert c.definition is not None
if __name__ == '__main__':
unittest.main() | spydrnet/parsers/edif/tests/test_edif_parser.py | import unittest
import spydrnet as sdn
from spydrnet.parsers.edif.parser import EdifParser
from spydrnet import base_dir
import os
import tempfile
import glob
import shutil
class TestEdifTokenizer(unittest.TestCase):
def test_multi_bit_add_out_of_order(self):
definition = sdn.Definition()
cable0 = sdn.Cable()
cable0.name = "net[0]"
cable0["EDIF.identifier"] = "net_0_"
cable1 = sdn.Cable()
cable1.name = "net[1]"
cable1["EDIF.identifier"] = "net_1_"
cable2 = sdn.Cable()
cable2.name = "net[2]"
cable2["EDIF.identifier"] = "net_2_"
cable0.create_wire()
cable1.create_wire()
cable2.create_wire()
p0 = sdn.InnerPin()
p1 = sdn.InnerPin()
p2 = sdn.InnerPin()
cable1.wires[0].connect_pin(p0)
cable1.wires[0].connect_pin(p1)
cable1.wires[0].connect_pin(p2)
ep = EdifParser()
ep.multibit_add_cable(definition, cable0)
ep.multibit_add_cable(definition, cable2)
ep.multibit_add_cable(definition, cable1)
assert len(definition.cables) == 1
assert len (definition.cables[0].wires) == 3
assert len(definition.cables[0].wires[0].pins) == 0
assert len(definition.cables[0].wires[1].pins) == 3
assert len(definition.cables[0].wires[2].pins) == 0
assert p0 in definition.cables[0].wires[1].pins
assert p1 in definition.cables[0].wires[1].pins
assert p2 in definition.cables[0].wires[1].pins
@classmethod
def setUpClass(cls) -> None:
cls.dir_of_edif_netlists = os.path.join(sdn.base_dir, "support_files", "EDIF_netlists")
cls.edif_files = sorted(glob.glob(os.path.join(cls.dir_of_edif_netlists, "*.edf.zip")), key=os.path.getsize)
@unittest.skip("Test takes a long time right now.")
def test_large_edif(self):
for ii, filename in enumerate(self.edif_files):
if os.path.getsize(filename) <= 1024 * 10:
continue
self.ensure_cable_consistency(filename, ii, "edf")
def test_small_edif_cables(self):
for ii, filename in enumerate(self.edif_files):
if os.path.getsize(filename) > 1024 * 10:
continue
self.ensure_cable_consistency(filename, ii, "edf")
def ensure_cable_consistency(self,filename, ii, target_format_extension = None):
with self.subTest(i=ii):
if os.path.exists("temp"):
shutil.rmtree("temp")
print(filename)
with tempfile.TemporaryDirectory() as tempdirname:
netlist = sdn.parse(filename)
for l in netlist.libraries:
for d in l.definitions:
for c in d.cables:
assert c.definition is not None
if __name__ == '__main__':
unittest.main() | 0.40392 | 0.280789 |
import logging
import argparse
import random
import math
import matplotlib.pyplot as plt
import ast
parser = argparse.ArgumentParser()
parser.add_argument('--number_of_inliers', help="The number of inliers. Default: 200", type=int, default=200)
parser.add_argument('--inliers_noise', help="The noise for the inliers. Default: 0.5", type=float, default=0.5)
parser.add_argument('--number_of_outliers', help="The number of outliers. Default: 200", type=int, default=200)
parser.add_argument('--outliers_range', help="For the outliers, the range of (x, y) values. Default: '[-10, 10]'", default='[-10, 10]')
parser.add_argument('--outputFilepath', help="The output filepath. Default: '../data/noisy_circles.csv'", default='../data/noisy_circles.csv')
args = parser.parse_args()
args.outliers_range = ast.literal_eval(args.outliers_range)
circle1_center = (1, 3.5)
circle2_center = (-7.5, -5.5)
circle1_radius = 4.0
circle2_radius = 2.0
logging.basicConfig(level=logging.INFO, format='%(asctime)-15s %(levelname)s \t%(message)s')
def main():
logging.info("create_noisy_circles.py main()")
xy_tuples = []
# Principal circle
for inlierNdx in range(args.number_of_inliers):
theta = random.uniform(0, 2 * math.pi)
pt = [circle1_center[0] + circle1_radius * math.cos(theta),
circle1_center[1] + circle1_radius * math.sin(theta)]
pt[0] += random.uniform(-args.inliers_noise, args.inliers_noise)
pt[1] += random.uniform(-args.inliers_noise, args.inliers_noise)
xy_tuples.append((pt, 0))
# Secondary circle
for inlierNdx in range(args.number_of_inliers//2):
theta = random.uniform(0, 2 * math.pi)
pt = [circle2_center[0] + circle2_radius * math.cos(theta),
circle2_center[1] + circle2_radius * math.sin(theta)]
pt[0] += random.uniform(-args.inliers_noise, args.inliers_noise)
pt[1] += random.uniform(-args.inliers_noise, args.inliers_noise)
xy_tuples.append((pt, 0))
# Outliers
for outlierNdx in range(args.number_of_outliers):
pt = [random.uniform(args.outliers_range[0], args.outliers_range[1]),
random.uniform(args.outliers_range[0], args.outliers_range[1])]
xy_tuples.append((pt, 0))
# Shuffle the data
random.shuffle(xy_tuples)
# Display the points
fig, ax = plt.subplots()
ax.scatter([xy[0][0] for xy in xy_tuples], [xy[0][1] for xy in xy_tuples],
c='green', s=3)
ax.grid(True)
plt.show()
# Write to file
with open(args.outputFilepath, 'w') as output_file:
output_file.write("x0,x1\n")
for x_y in xy_tuples:
output_file.write("{},{}\n".format(x_y[0][0], x_y[0][1]))
if __name__ == '__main__':
main() | utilities/create_noisy_circles.py | import logging
import argparse
import random
import math
import matplotlib.pyplot as plt
import ast
parser = argparse.ArgumentParser()
parser.add_argument('--number_of_inliers', help="The number of inliers. Default: 200", type=int, default=200)
parser.add_argument('--inliers_noise', help="The noise for the inliers. Default: 0.5", type=float, default=0.5)
parser.add_argument('--number_of_outliers', help="The number of outliers. Default: 200", type=int, default=200)
parser.add_argument('--outliers_range', help="For the outliers, the range of (x, y) values. Default: '[-10, 10]'", default='[-10, 10]')
parser.add_argument('--outputFilepath', help="The output filepath. Default: '../data/noisy_circles.csv'", default='../data/noisy_circles.csv')
args = parser.parse_args()
args.outliers_range = ast.literal_eval(args.outliers_range)
circle1_center = (1, 3.5)
circle2_center = (-7.5, -5.5)
circle1_radius = 4.0
circle2_radius = 2.0
logging.basicConfig(level=logging.INFO, format='%(asctime)-15s %(levelname)s \t%(message)s')
def main():
logging.info("create_noisy_circles.py main()")
xy_tuples = []
# Principal circle
for inlierNdx in range(args.number_of_inliers):
theta = random.uniform(0, 2 * math.pi)
pt = [circle1_center[0] + circle1_radius * math.cos(theta),
circle1_center[1] + circle1_radius * math.sin(theta)]
pt[0] += random.uniform(-args.inliers_noise, args.inliers_noise)
pt[1] += random.uniform(-args.inliers_noise, args.inliers_noise)
xy_tuples.append((pt, 0))
# Secondary circle
for inlierNdx in range(args.number_of_inliers//2):
theta = random.uniform(0, 2 * math.pi)
pt = [circle2_center[0] + circle2_radius * math.cos(theta),
circle2_center[1] + circle2_radius * math.sin(theta)]
pt[0] += random.uniform(-args.inliers_noise, args.inliers_noise)
pt[1] += random.uniform(-args.inliers_noise, args.inliers_noise)
xy_tuples.append((pt, 0))
# Outliers
for outlierNdx in range(args.number_of_outliers):
pt = [random.uniform(args.outliers_range[0], args.outliers_range[1]),
random.uniform(args.outliers_range[0], args.outliers_range[1])]
xy_tuples.append((pt, 0))
# Shuffle the data
random.shuffle(xy_tuples)
# Display the points
fig, ax = plt.subplots()
ax.scatter([xy[0][0] for xy in xy_tuples], [xy[0][1] for xy in xy_tuples],
c='green', s=3)
ax.grid(True)
plt.show()
# Write to file
with open(args.outputFilepath, 'w') as output_file:
output_file.write("x0,x1\n")
for x_y in xy_tuples:
output_file.write("{},{}\n".format(x_y[0][0], x_y[0][1]))
if __name__ == '__main__':
main() | 0.481454 | 0.304145 |
import numpy
import scipy.special
import matplotlib.pyplot
import imageio
import glob
# 神经网络类定义
class neuralNetwork:
# 初始化神经网络
def __init__(self, inputnodes, hiddennodes, outputnodes, learningrate):
# 设置每个输入、隐藏、输出层的节点数
self.inodes = inputnodes
self.hnodes = hiddennodes
self.onodes = outputnodes
# 链接权值矩阵,wih and who
# 数组中的权重是w_i_j,其中链路是从节点i到下一层的节点j
# w11 w21
# w12 w22 etc
self.wih = numpy.random.normal(0.0, pow(self.inodes, -0.5), (self.hnodes, self.inodes))
self.who = numpy.random.normal(0.0, pow(self.hnodes, -0.5), (self.onodes, self.hnodes))
# 学习速率
self.lr = learningrate
# 激活函数是s型函数
self.activation_function = lambda x: scipy.special.expit(x)
pass
# 训练神经网络
def train(self, inputs_list, targets_list):
# 将输入列表转换为二维数组
inputs = numpy.array(inputs_list, ndmin=2).T
targets = numpy.array(targets_list, ndmin=2).T
# 计算信号到隐藏层
hidden_inputs = numpy.dot(self.wih, inputs)
# 计算从隐含层出现的信号
hidden_outputs = self.activation_function(hidden_inputs)
# 计算信号到最终的输出层
final_inputs = numpy.dot(self.who, hidden_outputs)
# 计算从最终输出层出现的信号
final_outputs = self.activation_function(final_inputs)
# 输出层误差为(目标值-实际值)
output_errors = targets - final_outputs
# 隐藏层错误是output_errors,按权重分割,在隐藏节点处重新组合
hidden_errors = numpy.dot(self.who.T, output_errors)
# 更新隐藏层和输出层之间的链接的权重
self.who += self.lr * numpy.dot((output_errors * final_outputs * (1.0 - final_outputs)),
numpy.transpose(hidden_outputs))
# 更新输入层和隐藏层之间的链接的权值
self.wih += self.lr * numpy.dot((hidden_errors * hidden_outputs * (1.0 - hidden_outputs)),
numpy.transpose(inputs))
pass
# 查询神经网络
def query(self, inputs_list):
# 将输入列表转换为二维数组
inputs = numpy.array(inputs_list, ndmin=2).T
# 计算信号到隐藏层
hidden_inputs = numpy.dot(self.wih, inputs)
# 计算从隐含层出现的信号
hidden_outputs = self.activation_function(hidden_inputs)
# 计算信号到最终的输出层
final_inputs = numpy.dot(self.who, hidden_outputs)
# 计算从最终输出层出现的信号
final_outputs = self.activation_function(final_inputs)
return final_outputs
# 输入、隐藏和输出节点的数量
input_nodes = 784
hidden_nodes = 200
output_nodes = 10
# 学习速率
learning_rate = 0.1
# 创建神经网络实例
n = neuralNetwork(input_nodes,hidden_nodes,output_nodes, learning_rate)
# 将mnist训练数据CSV文件加载到列表中
training_data_file = open("MNIST_data/mnist_train.csv", 'r')
training_data_list = training_data_file.readlines()
training_data_file.close()
# 训练神经网络
# epochs是训练数据集用于训练的次数
epochs = 10
for e in range(epochs):
# 检查训练数据集中的所有记录
for record in training_data_list:
# 用逗号分隔记录
all_values = record.split(',')
# 规模和转移输入
inputs = (numpy.asfarray(all_values[1:]) / 255.0 * 0.99) + 0.01
# 创建目标输出值(都是0.01,除了所需的标签为0.99)
targets = numpy.zeros(output_nodes) + 0.01
# all_values[0]是该记录的目标标签
targets[int(all_values[0])] = 0.99
n.train(inputs, targets)
pass
pass
# 测试神经网络
# 将mnist测试数据csv文件加载到列表中
test_data_file = open("MNIST_data/mnist_test.csv", 'r')
test_data_list = test_data_file.readlines()
test_data_file.close()
# 记录神经网络执行情况,最初为空
scorecard = []
# 遍历测试数据集中的所有记录
for record in test_data_list:
all_values = record.split(',')
# 正确答案为第一个值
correct_label = int(all_values[0])
# 规模和转移输入
inputs = (numpy.asfarray(all_values[1:]) / 255.0 * 0.99) + 0.01
# 查询神经网络
outputs = n.query(inputs)
# 最大值的索引对应于标签
label = numpy.argmax(outputs)
# print("Answer label is:",correct_label," ; ",label," is network's answer")
# 判断结果正不正确
if (label == correct_label):
# 神经网络的测试结果与正确结果匹配,如果正确scorecard加1
scorecard.append(1)
else:
scorecard.append(0)
pass
# 计算准确率
scorecard_array = numpy.asarray(scorecard)
print("准确率为:", scorecard_array.sum() / scorecard_array.size)
# 用自己写的的图像测试数据集
our_own_dataset = []
# 加载png图像数据作为测试数据集
for image_file_name in glob.glob('Number4.png'):
# 使用文件名设置正确的标签
label = int(image_file_name[-5:-4])
# 将png文件图像转为数组
print("加载文件:", image_file_name)
img_array = imageio.imread(image_file_name, as_gray=True)
# 每张图片都由一个28 ×28 的矩阵表示,每张图片都由一个784 维的向量表示(28*28=784)
# 将数组的值减去了255.0。常规而言,0指的是黑色,255指的是白色,但是,MNIST数据集使用相反的方式表示,因此将值逆转过来以匹配MNIST数据
# 从28x28重塑到包含784个值的列表,反转值
img_data = 255.0 - img_array.reshape(784)
# 然后将数据缩放到范围从0.01到1.0
img_data = (img_data / 255.0 * 0.99) + 0.01
print(numpy.min(img_data))
print(numpy.max(img_data))
# 附加标签和图像数据来测试数据集
record = numpy.append(label, img_data)
our_own_dataset.append(record)
pass
# 用我们自己的图像来测试神经网络
# 记录测试
item = 0
# plot image
matplotlib.pyplot.imshow(our_own_dataset[item][1:].reshape(28,28), cmap='Greys', interpolation='None')
# 正确答案为第一个值
correct_label = our_own_dataset[item][0]
# 数据是剩余值
inputs = our_own_dataset[item][1:]
# 查询神经网络
outputs = n.query(inputs)
print (outputs)
# 最大值的索引对应于标签
label = numpy.argmax(outputs)
print("神经网络测试结果:", label)
# 判断结果正不正确
if (label == correct_label):
print ("match!")
else:
print ("no match!")
pass | Handwritten_digit_recognition/Handwritten.py | import numpy
import scipy.special
import matplotlib.pyplot
import imageio
import glob
# 神经网络类定义
class neuralNetwork:
# 初始化神经网络
def __init__(self, inputnodes, hiddennodes, outputnodes, learningrate):
# 设置每个输入、隐藏、输出层的节点数
self.inodes = inputnodes
self.hnodes = hiddennodes
self.onodes = outputnodes
# 链接权值矩阵,wih and who
# 数组中的权重是w_i_j,其中链路是从节点i到下一层的节点j
# w11 w21
# w12 w22 etc
self.wih = numpy.random.normal(0.0, pow(self.inodes, -0.5), (self.hnodes, self.inodes))
self.who = numpy.random.normal(0.0, pow(self.hnodes, -0.5), (self.onodes, self.hnodes))
# 学习速率
self.lr = learningrate
# 激活函数是s型函数
self.activation_function = lambda x: scipy.special.expit(x)
pass
# 训练神经网络
def train(self, inputs_list, targets_list):
# 将输入列表转换为二维数组
inputs = numpy.array(inputs_list, ndmin=2).T
targets = numpy.array(targets_list, ndmin=2).T
# 计算信号到隐藏层
hidden_inputs = numpy.dot(self.wih, inputs)
# 计算从隐含层出现的信号
hidden_outputs = self.activation_function(hidden_inputs)
# 计算信号到最终的输出层
final_inputs = numpy.dot(self.who, hidden_outputs)
# 计算从最终输出层出现的信号
final_outputs = self.activation_function(final_inputs)
# 输出层误差为(目标值-实际值)
output_errors = targets - final_outputs
# 隐藏层错误是output_errors,按权重分割,在隐藏节点处重新组合
hidden_errors = numpy.dot(self.who.T, output_errors)
# 更新隐藏层和输出层之间的链接的权重
self.who += self.lr * numpy.dot((output_errors * final_outputs * (1.0 - final_outputs)),
numpy.transpose(hidden_outputs))
# 更新输入层和隐藏层之间的链接的权值
self.wih += self.lr * numpy.dot((hidden_errors * hidden_outputs * (1.0 - hidden_outputs)),
numpy.transpose(inputs))
pass
# 查询神经网络
def query(self, inputs_list):
# 将输入列表转换为二维数组
inputs = numpy.array(inputs_list, ndmin=2).T
# 计算信号到隐藏层
hidden_inputs = numpy.dot(self.wih, inputs)
# 计算从隐含层出现的信号
hidden_outputs = self.activation_function(hidden_inputs)
# 计算信号到最终的输出层
final_inputs = numpy.dot(self.who, hidden_outputs)
# 计算从最终输出层出现的信号
final_outputs = self.activation_function(final_inputs)
return final_outputs
# 输入、隐藏和输出节点的数量
input_nodes = 784
hidden_nodes = 200
output_nodes = 10
# 学习速率
learning_rate = 0.1
# 创建神经网络实例
n = neuralNetwork(input_nodes,hidden_nodes,output_nodes, learning_rate)
# 将mnist训练数据CSV文件加载到列表中
training_data_file = open("MNIST_data/mnist_train.csv", 'r')
training_data_list = training_data_file.readlines()
training_data_file.close()
# 训练神经网络
# epochs是训练数据集用于训练的次数
epochs = 10
for e in range(epochs):
# 检查训练数据集中的所有记录
for record in training_data_list:
# 用逗号分隔记录
all_values = record.split(',')
# 规模和转移输入
inputs = (numpy.asfarray(all_values[1:]) / 255.0 * 0.99) + 0.01
# 创建目标输出值(都是0.01,除了所需的标签为0.99)
targets = numpy.zeros(output_nodes) + 0.01
# all_values[0]是该记录的目标标签
targets[int(all_values[0])] = 0.99
n.train(inputs, targets)
pass
pass
# 测试神经网络
# 将mnist测试数据csv文件加载到列表中
test_data_file = open("MNIST_data/mnist_test.csv", 'r')
test_data_list = test_data_file.readlines()
test_data_file.close()
# 记录神经网络执行情况,最初为空
scorecard = []
# 遍历测试数据集中的所有记录
for record in test_data_list:
all_values = record.split(',')
# 正确答案为第一个值
correct_label = int(all_values[0])
# 规模和转移输入
inputs = (numpy.asfarray(all_values[1:]) / 255.0 * 0.99) + 0.01
# 查询神经网络
outputs = n.query(inputs)
# 最大值的索引对应于标签
label = numpy.argmax(outputs)
# print("Answer label is:",correct_label," ; ",label," is network's answer")
# 判断结果正不正确
if (label == correct_label):
# 神经网络的测试结果与正确结果匹配,如果正确scorecard加1
scorecard.append(1)
else:
scorecard.append(0)
pass
# 计算准确率
scorecard_array = numpy.asarray(scorecard)
print("准确率为:", scorecard_array.sum() / scorecard_array.size)
# 用自己写的的图像测试数据集
our_own_dataset = []
# 加载png图像数据作为测试数据集
for image_file_name in glob.glob('Number4.png'):
# 使用文件名设置正确的标签
label = int(image_file_name[-5:-4])
# 将png文件图像转为数组
print("加载文件:", image_file_name)
img_array = imageio.imread(image_file_name, as_gray=True)
# 每张图片都由一个28 ×28 的矩阵表示,每张图片都由一个784 维的向量表示(28*28=784)
# 将数组的值减去了255.0。常规而言,0指的是黑色,255指的是白色,但是,MNIST数据集使用相反的方式表示,因此将值逆转过来以匹配MNIST数据
# 从28x28重塑到包含784个值的列表,反转值
img_data = 255.0 - img_array.reshape(784)
# 然后将数据缩放到范围从0.01到1.0
img_data = (img_data / 255.0 * 0.99) + 0.01
print(numpy.min(img_data))
print(numpy.max(img_data))
# 附加标签和图像数据来测试数据集
record = numpy.append(label, img_data)
our_own_dataset.append(record)
pass
# 用我们自己的图像来测试神经网络
# 记录测试
item = 0
# plot image
matplotlib.pyplot.imshow(our_own_dataset[item][1:].reshape(28,28), cmap='Greys', interpolation='None')
# 正确答案为第一个值
correct_label = our_own_dataset[item][0]
# 数据是剩余值
inputs = our_own_dataset[item][1:]
# 查询神经网络
outputs = n.query(inputs)
print (outputs)
# 最大值的索引对应于标签
label = numpy.argmax(outputs)
print("神经网络测试结果:", label)
# 判断结果正不正确
if (label == correct_label):
print ("match!")
else:
print ("no match!")
pass | 0.153137 | 0.40645 |
import os
from setuptools import setup
from subprocess import Popen, PIPE
def git_tag():
try:
cmd = ['git', 'describe', '--tags', '--abbrev=0']
proc = Popen(cmd, stdout=PIPE, stderr=PIPE)
proc.stderr.close()
return proc.stdout.readlines()[0].strip().decode()
except:
return "0.1"
data_files = []
dest_theme1 = "share/themes/Clearine-Fallback/clearine"
dest_theme2 = "share/themes/Clearine-White/clearine"
dest_conf = "share/clearine"
for directory, _, filenames in os.walk(u'src/data/'):
for filename in filenames:
sourcefile = [os.path.join(directory, filename)]
if filename.endswith(".conf"):
data_files.append((dest_conf, sourcefile))
for directory, _, filenames in os.walk(u'src/data/themes/Clearine-Fallback/clearine'):
for filename in filenames:
sourcefile = [os.path.join(directory, filename)]
if filename.endswith(".svg"):
data_files.append((dest_theme1, sourcefile))
for directory, _, filenames in os.walk(u'src/data/themes/Clearine-White/clearine'):
for filename in filenames:
sourcefile = [os.path.join(directory, filename)]
if filename.endswith(".svg"):
data_files.append((dest_theme2, sourcefile))
print(data_files)
setup(
name = "Clearine",
version = git_tag(),
author = "Silent Robot",
author_email = "<EMAIL>",
description = ("A Logout UI for X11 Window Managers"),
license = "MIT",
packages = ["Clearine"],
package_dir = {"Clearine": "src"},
package_data = {"Clearine": ["data/*"]},
data_files=data_files,
zip_safe=False,
url = "https://github.com/salientos/clearine",
project_urls = {
"Source": "https://github.com/salientos/clearine",
"Tracker": "https://github.com/salientos/clearine/issues",
},
install_requires = [
'pygobject',
'pycairo',
],
entry_points={
"console_scripts": ["clearine=Clearine.clearine:main"]
}
) | setup.py | import os
from setuptools import setup
from subprocess import Popen, PIPE
def git_tag():
try:
cmd = ['git', 'describe', '--tags', '--abbrev=0']
proc = Popen(cmd, stdout=PIPE, stderr=PIPE)
proc.stderr.close()
return proc.stdout.readlines()[0].strip().decode()
except:
return "0.1"
data_files = []
dest_theme1 = "share/themes/Clearine-Fallback/clearine"
dest_theme2 = "share/themes/Clearine-White/clearine"
dest_conf = "share/clearine"
for directory, _, filenames in os.walk(u'src/data/'):
for filename in filenames:
sourcefile = [os.path.join(directory, filename)]
if filename.endswith(".conf"):
data_files.append((dest_conf, sourcefile))
for directory, _, filenames in os.walk(u'src/data/themes/Clearine-Fallback/clearine'):
for filename in filenames:
sourcefile = [os.path.join(directory, filename)]
if filename.endswith(".svg"):
data_files.append((dest_theme1, sourcefile))
for directory, _, filenames in os.walk(u'src/data/themes/Clearine-White/clearine'):
for filename in filenames:
sourcefile = [os.path.join(directory, filename)]
if filename.endswith(".svg"):
data_files.append((dest_theme2, sourcefile))
print(data_files)
setup(
name = "Clearine",
version = git_tag(),
author = "Silent Robot",
author_email = "<EMAIL>",
description = ("A Logout UI for X11 Window Managers"),
license = "MIT",
packages = ["Clearine"],
package_dir = {"Clearine": "src"},
package_data = {"Clearine": ["data/*"]},
data_files=data_files,
zip_safe=False,
url = "https://github.com/salientos/clearine",
project_urls = {
"Source": "https://github.com/salientos/clearine",
"Tracker": "https://github.com/salientos/clearine/issues",
},
install_requires = [
'pygobject',
'pycairo',
],
entry_points={
"console_scripts": ["clearine=Clearine.clearine:main"]
}
) | 0.20466 | 0.08163 |
from model.backbone.mobilenetv2 import MobileNetV2_3feature
import torch
import numpy as np
import torch.nn.functional as F
import torch.nn as nn
class conv_bn_relu(torch.nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, bias=False):
super(conv_bn_relu, self).__init__()
self.conv = torch.nn.Conv2d(in_channels, out_channels, kernel_size,
stride=stride, padding=padding, dilation=dilation, bias=bias)
self.bn = torch.nn.BatchNorm2d(out_channels)
self.relu = torch.nn.ReLU()
def forward(self, x):
x = self.conv(x)
x = self.bn(x)
x = self.relu(x)
return x
def initialize_weights(*models):
for model in models:
real_init_weights(model)
def real_init_weights(m):
if isinstance(m, list):
for mini_m in m:
real_init_weights(mini_m)
else:
if isinstance(m, torch.nn.Conv2d):
torch.nn.init.kaiming_normal_(m.weight, nonlinearity='relu')
if m.bias is not None:
torch.nn.init.constant_(m.bias, 0)
elif isinstance(m, torch.nn.Linear):
m.weight.data.normal_(0.0, std=0.01)
elif isinstance(m, torch.nn.BatchNorm2d):
torch.nn.init.constant_(m.weight, 1)
torch.nn.init.constant_(m.bias, 0)
elif isinstance(m, torch.nn.Module):
for mini_m in m.children():
real_init_weights(mini_m)
else:
print('unkonwn module', m)
class LaneAttrNet(nn.Module):
def __init__(self, num_classes,
num_attributes,
in_channels=128,
fc_channels=3965):
super(LaneAttrNet, self).__init__()
self.layers = nn.Sequential(
nn.Conv2d(in_channels=in_channels, out_channels=32, kernel_size=(3, 3), stride=1,
padding=(4, 4), bias=False, dilation=(4, 4)),
nn.BatchNorm2d(num_features=32, eps=1e-03),
nn.ReLU()
)
self.layers_final = nn.Sequential(
nn.Dropout2d(0.1),
nn.Conv2d(in_channels=32,
out_channels=5,
kernel_size=(1, 1),
stride=1,
padding=(0, 0),
bias=True),
nn.ReLU()
)
self.maxpool = nn.MaxPool2d(2, stride=2, ceil_mode=True)
self.linear1 = nn.Linear(fc_channels, 128)
self.linear2 = nn.Linear(128, num_classes * num_attributes)
def forward(self, x):
r"""1x128x26x122 -> 1x5x13x61"""
# inputs
x = self.layers(x)
x = self.layers_final(x)
x = F.softmax(x, dim=1)
x = self.maxpool(x)
x = x.view(x.size(0), -1)
# print(x.size())
x = self.linear1(x)
x = F.relu(x)
x = self.linear2(x)
return x.sigmoid()
class parsingNet(nn.Module):
def __init__(self, num_lanes=4, backbone='mobilenetv2', cls_dim=(37, 10, 4),
use_aux=True):
super(parsingNet, self).__init__()
self.num_lanes = num_lanes
self.cls_dim = cls_dim
self.use_aux = use_aux
self.total_dim = np.prod(cls_dim)
self.model = MobileNetV2_3feature()
if self.use_aux:
self.aux_header2 = torch.nn.Sequential(
conv_bn_relu(32, 64, kernel_size=3, stride=1, padding=1),
conv_bn_relu(64, 64, 3, padding=1),
conv_bn_relu(64, 128, 3, padding=1),
conv_bn_relu(128, 128, 3, padding=1),
)
self.aux_header3 = torch.nn.Sequential(
conv_bn_relu(96, 128, kernel_size=3, stride=1, padding=1),
conv_bn_relu(128, 128, 3, padding=1),
conv_bn_relu(128, 128, 3, padding=1),
)
self.aux_header4 = torch.nn.Sequential(
conv_bn_relu(1280, 320, kernel_size=3, stride=1, padding=1),
conv_bn_relu(320, 128, 3, padding=1),
)
self.aux_combine = torch.nn.Sequential(
conv_bn_relu(384, 256, 3, padding=2, dilation=2),
conv_bn_relu(256, 128, 3, padding=2, dilation=2),
conv_bn_relu(128, 128, 3, padding=2, dilation=2),
conv_bn_relu(128, 128, 3, padding=4, dilation=4),
torch.nn.Conv2d(128, num_lanes + 1, 1)
)
initialize_weights(self.aux_header2, self.aux_header3, self.aux_header4, self.aux_combine)
self.pool = torch.nn.Conv2d(1280, 8, 1)
self.cls = torch.nn.Sequential(
torch.nn.Linear(1920, 2048),
torch.nn.ReLU(),
torch.nn.Linear(2048, self.total_dim),
)
# 1/32,2048 channel
# 288,800 -> 9,40,2048
# (w+1) * sample_rows * 4
# 37 * 10 * 4
self.segmaxpool2 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
self.laneAtrr = LaneAttrNet(num_classes=4, num_attributes=8, in_channels=480, fc_channels=1200)
initialize_weights(self.cls)
initialize_weights(self.laneAtrr)
def forward(self, x):
x2, x3_, fea = self.model(x)
if self.use_aux:
x2 = self.aux_header2(x2)
x3 = self.aux_header3(x3_)
x3 = torch.nn.functional.interpolate(x3, size=x2.shape[2:], mode='bilinear', align_corners=True)
x4 = self.aux_header4(fea)
x4 = torch.nn.functional.interpolate(x4, size=x2.shape[2:], mode='bilinear', align_corners=True)
aux_seg_ = torch.cat([x2, x3, x4], dim=1)
aux_seg = self.aux_combine(aux_seg_)
else:
aux_seg = None
fea_ = self.pool(fea).view(-1, 1920)
group_cls = self.cls(fea_).view(-1, *self.cls_dim)
seg_map = self.segmaxpool2(aux_seg_)
seg_map = torch.cat([x3_, seg_map], dim=1)
type_cls = self.laneAtrr(seg_map)
type_cls = type_cls.view(-1, 2, 16)
if self.use_aux:
return group_cls, aux_seg, type_cls
return group_cls, type_cls
if __name__ == '__main__':
from model.model_test_common import *
model = parsingNet(cls_dim=(201, 32, 4), use_aux=True).cuda()
from model.model_utils import modelParams_FLOPs, modelTime
modelParams_FLOPs(model, inputTensor)
modelTime(model, inputTensor)
# modelParams_FLOPs()
# net = MobileNetV2_3feature()
# modelParams_FLOPs(net, inputTensor) | src/model/net.py | from model.backbone.mobilenetv2 import MobileNetV2_3feature
import torch
import numpy as np
import torch.nn.functional as F
import torch.nn as nn
class conv_bn_relu(torch.nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, bias=False):
super(conv_bn_relu, self).__init__()
self.conv = torch.nn.Conv2d(in_channels, out_channels, kernel_size,
stride=stride, padding=padding, dilation=dilation, bias=bias)
self.bn = torch.nn.BatchNorm2d(out_channels)
self.relu = torch.nn.ReLU()
def forward(self, x):
x = self.conv(x)
x = self.bn(x)
x = self.relu(x)
return x
def initialize_weights(*models):
for model in models:
real_init_weights(model)
def real_init_weights(m):
if isinstance(m, list):
for mini_m in m:
real_init_weights(mini_m)
else:
if isinstance(m, torch.nn.Conv2d):
torch.nn.init.kaiming_normal_(m.weight, nonlinearity='relu')
if m.bias is not None:
torch.nn.init.constant_(m.bias, 0)
elif isinstance(m, torch.nn.Linear):
m.weight.data.normal_(0.0, std=0.01)
elif isinstance(m, torch.nn.BatchNorm2d):
torch.nn.init.constant_(m.weight, 1)
torch.nn.init.constant_(m.bias, 0)
elif isinstance(m, torch.nn.Module):
for mini_m in m.children():
real_init_weights(mini_m)
else:
print('unkonwn module', m)
class LaneAttrNet(nn.Module):
def __init__(self, num_classes,
num_attributes,
in_channels=128,
fc_channels=3965):
super(LaneAttrNet, self).__init__()
self.layers = nn.Sequential(
nn.Conv2d(in_channels=in_channels, out_channels=32, kernel_size=(3, 3), stride=1,
padding=(4, 4), bias=False, dilation=(4, 4)),
nn.BatchNorm2d(num_features=32, eps=1e-03),
nn.ReLU()
)
self.layers_final = nn.Sequential(
nn.Dropout2d(0.1),
nn.Conv2d(in_channels=32,
out_channels=5,
kernel_size=(1, 1),
stride=1,
padding=(0, 0),
bias=True),
nn.ReLU()
)
self.maxpool = nn.MaxPool2d(2, stride=2, ceil_mode=True)
self.linear1 = nn.Linear(fc_channels, 128)
self.linear2 = nn.Linear(128, num_classes * num_attributes)
def forward(self, x):
r"""1x128x26x122 -> 1x5x13x61"""
# inputs
x = self.layers(x)
x = self.layers_final(x)
x = F.softmax(x, dim=1)
x = self.maxpool(x)
x = x.view(x.size(0), -1)
# print(x.size())
x = self.linear1(x)
x = F.relu(x)
x = self.linear2(x)
return x.sigmoid()
class parsingNet(nn.Module):
def __init__(self, num_lanes=4, backbone='mobilenetv2', cls_dim=(37, 10, 4),
use_aux=True):
super(parsingNet, self).__init__()
self.num_lanes = num_lanes
self.cls_dim = cls_dim
self.use_aux = use_aux
self.total_dim = np.prod(cls_dim)
self.model = MobileNetV2_3feature()
if self.use_aux:
self.aux_header2 = torch.nn.Sequential(
conv_bn_relu(32, 64, kernel_size=3, stride=1, padding=1),
conv_bn_relu(64, 64, 3, padding=1),
conv_bn_relu(64, 128, 3, padding=1),
conv_bn_relu(128, 128, 3, padding=1),
)
self.aux_header3 = torch.nn.Sequential(
conv_bn_relu(96, 128, kernel_size=3, stride=1, padding=1),
conv_bn_relu(128, 128, 3, padding=1),
conv_bn_relu(128, 128, 3, padding=1),
)
self.aux_header4 = torch.nn.Sequential(
conv_bn_relu(1280, 320, kernel_size=3, stride=1, padding=1),
conv_bn_relu(320, 128, 3, padding=1),
)
self.aux_combine = torch.nn.Sequential(
conv_bn_relu(384, 256, 3, padding=2, dilation=2),
conv_bn_relu(256, 128, 3, padding=2, dilation=2),
conv_bn_relu(128, 128, 3, padding=2, dilation=2),
conv_bn_relu(128, 128, 3, padding=4, dilation=4),
torch.nn.Conv2d(128, num_lanes + 1, 1)
)
initialize_weights(self.aux_header2, self.aux_header3, self.aux_header4, self.aux_combine)
self.pool = torch.nn.Conv2d(1280, 8, 1)
self.cls = torch.nn.Sequential(
torch.nn.Linear(1920, 2048),
torch.nn.ReLU(),
torch.nn.Linear(2048, self.total_dim),
)
# 1/32,2048 channel
# 288,800 -> 9,40,2048
# (w+1) * sample_rows * 4
# 37 * 10 * 4
self.segmaxpool2 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
self.laneAtrr = LaneAttrNet(num_classes=4, num_attributes=8, in_channels=480, fc_channels=1200)
initialize_weights(self.cls)
initialize_weights(self.laneAtrr)
def forward(self, x):
x2, x3_, fea = self.model(x)
if self.use_aux:
x2 = self.aux_header2(x2)
x3 = self.aux_header3(x3_)
x3 = torch.nn.functional.interpolate(x3, size=x2.shape[2:], mode='bilinear', align_corners=True)
x4 = self.aux_header4(fea)
x4 = torch.nn.functional.interpolate(x4, size=x2.shape[2:], mode='bilinear', align_corners=True)
aux_seg_ = torch.cat([x2, x3, x4], dim=1)
aux_seg = self.aux_combine(aux_seg_)
else:
aux_seg = None
fea_ = self.pool(fea).view(-1, 1920)
group_cls = self.cls(fea_).view(-1, *self.cls_dim)
seg_map = self.segmaxpool2(aux_seg_)
seg_map = torch.cat([x3_, seg_map], dim=1)
type_cls = self.laneAtrr(seg_map)
type_cls = type_cls.view(-1, 2, 16)
if self.use_aux:
return group_cls, aux_seg, type_cls
return group_cls, type_cls
if __name__ == '__main__':
    # Quick profiling entry point: build the network on GPU and report its
    # parameter count / FLOPs and per-forward timing.
    from model.model_test_common import *  # presumably provides inputTensor — TODO confirm
    model = parsingNet(cls_dim=(201, 32, 4), use_aux=True).cuda()
    from model.model_utils import modelParams_FLOPs, modelTime
    modelParams_FLOPs(model, inputTensor)
    modelTime(model, inputTensor)
    # modelParams_FLOPs()
    # net = MobileNetV2_3feature()
# modelParams_FLOPs(net, inputTensor) | 0.956166 | 0.416915 |
"""Tests for parsing module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import inspect
import textwrap
from absl.testing import absltest as test
import gast
from pyctr.core import parsing
class ParsingTest(test.TestCase):
    """Unit tests for the ``pyctr.core.parsing`` helpers."""

    def test_parse_entity(self):
        """parse_entity returns a module whose first node is the function."""

        def f(x):
            return x + 1

        mod, _ = parsing.parse_entity(f)
        self.assertEqual('f', mod.body[0].name)

    def test_parse_str(self):
        """parse_str parses a dedented source string into an AST module."""
        mod = parsing.parse_str(
            textwrap.dedent("""
            def f(x):
              return x + 1
            """))
        self.assertEqual('f', mod.body[0].name)

    def test_parse_comments(self):
        """A comment that defeats uniform dedenting raises ValueError."""

        def f():
# unindented comment
            pass

        with self.assertRaises(ValueError):
            parsing.parse_entity(f)

    def test_parse_multiline_strings(self):
        """A multiline string flush at column 0 defeats dedenting -> ValueError."""

        def f():
            print("""
some
multiline
string""")

        with self.assertRaises(ValueError):
            parsing.parse_entity(f)

    def test_parse_expression(self):
        """parse_expression returns the bare expression node (attribute access)."""
        node = parsing.parse_expression('a.b')
        self.assertEqual('a', node.value.id)
        self.assertEqual('b', node.attr)

    def test_parsing_compile_idempotent(self):
        """ast_to_object(parse_entity(f)) round-trips the exact source text."""

        def test_fn(x):
            a = True
            b = ''
            if a:
                b = x + 1
            return b

        self.assertEqual(
            textwrap.dedent(inspect.getsource(test_fn)),
            inspect.getsource(
                parsing.ast_to_object(
                    parsing.parse_entity(test_fn)[0].body[0])[0].test_fn))

    def test_ast_to_source(self):
        """ast_to_source renders an If node with the requested indentation."""
        node = gast.If(
            test=gast.Num(1),
            body=[
                gast.Assign(
                    targets=[gast.Name('a', gast.Store(), None)],
                    value=gast.Name('b', gast.Load(), None))
            ],
            orelse=[
                gast.Assign(
                    targets=[gast.Name('a', gast.Store(), None)],
                    value=gast.Str('c'))
            ])
        source = parsing.ast_to_source(node, indentation=' ')
        self.assertEqual(
            textwrap.dedent("""
            if 1:
             a = b
            else:
             a = 'c'
            """).strip(), source.strip())

    def test_ast_to_object(self):
        """ast_to_object compiles a FunctionDef and writes its source to disk."""
        node = gast.FunctionDef(
            name='f',
            args=gast.arguments(
                args=[gast.Name('a', gast.Param(), None)],
                vararg=None,
                kwonlyargs=[],
                kwarg=None,
                defaults=[],
                kw_defaults=[]),
            body=[
                gast.Return(
                    gast.BinOp(
                        op=gast.Add(),
                        left=gast.Name('a', gast.Load(), None),
                        right=gast.Num(1)))
            ],
            decorator_list=[],
            returns=None)
        module, source = parsing.ast_to_object(node)
        expected_source = """
            def f(a):
                return a + 1
        """
        self.assertEqual(textwrap.dedent(expected_source).strip(), source.strip())
        self.assertEqual(2, module.f(1))
        with open(module.__file__, 'r') as temp_output:
            self.assertEqual(
                textwrap.dedent(expected_source).strip(),
                temp_output.read().strip())
if __name__ == '__main__':
test.main() | core/parsing_test.py | """Tests for parsing module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import inspect
import textwrap
from absl.testing import absltest as test
import gast
from pyctr.core import parsing
class ParsingTest(test.TestCase):
def test_parse_entity(self):
def f(x):
return x + 1
mod, _ = parsing.parse_entity(f)
self.assertEqual('f', mod.body[0].name)
def test_parse_str(self):
mod = parsing.parse_str(
textwrap.dedent("""
def f(x):
return x + 1
"""))
self.assertEqual('f', mod.body[0].name)
def test_parse_comments(self):
def f():
# unindented comment
pass
with self.assertRaises(ValueError):
parsing.parse_entity(f)
def test_parse_multiline_strings(self):
def f():
print("""
some
multiline
string""")
with self.assertRaises(ValueError):
parsing.parse_entity(f)
def test_parse_expression(self):
node = parsing.parse_expression('a.b')
self.assertEqual('a', node.value.id)
self.assertEqual('b', node.attr)
def test_parsing_compile_idempotent(self):
def test_fn(x):
a = True
b = ''
if a:
b = x + 1
return b
self.assertEqual(
textwrap.dedent(inspect.getsource(test_fn)),
inspect.getsource(
parsing.ast_to_object(
parsing.parse_entity(test_fn)[0].body[0])[0].test_fn))
def test_ast_to_source(self):
node = gast.If(
test=gast.Num(1),
body=[
gast.Assign(
targets=[gast.Name('a', gast.Store(), None)],
value=gast.Name('b', gast.Load(), None))
],
orelse=[
gast.Assign(
targets=[gast.Name('a', gast.Store(), None)],
value=gast.Str('c'))
])
source = parsing.ast_to_source(node, indentation=' ')
self.assertEqual(
textwrap.dedent("""
if 1:
a = b
else:
a = 'c'
""").strip(), source.strip())
def test_ast_to_object(self):
node = gast.FunctionDef(
name='f',
args=gast.arguments(
args=[gast.Name('a', gast.Param(), None)],
vararg=None,
kwonlyargs=[],
kwarg=None,
defaults=[],
kw_defaults=[]),
body=[
gast.Return(
gast.BinOp(
op=gast.Add(),
left=gast.Name('a', gast.Load(), None),
right=gast.Num(1)))
],
decorator_list=[],
returns=None)
module, source = parsing.ast_to_object(node)
expected_source = """
def f(a):
return a + 1
"""
self.assertEqual(textwrap.dedent(expected_source).strip(), source.strip())
self.assertEqual(2, module.f(1))
with open(module.__file__, 'r') as temp_output:
self.assertEqual(
textwrap.dedent(expected_source).strip(),
temp_output.read().strip())
if __name__ == '__main__':
test.main() | 0.822011 | 0.486636 |
from pathlib import Path
from typing import Dict, List, Union
import einops
import torch
import torch.nn.functional as F
import torchaudio
from torch import Tensor
from torch.utils.data import Dataset, DataLoader
from peach.utils import TokenConverter
class SpeechCommandsDataset(Dataset):
    """Keyword-spotting dataset of fixed-length audio waveforms.

    Each item is a ``(waveform, target)`` pair; the waveform is flattened to
    1-D and right-padded with zeros to ``max_waveform_length`` samples.
    """

    def __init__(
        self,
        wav_paths: List[Path],
        targets: List[int],
        max_waveform_length: int=16000,
    ):
        self.wav_paths = wav_paths
        self.targets = targets
        self.max_waveform_length = max_waveform_length

    def __len__(self):
        return len(self.wav_paths)

    def __getitem__(self, idx):
        # Load the clip and collapse the channel axis into one 1-D signal.
        waveform, _sample_rate = torchaudio.load(self.wav_paths[idx])
        flat = einops.rearrange(waveform, 'b x -> (b x)')
        # Zero-pad on the right up to the fixed length the model expects.
        pad_amount = self.max_waveform_length - len(flat)
        padded = F.pad(
            input=flat,
            pad=(0, pad_amount),
            mode='constant',
            value=0,
        )
        label = torch.tensor(self.targets[idx])
        return (padded, label)
class SpeechCommandsDataModule:
    """Builds train/validation dataloaders for the speech-commands data."""

    def __init__(
        self,
        data_dir: Path,
        batch_size: int,
        num_workers: int,
    ):
        self.data_dir = data_dir
        self.batch_size = batch_size
        self.num_workers = num_workers

    @staticmethod
    def prepare_data(
        data_dir: Path,
        keywords: List[str],
    ) -> Dict[str, List[Union[Path, str]]]:
        """Collect every .wav under data_dir; label 1 iff its folder is a keyword."""
        wav_paths = [wav for wav in data_dir.glob('**/*.wav')]
        #TODO target: float?
        targets = [int(wav.parents[0].name in keywords) for wav in wav_paths]
        return dict(
            wav_paths=wav_paths,
            targets=targets,
        )

    def setup(
        self,
        val_ratio,
    ):
        """Build the full dataset and split it into train/val subsets."""
        data = self.prepare_data(
            data_dir=self.data_dir,
            keywords=['right', 'marvin'],
        )
        full_dataset = SpeechCommandsDataset(
            wav_paths=data['wav_paths'],
            targets=data['targets'],
        )
        n_total = len(full_dataset)
        n_val = int(val_ratio * n_total)
        # random_split expects [train_size, val_size] in this order.
        self.train_dataset, self.val_dataset = torch.utils.data.random_split(
            dataset=full_dataset,
            lengths=[n_total - n_val, n_val],
        )

    def train_dataloader(self) -> DataLoader:
        return DataLoader(
            dataset=self.train_dataset,
            batch_size=self.batch_size,
            num_workers=self.num_workers,
        )

    def val_dataloader(self) -> DataLoader:
        return DataLoader(
            dataset=self.val_dataset,
            batch_size=self.batch_size,
            num_workers=self.num_workers,
        )

    def test_dataloader(self):
        # Not implemented: no held-out test split is defined for this data.
        pass
from typing import Dict, List, Union
import einops
import torch
import torch.nn.functional as F
import torchaudio
from torch import Tensor
from torch.utils.data import Dataset, DataLoader
from peach.utils import TokenConverter
class SpeechCommandsDataset(Dataset):
def __init__(
self,
wav_paths: List[Path],
targets: List[int],
max_waveform_length: int=16000,
):
self.wav_paths = wav_paths
self.targets = targets
self.max_waveform_length = max_waveform_length
def __len__(self):
return len(self.wav_paths)
def __getitem__(self, idx):
wav_path = self.wav_paths[idx]
waveform, sample_rate = torchaudio.load(wav_path)
waveform = einops.rearrange(waveform, 'b x -> (b x)')
padded_waveform = F.pad(
input=waveform,
pad=(0, self.max_waveform_length - len(waveform)),
mode='constant',
value=0,
)
target = torch.tensor(self.targets[idx])
result = (
padded_waveform,
target,
)
return result
class SpeechCommandsDataModule:
def __init__(
self,
data_dir: Path,
batch_size: int,
num_workers: int,
):
self.data_dir = data_dir
self.batch_size = batch_size
self.num_workers = num_workers
@staticmethod
def prepare_data(
data_dir: Path,
keywords: List[str],
) -> Dict[str, List[Union[Path, str]]]:
wav_paths = list(f for f in data_dir.glob('**/*.wav'))
targets = list()
for p in wav_paths:
flag = p.parents[0].name in keywords
targets.append(int(flag))
#TODO target: float?
data = dict(
wav_paths=wav_paths,
targets=targets,
)
return data
def setup(
self,
val_ratio,
):
data = self.prepare_data(
data_dir=self.data_dir,
keywords=['right', 'marvin'],
)
wav_paths = data['wav_paths']
targets = data['targets']
full_dataset = SpeechCommandsDataset(
wav_paths=wav_paths,
targets=targets,
)
full_size = len(full_dataset)
val_size = int(val_ratio * full_size)
train_size = full_size - val_size
self.train_dataset, self.val_dataset = torch.utils.data.random_split(
dataset=full_dataset,
lengths=[train_size, val_size],
)
def train_dataloader(self) -> DataLoader:
train_dataloader = DataLoader(
dataset=self.train_dataset,
batch_size=self.batch_size,
num_workers=self.num_workers,
)
return train_dataloader
def val_dataloader(self) -> DataLoader:
val_dataloader = DataLoader(
dataset=self.val_dataset,
batch_size=self.batch_size,
num_workers=self.num_workers,
)
return val_dataloader
def test_dataloader(self):
pass | 0.745954 | 0.372591 |
import argparse
import csv
from typing import Dict

# A row is kept only if its parsed ladder contains exactly this many
# wanted renditions.
EXPECTED_RESOLUTIONS = 6
# Vertical resolutions worth keeping, and their matching stream/format ids
# (paired positionally: 160<->144, 133<->240, ... — see get_renditions).
WANTED_RESOLUTIONS = {144, 240, 360, 480, 720, 1080}
WANTED_RESOLUTION_IDS = {160, 133, 134, 135, 136, 137}
# Default output path; overridden by --output below.
OUTPUT_FILE = 'yt8m_data.csv'
HEADER = ['', 'creator', 'duration', 'id', 'labels', 'ladder', 'title', 'views']

# Command-line interface: input CSV, optional output CSV, and the number of
# clean records to keep. Parsed at import time, so this module is a script.
parser = argparse.ArgumentParser(description='Clean data')
parser.add_argument('-i', '--input', action='store', help='Input file to clean', type=str, required=True)
parser.add_argument('-o', '--output', action='store', help='Output file to store results', type=str, required=False)
parser.add_argument('-n', '--number', action='store', help='Number of records to keep', type=int, required=True)
args = parser.parse_args()
input_file = args.input
number_records_to_keep = args.number
if args.output:
    OUTPUT_FILE = args.output
def get_renditions(renditions: str) -> Dict:
    """Parse a stringified rendition ladder into a {resolution: bitrate} dict.

    The input looks like ``"[{<id> - <width>x<height> : <bitrate>, ...}]"``.
    Only entries whose height is in WANTED_RESOLUTIONS and whose format id is
    in WANTED_RESOLUTION_IDS are kept; malformed entries are skipped.
    """
    ladder = {}
    # Strip the list/dict/quote punctuation in one pass, leaving only the
    # comma-separated "<id> - <w>x<h> : <bitrate>" items.
    renditions = renditions.translate(str.maketrans('', '', "[]{}'")).strip()
    for step in renditions.split(','):
        try:
            resolution_id = int(step.split('-')[0].strip())
            resolution = int(step.split('x')[1].split(' ')[0])
            bitrate = float(step.split(':')[1])
            if resolution in WANTED_RESOLUTIONS and resolution_id in WANTED_RESOLUTION_IDS:
                ladder[resolution] = bitrate
        except (ValueError, IndexError):
            # Was a bare `except:` — narrowed so only malformed entries are
            # skipped and e.g. KeyboardInterrupt is no longer swallowed.
            print('There was an error')
    return ladder
def read_all_rows(file_to_read):
    """Read rows from the input CSV whose rendition ladder is complete.

    A row is kept when its ladder (column 5) parses into exactly
    EXPECTED_RESOLUTIONS wanted renditions. Collection stops once the
    module-level ``number_records_to_keep`` (set from the CLI) is reached.
    Returns the list of kept rows.
    """
    processed_rows = 0
    read_rows = []
    with open(file_to_read) as csv_file:
        csv_reader = csv.reader(csv_file, delimiter=',', quotechar='"')
        next(csv_reader)  # skip the header row
        for row in csv_reader:
            if processed_rows < number_records_to_keep:
                renditions = get_renditions(row[5])
                if EXPECTED_RESOLUTIONS == len(renditions):
                    processed_rows += 1
                    read_rows.append(row)
            else:
                # Enough rows collected; stop reading the file early.
                break
    return read_rows
def write_all_rows(rows_to_write):
    """Write HEADER followed by ``rows_to_write`` to OUTPUT_FILE as CSV.

    Uses the module-level OUTPUT_FILE and HEADER globals set at import time.
    """
    # newline='' is required by the csv module so the writer controls row
    # terminators itself (otherwise blank lines appear on Windows).
    with open(OUTPUT_FILE, mode='w', newline='') as file_to_write:
        csv_writer = csv.writer(file_to_write, delimiter=',', quotechar='"')
        csv_writer.writerow(HEADER)
        csv_writer.writerows(rows_to_write)
if __name__ == "__main__":
    # Filter the input dataset and write the cleaned rows in one pass.
    all_rows = read_all_rows(input_file)
    write_all_rows(all_rows)
import argparse
import csv
from typing import Dict
EXPECTED_RESOLUTIONS = 6
WANTED_RESOLUTIONS = {144, 240, 360, 480, 720, 1080}
WANTED_RESOLUTION_IDS = {160, 133, 134, 135, 136, 137}
OUTPUT_FILE = 'yt8m_data.csv'
HEADER = ['', 'creator', 'duration', 'id', 'labels', 'ladder', 'title', 'views']
parser = argparse.ArgumentParser(description='Clean data')
parser.add_argument('-i', '--input', action='store', help='Input file to clean', type=str, required=True)
parser.add_argument('-o', '--output', action='store', help='Output file to store results', type=str, required=False)
parser.add_argument('-n', '--number', action='store', help='Number of records to keep', type=int, required=True)
args = parser.parse_args()
input_file = args.input
number_records_to_keep = args.number
if args.output:
OUTPUT_FILE = args.output
def get_renditions(renditions: str) -> Dict:
ladder = {}
renditions = renditions.replace('[', '')
renditions = renditions.replace(']', '')
renditions = renditions.replace('}', '')
renditions = renditions.replace('{', '')
renditions = renditions.replace("'", '')
renditions = renditions.strip()
data = renditions.split(',')
for step in data:
try:
resolution_id = int(step.split('-')[0].strip())
resolution = int(step.split('x')[1].split(' ')[0])
bitrate = float(step.split(':')[1])
if resolution in WANTED_RESOLUTIONS and resolution_id in WANTED_RESOLUTION_IDS:
ladder[resolution] = bitrate
except:
print('There was an error')
return ladder
def read_all_rows(file_to_read):
processed_rows = 0
read_rows = []
with open(file_to_read) as csv_file:
csv_reader = csv.reader(csv_file, delimiter=',', quotechar='"')
next(csv_reader)
for row in csv_reader:
if processed_rows < number_records_to_keep:
renditions = get_renditions(row[5])
if EXPECTED_RESOLUTIONS == len(renditions):
processed_rows += 1
read_rows.append(row)
else:
break
return read_rows
def write_all_rows(rows_to_write):
with open(OUTPUT_FILE, mode='w') as file_to_write:
csv_writer = csv.writer(file_to_write, delimiter=',', quotechar='"')
csv_writer.writerow(HEADER)
for row in rows_to_write:
csv_writer.writerow(row)
if __name__ == "__main__":
all_rows = read_all_rows(input_file)
write_all_rows(all_rows) | 0.477311 | 0.174481 |
from nfv_vim.database._database_block_storage_module import database_volume_add # noqa: F401
from nfv_vim.database._database_block_storage_module import database_volume_delete # noqa: F401
from nfv_vim.database._database_block_storage_module import database_volume_get_list # noqa: F401
from nfv_vim.database._database_block_storage_module import database_volume_snapshot_add # noqa: F401
from nfv_vim.database._database_block_storage_module import database_volume_snapshot_delete # noqa: F401
from nfv_vim.database._database_block_storage_module import database_volume_snapshot_get_list # noqa: F401
from nfv_vim.database._database_compute_module import database_host_aggregate_add # noqa: F401
from nfv_vim.database._database_compute_module import database_host_aggregate_delete # noqa: F401
from nfv_vim.database._database_compute_module import database_host_aggregate_get_list # noqa: F401
from nfv_vim.database._database_compute_module import database_hypervisor_add # noqa: F401
from nfv_vim.database._database_compute_module import database_hypervisor_delete # noqa: F401
from nfv_vim.database._database_compute_module import database_hypervisor_get_list # noqa: F401
from nfv_vim.database._database_compute_module import database_instance_add # noqa: F401
from nfv_vim.database._database_compute_module import database_instance_delete # noqa: F401
from nfv_vim.database._database_compute_module import database_instance_get_list # noqa: F401
from nfv_vim.database._database_compute_module import database_instance_group_add # noqa: F401
from nfv_vim.database._database_compute_module import database_instance_group_delete # noqa: F401
from nfv_vim.database._database_compute_module import database_instance_group_get_list # noqa: F401
from nfv_vim.database._database_compute_module import database_instance_type_add # noqa: F401
from nfv_vim.database._database_compute_module import database_instance_type_delete # noqa: F401
from nfv_vim.database._database_compute_module import database_instance_type_get_list # noqa: F401
from nfv_vim.database._database_compute_module import database_service_host_add # noqa: F401
from nfv_vim.database._database_compute_module import database_service_host_delete # noqa: F401
from nfv_vim.database._database_compute_module import database_service_host_get_list # noqa: F401
from nfv_vim.database._database_identity_module import database_tenant_add # noqa: F401
from nfv_vim.database._database_identity_module import database_tenant_delete # noqa: F401
from nfv_vim.database._database_identity_module import database_tenant_get_list # noqa: F401
from nfv_vim.database._database_image_module import database_image_add # noqa: F401
from nfv_vim.database._database_image_module import database_image_delete # noqa: F401
from nfv_vim.database._database_image_module import database_image_get_list # noqa: F401
from nfv_vim.database._database_infrastructure_module import database_host_add # noqa: F401
from nfv_vim.database._database_infrastructure_module import database_host_delete # noqa: F401
from nfv_vim.database._database_infrastructure_module import database_host_get_list # noqa: F401
from nfv_vim.database._database_infrastructure_module import database_host_group_add # noqa: F401
from nfv_vim.database._database_infrastructure_module import database_host_group_delete # noqa: F401
from nfv_vim.database._database_infrastructure_module import database_host_group_get_list # noqa: F401
from nfv_vim.database._database_infrastructure_module import database_system_add # noqa: F401
from nfv_vim.database._database_infrastructure_module import database_system_delete # noqa: F401
from nfv_vim.database._database_infrastructure_module import database_system_get_list # noqa: F401
from nfv_vim.database._database_module import database_dump_data # noqa: F401
from nfv_vim.database._database_module import database_finalize # noqa: F401
from nfv_vim.database._database_module import database_initialize # noqa: F401
from nfv_vim.database._database_module import database_load_data # noqa: F401
from nfv_vim.database._database_module import database_migrate_data # noqa: F401
from nfv_vim.database._database_network_module import database_network_add # noqa: F401
from nfv_vim.database._database_network_module import database_network_delete # noqa: F401
from nfv_vim.database._database_network_module import database_network_get_list # noqa: F401
from nfv_vim.database._database_network_module import database_subnet_add # noqa: F401
from nfv_vim.database._database_network_module import database_subnet_delete # noqa: F401
from nfv_vim.database._database_network_module import database_subnet_get_list # noqa: F401
from nfv_vim.database._database_sw_update import database_sw_update_add # noqa: F401
from nfv_vim.database._database_sw_update import database_sw_update_delete # noqa: F401
from nfv_vim.database._database_sw_update import database_sw_update_get_list # noqa: F401 | nfv/nfv-vim/nfv_vim/database/__init__.py | from nfv_vim.database._database_block_storage_module import database_volume_add # noqa: F401
from nfv_vim.database._database_block_storage_module import database_volume_delete # noqa: F401
from nfv_vim.database._database_block_storage_module import database_volume_get_list # noqa: F401
from nfv_vim.database._database_block_storage_module import database_volume_snapshot_add # noqa: F401
from nfv_vim.database._database_block_storage_module import database_volume_snapshot_delete # noqa: F401
from nfv_vim.database._database_block_storage_module import database_volume_snapshot_get_list # noqa: F401
from nfv_vim.database._database_compute_module import database_host_aggregate_add # noqa: F401
from nfv_vim.database._database_compute_module import database_host_aggregate_delete # noqa: F401
from nfv_vim.database._database_compute_module import database_host_aggregate_get_list # noqa: F401
from nfv_vim.database._database_compute_module import database_hypervisor_add # noqa: F401
from nfv_vim.database._database_compute_module import database_hypervisor_delete # noqa: F401
from nfv_vim.database._database_compute_module import database_hypervisor_get_list # noqa: F401
from nfv_vim.database._database_compute_module import database_instance_add # noqa: F401
from nfv_vim.database._database_compute_module import database_instance_delete # noqa: F401
from nfv_vim.database._database_compute_module import database_instance_get_list # noqa: F401
from nfv_vim.database._database_compute_module import database_instance_group_add # noqa: F401
from nfv_vim.database._database_compute_module import database_instance_group_delete # noqa: F401
from nfv_vim.database._database_compute_module import database_instance_group_get_list # noqa: F401
from nfv_vim.database._database_compute_module import database_instance_type_add # noqa: F401
from nfv_vim.database._database_compute_module import database_instance_type_delete # noqa: F401
from nfv_vim.database._database_compute_module import database_instance_type_get_list # noqa: F401
from nfv_vim.database._database_compute_module import database_service_host_add # noqa: F401
from nfv_vim.database._database_compute_module import database_service_host_delete # noqa: F401
from nfv_vim.database._database_compute_module import database_service_host_get_list # noqa: F401
from nfv_vim.database._database_identity_module import database_tenant_add # noqa: F401
from nfv_vim.database._database_identity_module import database_tenant_delete # noqa: F401
from nfv_vim.database._database_identity_module import database_tenant_get_list # noqa: F401
from nfv_vim.database._database_image_module import database_image_add # noqa: F401
from nfv_vim.database._database_image_module import database_image_delete # noqa: F401
from nfv_vim.database._database_image_module import database_image_get_list # noqa: F401
from nfv_vim.database._database_infrastructure_module import database_host_add # noqa: F401
from nfv_vim.database._database_infrastructure_module import database_host_delete # noqa: F401
from nfv_vim.database._database_infrastructure_module import database_host_get_list # noqa: F401
from nfv_vim.database._database_infrastructure_module import database_host_group_add # noqa: F401
from nfv_vim.database._database_infrastructure_module import database_host_group_delete # noqa: F401
from nfv_vim.database._database_infrastructure_module import database_host_group_get_list # noqa: F401
from nfv_vim.database._database_infrastructure_module import database_system_add # noqa: F401
from nfv_vim.database._database_infrastructure_module import database_system_delete # noqa: F401
from nfv_vim.database._database_infrastructure_module import database_system_get_list # noqa: F401
from nfv_vim.database._database_module import database_dump_data # noqa: F401
from nfv_vim.database._database_module import database_finalize # noqa: F401
from nfv_vim.database._database_module import database_initialize # noqa: F401
from nfv_vim.database._database_module import database_load_data # noqa: F401
from nfv_vim.database._database_module import database_migrate_data # noqa: F401
from nfv_vim.database._database_network_module import database_network_add # noqa: F401
from nfv_vim.database._database_network_module import database_network_delete # noqa: F401
from nfv_vim.database._database_network_module import database_network_get_list # noqa: F401
from nfv_vim.database._database_network_module import database_subnet_add # noqa: F401
from nfv_vim.database._database_network_module import database_subnet_delete # noqa: F401
from nfv_vim.database._database_network_module import database_subnet_get_list # noqa: F401
from nfv_vim.database._database_sw_update import database_sw_update_add # noqa: F401
from nfv_vim.database._database_sw_update import database_sw_update_delete # noqa: F401
from nfv_vim.database._database_sw_update import database_sw_update_get_list # noqa: F401 | 0.232484 | 0.030705 |
import gc
import math
import multiprocessing
import tempfile
from importlib.util import find_spec
from os.path import join
from typing import Optional
import lightgbm
import numpy
from lightgbm import Booster, record_evaluation
from aydin.regression.base import RegressorBase
from aydin.regression.gbm_utils.callbacks import early_stopping
from aydin.util.log.log import lsection, lprint
class LGBMRegressor(RegressorBase):
    """
    The LightGBM Regressor uses the gradient boosting library <a
    href="https://github.com/microsoft/LightGBM">LightGBM</a> to perform
    regression from a set of feature vectors and target values. LightGBM is a
    solid library but we do not yet support GPU training and inference. Because
    of the lack of GPU support LightGBM is slower than CatBoost; sometimes
    LightGBM gives better results than CatBoost, but not often enough to
    justify the loss of speed.
    """

    def __init__(
        self,
        num_leaves: Optional[int] = None,
        max_num_estimators: Optional[int] = None,
        max_bin: int = 512,
        learning_rate: Optional[float] = None,
        loss: str = 'l1',
        patience: int = 5,
        verbosity: int = -1,
        compute_load: float = 0.95,
        inference_mode: str = None,
        compute_training_loss: bool = False,
    ):
        """Constructs a LightGBM regressor.

        Parameters
        ----------
        num_leaves
            Number of leaves in the decision trees.
            We recommend values between 128 and 512.
            (advanced)
        max_num_estimators
            Maximum number of estimators (trees). Typical values range from 1024
            to 4096. Use larger values for more difficult datasets. If training
            stops exactly at these values that is a sign you need to increase this
            number. Quality of the results typically increases with the number of
            estimators, but so does computation time too.
            We do not recommend using a value of more than 10000.
        max_bin
            Maximum number of allowed bins. The features are quantised into that
            many bins. Higher values achieve better quantisation of features but
            also lead to longer training and more memory consumption. We do not
            recommend changing this parameter.
            (advanced)
        learning_rate
            Learning rate for the LightGBM model. The learning rate is determined
            automatically if the value None is given. We recommend values around 0.01.
            (advanced)
        loss
            Type of loss to be used. Can be 'l1' for L1 loss (MAE), 'l2' for
            L2 loss (RMSE), 'huber' for Huber loss, 'poisson' for Poisson loss,
            and 'quantile' for Quantile loss. We recommend using: 'l1'.
            (advanced)
        patience
            Number of rounds after which training stops if no improvement occurs.
            (advanced)
        verbosity
            Verbosity setting of LightGBM.
            (advanced)
        compute_load
            Allowed load on computational resources in percentage, typically used
            for CPU training when deciding on how many available cores to use.
            (advanced)
        inference_mode : str
            Chooses inference mode: can be 'opencl' for an OpenCL backend,
            'lleaves' for the very fast lleaves library (only OSX and Linux),
            'lgbm' for the standard lightGBM inference engine, and 'auto' (or None)
            tries the best/fastest options first and falls back to lightGBM default
            inference.
            (advanced)
        compute_training_loss : bool
            Flag to tell LightGBM whether to compute training loss or not
            (advanced)
        """
        super().__init__()

        self.force_verbose_eval = False

        self.num_leaves = 512 if num_leaves is None else num_leaves
        self.max_num_estimators = (
            int(1e4) if max_num_estimators is None else max_num_estimators
        )
        self.max_bin = max_bin
        self.learning_rate = 0.01 if learning_rate is None else learning_rate
        self.metric = loss
        self.early_stopping_rounds = patience
        self.verbosity = verbosity
        self.compute_load = compute_load
        self.inference_mode = 'auto' if inference_mode is None else inference_mode
        self.compute_training_loss = compute_training_loss  # This can be expensive

        # Lazily-created OpenCL predictor slot (see _LGBMModel._predict_opencl).
        self.opencl_predictor = None

        with lsection("LGBM Regressor"):
            lprint(f"learning rate: {self.learning_rate}")
            lprint(f"number of leaves: {self.num_leaves}")
            lprint(f"max bin: {self.max_bin}")
            lprint(f"n_estimators: {self.max_num_estimators}")
            lprint(f"patience: {self.early_stopping_rounds}")
            lprint(f"inference_mode: {self.inference_mode}")

    def _get_params(self, num_samples, dtype=numpy.float32):
        """Assemble the parameter dict passed to ``lightgbm.train``.

        Parameters
        ----------
        num_samples
            Number of training samples (currently unused; kept for signature
            stability).
        dtype
            Dtype of the feature matrix; uint8 features only need 256 bins.

        Returns
        -------
        dict of LightGBM training parameters.
        """
        # Map the user-facing loss name onto LightGBM's objective identifier,
        # falling back to L1 regression for unrecognised values.
        objective = self.metric
        if objective.lower() == 'l1':
            objective = 'regression_l1'
        elif objective.lower() == 'l2':
            objective = 'regression_l2'
        elif objective.lower() == 'huber':
            objective = 'huber'
        elif objective.lower() == 'poisson':
            objective = 'poisson'
        elif objective.lower() == 'quantile':
            objective = 'quantile'
        else:
            objective = 'regression_l1'
        # Fix: previously logged self.num_leaves under the 'objective' label.
        lprint(f'objective: {objective}')

        # Setting max depth:
        max_depth = max(3, int(int(math.log2(self.num_leaves))) - 1)
        lprint(f'max_depth: {max_depth}')

        # Setting max bin:
        max_bin = 256 if dtype == numpy.uint8 else self.max_bin
        lprint(f'max_bin: {max_bin}')

        lprint(f'learning_rate: {self.learning_rate}')
        lprint(f'num_leaves: {self.num_leaves}')

        params = {
            "device": "cpu",
            "boosting_type": "gbdt",
            'objective': objective,
            "learning_rate": self.learning_rate,
            "num_leaves": self.num_leaves,
            "max_depth": max_depth,
            "max_bin": max_bin,
            "subsample_for_bin": 200000,
            "num_threads": max(1, int(self.compute_load * multiprocessing.cpu_count())),
            "metric": self.metric.lower(),
            'verbosity': -1,
            "bagging_freq": 1,
            "bagging_fraction": 0.8,
            "lambda_l1": 0.01,
            "lambda_l2": 0.01,
        }

        return params

    def _fit(
        self, x_train, y_train, x_valid=None, y_valid=None, regressor_callback=None
    ):
        """Train a LightGBM model and return it wrapped in ``_LGBMModel``.

        Parameters
        ----------
        x_train, y_train
            Training features and targets.
        x_valid, y_valid
            Optional validation features/targets; enable early stopping and
            validation-loss tracking.
        regressor_callback
            Optional callable invoked as ``(iteration, val_loss, model)``
            after every boosting round.
        """
        with lsection("GBM regressor fitting:"):

            nb_data_points = y_train.shape[0]
            self.num_features = x_train.shape[-1]
            has_valid_dataset = x_valid is not None and y_valid is not None

            lprint(f"Number of data points: {nb_data_points}")
            if has_valid_dataset:
                lprint(f"Number of validation data points: {y_valid.shape[0]}")
            lprint(f"Number of features per data point: {self.num_features}")

            train_dataset = lightgbm.Dataset(x_train, y_train)
            valid_dataset = (
                lightgbm.Dataset(x_valid, y_valid) if has_valid_dataset else None
            )

            self.__epoch_counter = 0

            # We translate the iteration callback into a lightGBM callback:
            # this avoids propagating 'evaluation_result_list[0][2]'
            # throughout the codebase...
            def lgbm_callback(env):
                try:
                    val_loss = env.evaluation_result_list[0][2]
                except Exception as e:
                    # Boundary with third-party callback env: log and carry on
                    # with a neutral loss value.
                    val_loss = 0
                    lprint("Problem with getting loss from LightGBM 'env' in callback")
                    print(str(e))
                if regressor_callback:
                    regressor_callback(env.iteration, val_loss, env.model)
                else:
                    lprint(f"Epoch {self.__epoch_counter}: Validation loss: {val_loss}")
                self.__epoch_counter += 1

            evals_result = {}
            self.early_stopping_callback = early_stopping(
                self, self.early_stopping_rounds
            )

            with lsection("GBM regressor fitting now:"):
                model = lightgbm.train(
                    params=self._get_params(nb_data_points, dtype=x_train.dtype),
                    init_model=None,
                    train_set=train_dataset,
                    valid_sets=[valid_dataset, train_dataset]
                    if self.compute_training_loss
                    else valid_dataset,
                    # Early stopping is handled by our own callback above; the
                    # previous 'None if has_valid_dataset else None' always
                    # evaluated to None anyway.
                    early_stopping_rounds=None,
                    num_boost_round=self.max_num_estimators,
                    callbacks=[
                        lgbm_callback,
                        self.early_stopping_callback,
                        record_evaluation(evals_result),
                    ]
                    if has_valid_dataset
                    else [lgbm_callback],
                )
                lprint("GBM fitting done.")

            # Free the (potentially large) dataset wrappers promptly.
            del train_dataset
            del valid_dataset

            if has_valid_dataset:
                self.last_valid_loss = evals_result['valid_0'][self.metric][-1]

            # NOTE(review): when no validation set is given, evals_result has
            # no 'valid_0' entry and the lookup below raises KeyError —
            # callers apparently always provide a validation set; confirm.
            if self.compute_training_loss:
                loss_history = {
                    'training': evals_result['training'][self.metric],
                    'validation': evals_result['valid_0'][self.metric],
                }
            else:
                loss_history = {'validation': evals_result['valid_0'][self.metric]}

            gc.collect()
            return _LGBMModel(model, self.inference_mode, loss_history)
class _LGBMModel:
def __init__(self, model, inference_mode, loss_history):
self.model: Booster = model
self.inference_mode = inference_mode
self.loss_history = loss_history
    def _save_internals(self, path: str):
        """Persist the LightGBM Booster under ``path`` (no-op without a model)."""
        if self.model is not None:
            lgbm_model_file = join(path, 'lgbm_model.txt')
            self.model.save_model(lgbm_model_file)
    def _load_internals(self, path: str):
        """Reload the Booster previously written by ``_save_internals``."""
        lgbm_model_file = join(path, 'lgbm_model.txt')
        self.model = Booster(model_file=lgbm_model_file)
# We exclude certain fields from saving:
def __getstate__(self):
state = self.__dict__.copy()
del state['model']
return state
def predict(self, x):
with lsection("GBM regressor prediction:"):
lprint(f"Number of data points : {x.shape[0]}")
lprint(f"Number of features per data points: {x.shape[-1]}")
# we decide here what 'auto' means:
if self.inference_mode == 'auto':
if x.shape[0] > 5e6:
# Lleaves takes a long time to compile models, so only
# interesting for very large inferences!
self.inference_mode = 'lleaves'
else:
self.inference_mode = 'lgbm'
lprint("GBM regressor predicting now...")
if self.inference_mode == 'opencl' and find_spec('pyopencl'):
try:
return self._predict_opencl(x)
except Exception:
# printing stack trace
# traceback.print_exc()
lprint("Failed OpenCL-based regression!")
if self.inference_mode == 'lleaves' and find_spec('lleaves'):
try:
return self._predict_lleaves(x)
except Exception:
# printing stack trace
# traceback.print_exc()
lprint("Failed lleaves-based regression!")
# This must work!
return self._predict_lgbm(x)
def _predict_lleaves(self, x):
with lsection("Attempting lleaves-based regression."):
# Creating lleaves model and compiling it:
with lsection("Model saving and compilation"):
# Creating temporary file:
with tempfile.NamedTemporaryFile() as temp_file:
# Saving LGBM model:
self.model.save_model(
temp_file.name, num_iteration=self.model.best_iteration
)
import lleaves
llvm_model = lleaves.Model(model_file=temp_file.name)
llvm_model.compile()
prediction = llvm_model.predict(x)
return prediction
def _predict_opencl(self, x):
with lsection("Attempting lleaves-based regression."):
from aydin.regression.gbm_utils.opencl_prediction import GBMOpenCLPrediction
if self.opencl_predictor is None:
self.opencl_predictor = GBMOpenCLPrediction()
prediction = self.opencl_predictor.predict(
self.model, x, num_iteration=self.model.best_iteration
)
# We clear the OpenCL ressources:
del self.opencl_predictor
self.opencl_predictor = None
return prediction
def _predict_lgbm(self, x):
prediction = self.model.predict(x, num_iteration=self.model.best_iteration)
# LGBM is annoying, it spits out float64s
prediction = prediction.astype(numpy.float32, copy=False)
lprint("GBM regressor predicting done!")
return prediction | aydin/regression/lgbm.py | import gc
import math
import multiprocessing
import tempfile
from importlib.util import find_spec
from os.path import join
from typing import Optional
import lightgbm
import numpy
from lightgbm import Booster, record_evaluation
from aydin.regression.base import RegressorBase
from aydin.regression.gbm_utils.callbacks import early_stopping
from aydin.util.log.log import lsection, lprint
class LGBMRegressor(RegressorBase):
"""
The LightGBM Regressor uses the gradient boosting library <a
href="https://github.com/microsoft/LightGBM">LightGBM</a> to perform
regression from a set of feature vectors and target values. LightGBM is a
solid library but we do yet support GPU training and inference. Because
of lack of GPU support LightGBM is slower than CatBoost, sometimes
LightGBM gives better results than Catbboost, but not often enough to
justify the loss of speed.
"""
def __init__(
self,
num_leaves: Optional[int] = None,
max_num_estimators: Optional[int] = None,
max_bin: int = 512,
learning_rate: Optional[float] = None,
loss: str = 'l1',
patience: int = 5,
verbosity: int = -1,
compute_load: float = 0.95,
inference_mode: str = None,
compute_training_loss: bool = False,
):
"""Constructs a LightGBM regressor.
Parameters
----------
num_leaves
Number of leaves in the decision trees.
We recommend values between 128 and 512.
(advanced)
max_num_estimators
Maximum number of estimators (trees). Typical values range from 1024
to 4096. Use larger values for more difficult datasets. If training
stops exactly at these values that is a sign you need to increase this
number. Quality of the results typically increases with the number of
estimators, but so does computation time too.
We do not recommend using a value of more than 10000.
max_bin
Maximum number of allowed bins. The features are quantised into that
many bins. Higher values achieve better quantisation of features but
also leads to longer training and more memory consumption. We do not
recommend changing this parameter.
(advanced)
learning_rate
Learning rate for the catboost model. The learning rate is determined
automatically if the value None is given. We recommend values around 0.01.
(advanced)
loss
Type of loss to be used. Van be 'l1' for L1 loss (MAE), and 'l2' for
L2 loss (RMSE), 'huber' for Huber loss, 'poisson' for Poisson loss,
and 'quantile' for Auantile loss. We recommend using: 'l1'.
(advanced)
patience
Number of rounds after which training stops if no improvement occurs.
(advanced)
verbosity
Verbosity setting of LightGBM.
(advanced)
compute_load
Allowed load on computational resources in percentage, typically used
for CPU training when deciding on how many available cores to use.
(advanced)
inference_mode : str
Choses inference mode: can be 'opencl' for an OpenCL backend,
'lleaves' for the very fast lleaves library (only OSX and Linux),
'lgbm' for the standard lightGBM inference engine, and 'auto' (or None)
tries the best/fastest options first and fallback to lightGBM default
inference.
(advanced)
compute_training_loss : bool
Flag to tell LightGBM whether to compute training loss or not
(advanced)
"""
super().__init__()
self.force_verbose_eval = False
self.num_leaves = 512 if num_leaves is None else num_leaves
self.max_num_estimators = (
int(1e4) if max_num_estimators is None else max_num_estimators
)
self.max_bin = max_bin
self.learning_rate = 0.01 if learning_rate is None else learning_rate
self.metric = loss
self.early_stopping_rounds = patience
self.verbosity = verbosity
self.compute_load = compute_load
self.inference_mode = 'auto' if inference_mode is None else inference_mode
self.compute_training_loss = compute_training_loss # This can be expensive
self.opencl_predictor = None
with lsection("LGBM Regressor"):
lprint(f"learning rate: {self.learning_rate}")
lprint(f"number of leaves: {self.num_leaves}")
lprint(f"max bin: {self.max_bin}")
lprint(f"n_estimators: {self.max_num_estimators}")
lprint(f"patience: {self.early_stopping_rounds}")
lprint(f"inference_mode: {self.inference_mode}")
def _get_params(self, num_samples, dtype=numpy.float32):
# min_data_in_leaf = 20 + int(0.01 * (num_samples / self.num_leaves))
# Preparing objective:
objective = self.metric
if objective.lower() == 'l1':
objective = 'regression_l1'
elif objective.lower() == 'l2':
objective = 'regression_l2'
elif objective.lower() == 'huber':
objective = 'huber'
elif objective.lower() == 'poisson':
objective = 'poisson'
elif objective.lower() == 'quantile':
objective = 'quantile'
else:
objective = 'regression_l1'
lprint(f'objective: {self.num_leaves}')
# Setting max depth:
max_depth = max(3, int(int(math.log2(self.num_leaves))) - 1)
lprint(f'max_depth: {max_depth}')
# Setting max bin:
max_bin = 256 if dtype == numpy.uint8 else self.max_bin
lprint(f'max_bin: {max_bin}')
lprint(f'learning_rate: {self.learning_rate}')
lprint(f'num_leaves: {self.num_leaves}')
params = {
"device": "cpu",
"boosting_type": "gbdt",
'objective': objective,
"learning_rate": self.learning_rate,
"num_leaves": self.num_leaves,
"max_depth": max_depth,
"max_bin": max_bin,
"subsample_for_bin": 200000,
"num_threads": max(1, int(self.compute_load * multiprocessing.cpu_count())),
"metric": self.metric.lower(),
'verbosity': -1,
"bagging_freq": 1,
"bagging_fraction": 0.8,
"lambda_l1": 0.01,
"lambda_l2": 0.01,
}
return params
def _fit(
self, x_train, y_train, x_valid=None, y_valid=None, regressor_callback=None
):
with lsection("GBM regressor fitting:"):
nb_data_points = y_train.shape[0]
self.num_features = x_train.shape[-1]
has_valid_dataset = x_valid is not None and y_valid is not None
lprint(f"Number of data points: {nb_data_points}")
if has_valid_dataset:
lprint(f"Number of validation data points: {y_valid.shape[0]}")
lprint(f"Number of features per data point: {self.num_features}")
train_dataset = lightgbm.Dataset(x_train, y_train)
valid_dataset = (
lightgbm.Dataset(x_valid, y_valid) if has_valid_dataset else None
)
self.__epoch_counter = 0
# We translate the it fgr callback into a lightGBM callback:
# This avoids propagating annoying 'evaluation_result_list[0][2]'
# throughout the codebase...
def lgbm_callback(env):
try:
val_loss = env.evaluation_result_list[0][2]
except Exception as e:
val_loss = 0
lprint("Problem with getting loss from LightGBM 'env' in callback")
print(str(e))
if regressor_callback:
regressor_callback(env.iteration, val_loss, env.model)
else:
lprint(f"Epoch {self.__epoch_counter}: Validation loss: {val_loss}")
self.__epoch_counter += 1
evals_result = {}
self.early_stopping_callback = early_stopping(
self, self.early_stopping_rounds
)
with lsection("GBM regressor fitting now:"):
model = lightgbm.train(
params=self._get_params(nb_data_points, dtype=x_train.dtype),
init_model=None,
train_set=train_dataset,
valid_sets=[valid_dataset, train_dataset]
if self.compute_training_loss
else valid_dataset,
early_stopping_rounds=None if has_valid_dataset else None,
num_boost_round=self.max_num_estimators,
callbacks=[
lgbm_callback,
self.early_stopping_callback,
record_evaluation(evals_result),
]
if has_valid_dataset
else [lgbm_callback],
)
lprint("GBM fitting done.")
del train_dataset
del valid_dataset
if has_valid_dataset:
self.last_valid_loss = evals_result['valid_0'][self.metric][-1]
if self.compute_training_loss:
loss_history = {
'training': evals_result['training'][self.metric],
'validation': evals_result['valid_0'][self.metric],
}
else:
loss_history = {'validation': evals_result['valid_0'][self.metric]}
gc.collect()
return _LGBMModel(model, self.inference_mode, loss_history)
class _LGBMModel:
def __init__(self, model, inference_mode, loss_history):
self.model: Booster = model
self.inference_mode = inference_mode
self.loss_history = loss_history
def _save_internals(self, path: str):
if self.model is not None:
lgbm_model_file = join(path, 'lgbm_model.txt')
self.model.save_model(lgbm_model_file)
def _load_internals(self, path: str):
lgbm_model_file = join(path, 'lgbm_model.txt')
self.model = Booster(model_file=lgbm_model_file)
# We exclude certain fields from saving:
def __getstate__(self):
state = self.__dict__.copy()
del state['model']
return state
def predict(self, x):
with lsection("GBM regressor prediction:"):
lprint(f"Number of data points : {x.shape[0]}")
lprint(f"Number of features per data points: {x.shape[-1]}")
# we decide here what 'auto' means:
if self.inference_mode == 'auto':
if x.shape[0] > 5e6:
# Lleaves takes a long time to compile models, so only
# interesting for very large inferences!
self.inference_mode = 'lleaves'
else:
self.inference_mode = 'lgbm'
lprint("GBM regressor predicting now...")
if self.inference_mode == 'opencl' and find_spec('pyopencl'):
try:
return self._predict_opencl(x)
except Exception:
# printing stack trace
# traceback.print_exc()
lprint("Failed OpenCL-based regression!")
if self.inference_mode == 'lleaves' and find_spec('lleaves'):
try:
return self._predict_lleaves(x)
except Exception:
# printing stack trace
# traceback.print_exc()
lprint("Failed lleaves-based regression!")
# This must work!
return self._predict_lgbm(x)
def _predict_lleaves(self, x):
with lsection("Attempting lleaves-based regression."):
# Creating lleaves model and compiling it:
with lsection("Model saving and compilation"):
# Creating temporary file:
with tempfile.NamedTemporaryFile() as temp_file:
# Saving LGBM model:
self.model.save_model(
temp_file.name, num_iteration=self.model.best_iteration
)
import lleaves
llvm_model = lleaves.Model(model_file=temp_file.name)
llvm_model.compile()
prediction = llvm_model.predict(x)
return prediction
def _predict_opencl(self, x):
with lsection("Attempting lleaves-based regression."):
from aydin.regression.gbm_utils.opencl_prediction import GBMOpenCLPrediction
if self.opencl_predictor is None:
self.opencl_predictor = GBMOpenCLPrediction()
prediction = self.opencl_predictor.predict(
self.model, x, num_iteration=self.model.best_iteration
)
# We clear the OpenCL ressources:
del self.opencl_predictor
self.opencl_predictor = None
return prediction
def _predict_lgbm(self, x):
prediction = self.model.predict(x, num_iteration=self.model.best_iteration)
# LGBM is annoying, it spits out float64s
prediction = prediction.astype(numpy.float32, copy=False)
lprint("GBM regressor predicting done!")
return prediction | 0.887137 | 0.472197 |
name = "seed"
shortDesc = ""
longDesc = """
"""
autoGenerated=True
entry(
index = 0,
label = "H + H <=> H2",
degeneracy = 0.5,
duplicate = True,
kinetics = Arrhenius(A=(5.45e+10,'cm^3/(mol*s)'), n=0, Ea=(6.276,'kJ/mol'), T0=(1,'K'), Tmin=(278,'K'), Tmax=(372,'K'), comment="""Matched reaction 56 H + H <=> H2 in R_Recombination/training
This reaction matched rate rule [Root_1R->H_N-2R-inRing_2R->H]
family: R_Recombination"""),
longDesc =
"""
Matched reaction 56 H + H <=> H2 in R_Recombination/training
This reaction matched rate rule [Root_1R->H_N-2R-inRing_2R->H]
family: R_Recombination
""",
)
entry(
index = 1,
label = "H + H <=> H2",
degeneracy = 0.5,
duplicate = True,
kinetics = Arrhenius(A=(5.45e+10,'cm^3/(mol*s)'), n=0, Ea=(6.276,'kJ/mol'), T0=(1,'K'), Tmin=(278,'K'), Tmax=(372,'K'), comment="""Matched reaction 56 H + H <=> H2 in R_Recombination/training
This reaction matched rate rule [Root_1R->H_N-2R-inRing_2R->H]
family: R_Recombination"""),
longDesc =
"""
Matched reaction 56 H + H <=> H2 in R_Recombination/training
This reaction matched rate rule [Root_1R->H_N-2R-inRing_2R->H]
family: R_Recombination
""",
)
entry(
index = 2,
label = "O2 + H2 <=> H + [O]O",
degeneracy = 4.0,
kinetics = Arrhenius(A=(2.9e+14,'cm^3/(mol*s)','*|/',5), n=0, Ea=(236.982,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(800,'K'), comment="""Matched reaction 306 H2 + O2 <=> HO2_r12 + H in H_Abstraction/training
This reaction matched rate rule [H2;O2b]
family: H_Abstraction"""),
longDesc =
"""
Matched reaction 306 H2 + O2 <=> HO2_r12 + H in H_Abstraction/training
This reaction matched rate rule [H2;O2b]
family: H_Abstraction
""",
)
entry(
index = 3,
label = "O2 + H <=> [O]O",
degeneracy = 2.0,
kinetics = Arrhenius(A=(8.79e+10,'cm^3/(mol*s)'), n=1, Ea=(1.8828,'kJ/mol'), T0=(1,'K'), Tmin=(298,'K'), Tmax=(6000,'K'), comment="""Matched reaction 104 O2 + H <=> HO2-2 in R_Recombination/training
This reaction matched rate rule [Root_1R->H_N-2R-inRing_N-2R->H_N-2CNOS->S_N-2CNO->C_Ext-2NO-R_N-2NO->N_N-3R!H->C]
family: R_Recombination"""),
longDesc =
"""
Matched reaction 104 O2 + H <=> HO2-2 in R_Recombination/training
This reaction matched rate rule [Root_1R->H_N-2R-inRing_N-2R->H_N-2CNOS->S_N-2CNO->C_Ext-2NO-R_N-2NO->N_N-3R!H->C]
family: R_Recombination
""",
)
entry(
index = 4,
label = "H + OO <=> [O]O + H2",
degeneracy = 2.0,
kinetics = Arrhenius(A=(1.23502,'m^3/(mol*s)'), n=1.634, Ea=(25.4634,'kJ/mol'), T0=(1,'K'), comment="""Estimated using average of templates [O/H/NonDeO;H_rad] + [H2O2;Y_rad] for rate rule [H2O2;H_rad]
Euclidian distance = 1.0
Multiplied by reaction path degeneracy 2.0
family: H_Abstraction"""),
longDesc =
"""
Estimated using average of templates [O/H/NonDeO;H_rad] + [H2O2;Y_rad] for rate rule [H2O2;H_rad]
Euclidian distance = 1.0
Multiplied by reaction path degeneracy 2.0
family: H_Abstraction
""",
)
entry(
index = 5,
label = "H + [O]O <=> OO",
degeneracy = 1.0,
kinetics = Arrhenius(A=(5250.69,'m^3/(mol*s)'), n=1.27262, Ea=(0,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(1500,'K'), uncertainty=RateUncertainty(mu=0.0, var=33.1368631905, Tref=1000.0, N=1, correlation='Root_1R->H_N-2R-inRing_N-2R->H_N-2CNOS->S_N-2CNO->C_Ext-2NO-R_N-2NO->N_N-3R!H->C',), comment="""BM rule fitted to 2 training reactions at node Root_1R->H_N-2R-inRing_N-2R->H_N-2CNOS->S_N-2CNO->C_Ext-2NO-R_N-2NO->N_N-3R!H->C
Total Standard Deviation in ln(k): 11.5401827615
Exact match found for rate rule [Root_1R->H_N-2R-inRing_N-2R->H_N-2CNOS->S_N-2CNO->C_Ext-2NO-R_N-2NO->N_N-3R!H->C]
Euclidian distance = 0
family: R_Recombination"""),
longDesc =
"""
BM rule fitted to 2 training reactions at node Root_1R->H_N-2R-inRing_N-2R->H_N-2CNOS->S_N-2CNO->C_Ext-2NO-R_N-2NO->N_N-3R!H->C
Total Standard Deviation in ln(k): 11.5401827615
Exact match found for rate rule [Root_1R->H_N-2R-inRing_N-2R->H_N-2CNOS->S_N-2CNO->C_Ext-2NO-R_N-2NO->N_N-3R!H->C]
Euclidian distance = 0
family: R_Recombination
""",
)
entry(
index = 6,
label = "[O]O + [O]O <=> O2 + OO",
degeneracy = 1.0,
kinetics = Arrhenius(A=(1.75e+10,'cm^3/(mol*s)'), n=0, Ea=(-13.7026,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(1500,'K'), comment="""Matched reaction 405 HO2_r3 + HO2_r12 <=> H2O2 + O2 in H_Abstraction/training
This reaction matched rate rule [Orad_O_H;O_rad/NonDeO]
family: H_Abstraction"""),
longDesc =
"""
Matched reaction 405 HO2_r3 + HO2_r12 <=> H2O2 + O2 in H_Abstraction/training
This reaction matched rate rule [Orad_O_H;O_rad/NonDeO]
family: H_Abstraction
""",
) | rmgpy/rmg/test_data/restartTest/seed_no_filters/seed/reactions.py |
name = "seed"
shortDesc = ""
longDesc = """
"""
autoGenerated=True
entry(
index = 0,
label = "H + H <=> H2",
degeneracy = 0.5,
duplicate = True,
kinetics = Arrhenius(A=(5.45e+10,'cm^3/(mol*s)'), n=0, Ea=(6.276,'kJ/mol'), T0=(1,'K'), Tmin=(278,'K'), Tmax=(372,'K'), comment="""Matched reaction 56 H + H <=> H2 in R_Recombination/training
This reaction matched rate rule [Root_1R->H_N-2R-inRing_2R->H]
family: R_Recombination"""),
longDesc =
"""
Matched reaction 56 H + H <=> H2 in R_Recombination/training
This reaction matched rate rule [Root_1R->H_N-2R-inRing_2R->H]
family: R_Recombination
""",
)
entry(
index = 1,
label = "H + H <=> H2",
degeneracy = 0.5,
duplicate = True,
kinetics = Arrhenius(A=(5.45e+10,'cm^3/(mol*s)'), n=0, Ea=(6.276,'kJ/mol'), T0=(1,'K'), Tmin=(278,'K'), Tmax=(372,'K'), comment="""Matched reaction 56 H + H <=> H2 in R_Recombination/training
This reaction matched rate rule [Root_1R->H_N-2R-inRing_2R->H]
family: R_Recombination"""),
longDesc =
"""
Matched reaction 56 H + H <=> H2 in R_Recombination/training
This reaction matched rate rule [Root_1R->H_N-2R-inRing_2R->H]
family: R_Recombination
""",
)
entry(
index = 2,
label = "O2 + H2 <=> H + [O]O",
degeneracy = 4.0,
kinetics = Arrhenius(A=(2.9e+14,'cm^3/(mol*s)','*|/',5), n=0, Ea=(236.982,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(800,'K'), comment="""Matched reaction 306 H2 + O2 <=> HO2_r12 + H in H_Abstraction/training
This reaction matched rate rule [H2;O2b]
family: H_Abstraction"""),
longDesc =
"""
Matched reaction 306 H2 + O2 <=> HO2_r12 + H in H_Abstraction/training
This reaction matched rate rule [H2;O2b]
family: H_Abstraction
""",
)
entry(
index = 3,
label = "O2 + H <=> [O]O",
degeneracy = 2.0,
kinetics = Arrhenius(A=(8.79e+10,'cm^3/(mol*s)'), n=1, Ea=(1.8828,'kJ/mol'), T0=(1,'K'), Tmin=(298,'K'), Tmax=(6000,'K'), comment="""Matched reaction 104 O2 + H <=> HO2-2 in R_Recombination/training
This reaction matched rate rule [Root_1R->H_N-2R-inRing_N-2R->H_N-2CNOS->S_N-2CNO->C_Ext-2NO-R_N-2NO->N_N-3R!H->C]
family: R_Recombination"""),
longDesc =
"""
Matched reaction 104 O2 + H <=> HO2-2 in R_Recombination/training
This reaction matched rate rule [Root_1R->H_N-2R-inRing_N-2R->H_N-2CNOS->S_N-2CNO->C_Ext-2NO-R_N-2NO->N_N-3R!H->C]
family: R_Recombination
""",
)
entry(
index = 4,
label = "H + OO <=> [O]O + H2",
degeneracy = 2.0,
kinetics = Arrhenius(A=(1.23502,'m^3/(mol*s)'), n=1.634, Ea=(25.4634,'kJ/mol'), T0=(1,'K'), comment="""Estimated using average of templates [O/H/NonDeO;H_rad] + [H2O2;Y_rad] for rate rule [H2O2;H_rad]
Euclidian distance = 1.0
Multiplied by reaction path degeneracy 2.0
family: H_Abstraction"""),
longDesc =
"""
Estimated using average of templates [O/H/NonDeO;H_rad] + [H2O2;Y_rad] for rate rule [H2O2;H_rad]
Euclidian distance = 1.0
Multiplied by reaction path degeneracy 2.0
family: H_Abstraction
""",
)
entry(
index = 5,
label = "H + [O]O <=> OO",
degeneracy = 1.0,
kinetics = Arrhenius(A=(5250.69,'m^3/(mol*s)'), n=1.27262, Ea=(0,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(1500,'K'), uncertainty=RateUncertainty(mu=0.0, var=33.1368631905, Tref=1000.0, N=1, correlation='Root_1R->H_N-2R-inRing_N-2R->H_N-2CNOS->S_N-2CNO->C_Ext-2NO-R_N-2NO->N_N-3R!H->C',), comment="""BM rule fitted to 2 training reactions at node Root_1R->H_N-2R-inRing_N-2R->H_N-2CNOS->S_N-2CNO->C_Ext-2NO-R_N-2NO->N_N-3R!H->C
Total Standard Deviation in ln(k): 11.5401827615
Exact match found for rate rule [Root_1R->H_N-2R-inRing_N-2R->H_N-2CNOS->S_N-2CNO->C_Ext-2NO-R_N-2NO->N_N-3R!H->C]
Euclidian distance = 0
family: R_Recombination"""),
longDesc =
"""
BM rule fitted to 2 training reactions at node Root_1R->H_N-2R-inRing_N-2R->H_N-2CNOS->S_N-2CNO->C_Ext-2NO-R_N-2NO->N_N-3R!H->C
Total Standard Deviation in ln(k): 11.5401827615
Exact match found for rate rule [Root_1R->H_N-2R-inRing_N-2R->H_N-2CNOS->S_N-2CNO->C_Ext-2NO-R_N-2NO->N_N-3R!H->C]
Euclidian distance = 0
family: R_Recombination
""",
)
entry(
index = 6,
label = "[O]O + [O]O <=> O2 + OO",
degeneracy = 1.0,
kinetics = Arrhenius(A=(1.75e+10,'cm^3/(mol*s)'), n=0, Ea=(-13.7026,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(1500,'K'), comment="""Matched reaction 405 HO2_r3 + HO2_r12 <=> H2O2 + O2 in H_Abstraction/training
This reaction matched rate rule [Orad_O_H;O_rad/NonDeO]
family: H_Abstraction"""),
longDesc =
"""
Matched reaction 405 HO2_r3 + HO2_r12 <=> H2O2 + O2 in H_Abstraction/training
This reaction matched rate rule [Orad_O_H;O_rad/NonDeO]
family: H_Abstraction
""",
) | 0.390476 | 0.318267 |
class MinIntHeap(object):
    """Array-backed binary min-heap of integers.

    add() is O(log n), peek() is O(1), poll() is O(log n).
    peek()/poll() raise IllegalStateException on an empty heap.
    """

    def __init__(self):
        self.__capacity = 10  # number of allocated slots in the backing list
        self.__size = 0       # number of live items
        self.__items = [0] * self.__capacity

    def __get_left_child_index(self, parent_index):
        return 2 * parent_index + 1

    def __get_right_child_index(self, parent_index):
        return 2 * parent_index + 2

    def __get_parent_index(self, child_index):
        # Fixed: use floor division. int((i - 1) / 2) truncates toward zero,
        # which made the root (i == 0) its own parent; (i - 1) // 2 yields -1
        # for the root so __has_parent(0) is correctly False.
        return (child_index - 1) // 2

    def __has_left_child(self, index):
        return self.__get_left_child_index(index) < self.__size

    def __has_right_child(self, index):
        return self.__get_right_child_index(index) < self.__size

    def __has_parent(self, index):
        return self.__get_parent_index(index) >= 0

    def __left_child(self, index):
        return self.__items[self.__get_left_child_index(index)]

    def __right_child(self, index):
        return self.__items[self.__get_right_child_index(index)]

    def __parent(self, index):
        return self.__items[self.__get_parent_index(index)]

    def __swap(self, index, other_index):
        self.__items[index], self.__items[other_index] = self.__items[other_index], self.__items[index]

    def __ensure_extra_capacity(self):
        # Fixed: grow by exactly one extra capacity so len(self.__items) stays
        # equal to self.__capacity. The original appended 2x capacity zeros but
        # only doubled __capacity, desynchronizing the two.
        if self.__size == self.__capacity:
            self.__items += [0] * self.__capacity
            self.__capacity *= 2

    def peek(self):
        """Return the smallest item without removing it.

        Raises IllegalStateException if the heap is empty.
        """
        if self.__size == 0:
            raise IllegalStateException
        return self.__items[0]

    def poll(self):
        """Remove and return the smallest item.

        Raises IllegalStateException if the heap is empty.
        """
        if self.__size == 0:
            raise IllegalStateException
        item = self.__items[0]
        # Move the last item to the root, then restore the heap property.
        self.__items[0] = self.__items[self.__size - 1]
        self.__size -= 1
        self.__items[self.__size] = 0  # clear the now-stale tail slot
        self.__heapify_down()
        return item

    def add(self, item):
        """Insert an item, growing the backing list if it is full."""
        self.__ensure_extra_capacity()
        self.__items[self.__size] = item
        self.__size += 1
        self.__heapify_up()

    def __heapify_up(self):
        # Bubble the newly appended item up while it is smaller than its parent.
        index = self.__size - 1
        while (self.__has_parent(index) and self.__parent(index) > self.__items[index]):
            self.__swap(self.__get_parent_index(index), index)
            index = self.__get_parent_index(index)

    def __heapify_down(self):
        # Sink the root down, always swapping with the smaller child.
        index = 0
        while self.__has_left_child(index):
            smaller_child_index = self.__get_left_child_index(index)
            if self.__has_right_child(index) and self.__right_child(index) < self.__left_child(index):
                smaller_child_index = self.__get_right_child_index(index)
            if self.__items[index] < self.__items[smaller_child_index]:
                break
            self.__swap(index, smaller_child_index)
            index = smaller_child_index

    def print_items(self):
        """Print the raw backing list (includes zero padding past __size)."""
        print(self.__items)
# Raised when peek() or poll() is called on an empty heap
# (mirrors Java's java.lang.IllegalStateException naming).
class IllegalStateException(Exception):
pass | src/min_int_heap/min_int_heap.py | class MinIntHeap(object):
def __init__(self):
self.__capacity = 10
self.__size = 0
self.__items = [0] * self.__capacity
def __get_left_child_index(self, parent_index):
return 2 * parent_index + 1
def __get_right_child_index(self, parent_index):
return 2 * parent_index + 2
def __get_parent_index(self, child_index):
return int((child_index - 1) / 2)
def __has_left_child(self, index):
return self.__get_left_child_index(index) < self.__size
def __has_right_child(self, index):
return self.__get_right_child_index(index) < self.__size
def __has_parent(self, index):
return self.__get_parent_index(index) >= 0
def __left_child(self, index):
return self.__items[self.__get_left_child_index(index)]
def __right_child(self, index):
return self.__items[self.__get_right_child_index(index)]
def __parent(self, index):
return self.__items[self.__get_parent_index(index)]
def __swap(self, index, other_index):
self.__items[index], self.__items[other_index] = self.__items[other_index], self.__items[index]
def __ensure_extra_capacity(self):
if self.__size == self.__capacity:
self.__items += [0] * self.__capacity * 2
self.__capacity *= 2
def peek(self):
if self.__size == 0:
raise IllegalStateException
return self.__items[0]
def poll(self):
if self.__size == 0:
raise IllegalStateException
item = self.__items[0]
self.__items[0] = self.__items[self.__size - 1]
self.__size -= 1
self.__heapify_down()
return item
def add(self, item):
self.__ensure_extra_capacity()
self.__items[self.__size] = item
self.__size += 1
self.__heapify_up()
def __heapify_up(self):
index = self.__size - 1
while (self.__has_parent(index) and self.__parent(index) > self.__items[index]):
self.__swap(self.__get_parent_index(index), index)
index = self.__get_parent_index(index)
def __heapify_down(self):
index = 0
while self.__has_left_child(index):
smaller_child_index = self.__get_left_child_index(index)
if self.__has_right_child(index) and self.__right_child(index) < self.__left_child(index):
smaller_child_index = self.__get_right_child_index(index)
if self.__items[index] < self.__items[smaller_child_index]:
break
else:
self.__swap(index, smaller_child_index)
index = smaller_child_index
def print_items(self):
print(self.__items)
# Signals peek()/poll() on an empty heap (Java-style exception name).
class IllegalStateException(Exception):
pass | 0.727298 | 0.330417 |
from solid import *
from solid.utils import * # Not required, but the utils module is useful
import math as M
from lib.item import Item
class ShellBase(Item):
    """Base model of a v-nose cargo-trailer shell.

    Builds a floor, a shelf inside the v-nose (with a round sink cutout),
    three framed windows around the nose, and a side door near the rear.
    """
    def __init__(self, dim):
        # dim: (width, length, height) of the rectangular box section —
        # presumably inches, since subclasses pass feet * 12; TODO confirm units.
        super().__init__(dim)
        self.color = (87, 117, 144)  # RGB render color of the shell
        self.door_w = 30  # door width
        self.door_off = 6  # door offset from the rear corner
        self.vnose_h = 42  # height of the v-nose shelf
        self.vnose_l = 24  # length the v-nose extends beyond the box
    @staticmethod
    def frame_cutout(size_v):
        # Return a (w, h) framed panel: a thin slab minus an inset hole,
        # leaving a 2-unit rim. Used for both windows and the door.
        (w,h) = size_v
        ft = 0.1 # frame thickness
        fw = 2 # frame edge width
        frame = cube([w, ft, h]) - \
            translate([fw,-ft,fw])(cube([w-2*fw, 3*ft, h-2*fw]))
        return frame
    def render(s):
        # Assemble all shell parts and return them as one colored union.
        (w,l,h) = s.getDim()
        # --- vnose shelf ---
        nose_s_w = w / 4.0 # width of angled segment
        nose_s_l = M.sqrt(s.vnose_l*s.vnose_l + nose_s_w*nose_s_w) # length of nose angled segment
        nose_s_a = M.atan2(s.vnose_l, nose_s_w) * 180 / M.pi  # segment angle in degrees
        nose_poly = polygon([(0,0), (nose_s_w,s.vnose_l), (w-nose_s_w,s.vnose_l), (w,0)])
        sink_cutout = translate([w/2.0, s.vnose_l/2.0, -1])(cylinder(r=6, h=3))
        nose_shelf = translate([0,l,s.vnose_h])(linear_extrude(height=1)(nose_poly) - sink_cutout)
        # --- floor ---
        # Main rectangle plus the nose trapezoid pushed out past the box.
        pf = square([w,l]) + forward(l)(nose_poly)
        floor = linear_extrude(height=0.001)(pf)
        # --- vnose windows ---
        # One window per angled nose face (vw1, vw2) plus the front face (vw3).
        vw_w = nose_s_l-2
        vw_h = h-s.vnose_h-3
        vw1 = rotate(nose_s_a)(right(1)(ShellBase.frame_cutout([vw_w,vw_h])))
        vw1 = translate([0,l,s.vnose_h+2])(vw1)
        vw2 = rotate(180-nose_s_a)(right(1)(ShellBase.frame_cutout([vw_w,vw_h])))
        vw2 = translate([w,l,s.vnose_h+2])(vw2)
        vw3 = ShellBase.frame_cutout([w-2*nose_s_w-2,vw_h])
        vw3 = translate([nose_s_w+1,l+s.vnose_l,s.vnose_h+2])(vw3)
        # --- door ---
        # Side door near the rear corner, rotated into the side wall plane.
        df = ShellBase.frame_cutout([s.door_w, h])
        door = translate([w, l - s.door_off - s.door_w, 0])(rotate(90)(df))
        u = union()(nose_shelf, door, vw1, vw2, vw3, floor)
        return s.c(s.color, u)
class SilverEagle(ShellBase):
    """Weeroll Silver Eagle: 6 ft x 12 ft footprint, 6 ft 3 in tall."""

    def __init__(self):
        # Dimensions in inches: 6', 12', 6'3".
        super().__init__([72, 144, 75.0])

    def desc(s):
        """Return the human-readable model description."""
        return "Weeroll Silver Eagle 6' x 12' x 6'3\""
# Weeroll Silver Star model: 79" (6'7") wide, 14' long, 6'3" tall.
class SilverStar(ShellBase):
    def __init__(self):
        # width in inches, length 14 ft, height 6 ft 3 in
        super().__init__([79, 12 * 14, 6.25 * 12])
    def desc(s):
return "Weeroll Silver Star 6'7\" x 14' x 6'3\"" | lib/shell.py | from solid import *
from solid.utils import * # Not required, but the utils module is useful
import math as M
from lib.item import Item
class ShellBase(Item):
    """Trailer shell: floor, v-nose shelf with sink cutout, windows, and a door."""

    def __init__(self, dim):
        """Store the box dimensions and the fixed feature sizes."""
        super().__init__(dim)
        self.color = (87, 117, 144)  # shell render color (RGB)
        self.door_w = 30             # door width
        self.door_off = 6            # door offset from the rear corner
        self.vnose_h = 42            # shelf height inside the v-nose
        self.vnose_l = 24            # v-nose protrusion length

    @staticmethod
    def frame_cutout(size_v):
        """Build a (width, height) framed panel: thin slab minus an inset hole."""
        width, height = size_v
        thickness = 0.1  # frame thickness
        rim = 2          # frame edge width
        hole = translate([rim, -thickness, rim])(
            cube([width - 2 * rim, 3 * thickness, height - 2 * rim])
        )
        return cube([width, thickness, height]) - hole

    def render(s):
        """Assemble the shell as a single colored union of its parts."""
        (w, l, h) = s.getDim()

        # V-nose shelf: trapezoid spanning the nose, with a round sink hole.
        seg_w = w / 4.0  # width of each angled nose segment
        seg_l = M.sqrt(s.vnose_l * s.vnose_l + seg_w * seg_w)  # angled segment length
        seg_a = M.atan2(s.vnose_l, seg_w) * 180 / M.pi  # segment angle (degrees)
        trapezoid = polygon([(0, 0), (seg_w, s.vnose_l), (w - seg_w, s.vnose_l), (w, 0)])
        sink = translate([w / 2.0, s.vnose_l / 2.0, -1])(cylinder(r=6, h=3))
        shelf = translate([0, l, s.vnose_h])(linear_extrude(height=1)(trapezoid) - sink)

        # Floor: main rectangle plus the nose trapezoid pushed forward.
        floor = linear_extrude(height=0.001)(square([w, l]) + forward(l)(trapezoid))

        # V-nose windows: one per angled face, plus one on the front face.
        win_w = seg_l - 2
        win_h = h - s.vnose_h - 3
        win_left = translate([0, l, s.vnose_h + 2])(
            rotate(seg_a)(right(1)(ShellBase.frame_cutout([win_w, win_h])))
        )
        win_right = translate([w, l, s.vnose_h + 2])(
            rotate(180 - seg_a)(right(1)(ShellBase.frame_cutout([win_w, win_h])))
        )
        win_front = translate([seg_w + 1, l + s.vnose_l, s.vnose_h + 2])(
            ShellBase.frame_cutout([w - 2 * seg_w - 2, win_h])
        )

        # Side door near the rear corner, rotated into the side wall plane.
        door = translate([w, l - s.door_off - s.door_w, 0])(
            rotate(90)(ShellBase.frame_cutout([s.door_w, h]))
        )

        parts = union()(shelf, door, win_left, win_right, win_front, floor)
        return s.c(s.color, parts)
class SilverEagle(ShellBase):
    """6' x 12' x 6'3" Weeroll Silver Eagle shell."""

    def __init__(self):
        # width, length, height in inches, expressed as feet * 12
        dims = [6 * 12, 12 * 12, 6.25 * 12]
        super().__init__(dims)

    def desc(s):
        """Return the marketing description of this model."""
        return "Weeroll Silver Eagle 6' x 12' x 6'3\""
class SilverStar(ShellBase):
    """Weeroll Silver Star model: 79 in (6'7") wide, 14 ft long, 6'3" tall."""
    def __init__(self):
        # 79 in wide, 14 ft long, 6 ft 3 in tall
        super().__init__([79, 12 * 14, 6.25 * 12])
    def desc(s):
return "Weeroll Silver Star 6'7\" x 14' x 6'3\"" | 0.714329 | 0.116312 |
from __future__ import unicode_literals
from . import views
from django.shortcuts import redirect
from django.contrib import admin
from . models import Event,Profile
import datetime
import calendar
from django.core.urlresolvers import reverse
from django.utils.safestring import mark_safe
from . utils import EventCalendar
# Register your models here.
class EventAdmin(admin.ModelAdmin):
    """Admin for Event that renders a monthly HTML calendar on the changelist."""

    # NOTE(review): ModelAdmin does not read an inner Meta class; this is
    # inert and looks like a copy-paste from a ModelForm — confirm and remove.
    class Meta:
        model = Event

    # Fixed: 'user' and 'end_time' were missing a comma between them and were
    # silently concatenated into the single nonexistent column 'userend_time'.
    list_display = ['day', 'start_time', 'user', 'end_time', 'notes']
    change_list_template = 'change_list.html'

    def changelist_view(self, request, extra_context=None):
        """Inject a rendered calendar and month navigation into the context.

        The displayed month comes from the 'day__gte' query parameter
        ('YYYY-MM-...'); any unparsable value falls back to today's month.
        """
        after_day = request.GET.get('day__gte', None)
        extra_context = extra_context or {}
        if not after_day:
            d = datetime.date.today()
        else:
            try:
                split_after_day = after_day.split('-')
                d = datetime.date(year=int(split_after_day[0]), month=int(split_after_day[1]), day=1)
            except (ValueError, IndexError):
                # Malformed filter value: fall back to the current month.
                d = datetime.date.today()
        previous_month = datetime.date(year=d.year, month=d.month, day=1)  # first day of current month
        previous_month = previous_month - datetime.timedelta(days=1)  # back up into previous month
        previous_month = datetime.date(year=previous_month.year, month=previous_month.month,
                                       day=1)  # first day of previous month
        last_day = calendar.monthrange(d.year, d.month)
        next_month = datetime.date(year=d.year, month=d.month, day=last_day[1])  # last day of current month
        next_month = next_month + datetime.timedelta(days=1)  # forward into next month
        next_month = datetime.date(year=next_month.year, month=next_month.month,
                                   day=1)  # first day of next month
        # NOTE(review): these store HttpResponseRedirect objects in the template
        # context; earlier commented-out code suggests URL strings were intended,
        # e.g. reverse('views.home') + '?day__gte=' + str(previous_month).
        # Behavior kept as-is — confirm against change_list.html before changing.
        extra_context['previous_month'] = redirect('home')
        extra_context['next_month'] = redirect('home')
        cal = EventCalendar()
        html_calendar = cal.formatmonth(d.year, d.month, withyear=True)
        # Widen calendar cells for readability in the admin.
        html_calendar = html_calendar.replace('<td ', '<td width="150" height="150"')
        extra_context['calendar'] = mark_safe(html_calendar)
        return super(EventAdmin, self).changelist_view(request, extra_context)
# Register Event with its calendar-enabled admin class.
admin.site.register(Event, EventAdmin)
admin.site.register(Profile) | app/admin.py | from __future__ import unicode_literals
from . import views
from django.shortcuts import redirect
from django.contrib import admin
from . models import Event,Profile
import datetime
import calendar
from django.core.urlresolvers import reverse
from django.utils.safestring import mark_safe
from . utils import EventCalendar
# Register your models here.
class EventAdmin(admin.ModelAdmin):
class Meta:
model = Event
list_display = ['day', 'start_time','user' 'end_time', 'notes']
change_list_template = 'change_list.html'
def changelist_view(self, request, extra_context=None):
after_day = request.GET.get('day__gte', None)
extra_context = extra_context or {}
if not after_day:
d = datetime.date.today()
else:
try:
split_after_day = after_day.split('-')
d = datetime.date(year=int(split_after_day[0]), month=int(split_after_day[1]), day=1)
except:
d = datetime.date.today()
previous_month = datetime.date(year=d.year, month=d.month, day=1) # find first day of current month
previous_month = previous_month - datetime.timedelta(days=1) # backs up a single day
previous_month = datetime.date(year=previous_month.year, month=previous_month.month,
day=1) # find first day of previous month
last_day = calendar.monthrange(d.year, d.month)
next_month = datetime.date(year=d.year, month=d.month, day=last_day[1]) # find last day of current month
next_month = next_month + datetime.timedelta(days=1) # forward a single day
next_month = datetime.date(year=next_month.year, month=next_month.month,
day=1) # find first day of next month
extra_context['previous_month'] = redirect('home')
# reverse('views.home') + '?day__gte=' + str(
# previous_month)
extra_context['next_month'] = redirect('home')
# reverse('views.home') + '?day__gte=' + str(next_month)
cal = EventCalendar()
html_calendar = cal.formatmonth(d.year, d.month, withyear=True)
html_calendar = html_calendar.replace('<td ', '<td width="150" height="150"')
extra_context['calendar'] = mark_safe(html_calendar)
return super(EventAdmin, self).changelist_view(request, extra_context)
admin.site.register(Event, EventAdmin)
admin.site.register(Profile) | 0.409103 | 0.089058 |
import threading
import time
import RPi.GPIO as GPIO
from tkinter import messagebox
class StepperMotor:
def __init__(self, pin_step, pin_direction, pin_calibration_microswitch, pin_safety_microswitch,
step_frequency, microswitch_bouncetime=300, calibration_timeout=20, name=""):
"""Interfaces with the steppermotors and limit switches.
Args:
pin_step: GPIO pin number for step pulse output
pin_direction: GPIO pin number for direction output
pin_calibration_microswitch: GPIO pin number for microswitch input in the normal direction
pin_safety_microswitch: GPIO pin number for microswitch input in reverse direction
step_frequency: Step frequency in steps per second
microswitch_bouncetime: Microswitch debounce time in ms
calibration_timeout: Calibration timeout in seconds
name: optional name for debugging purposes
"""
self.pin_step = pin_step
self.pin_direction = pin_direction
self.pin_calibration_microswitch = pin_calibration_microswitch # GPIO pin for microswitch input
self.pin_safety_microswitch = pin_safety_microswitch
self.step_frequency = step_frequency
self.default_step_frequency = step_frequency
self.calibration_timeout = calibration_timeout
self.microswitch_bouncetime = microswitch_bouncetime
self.name = name
self.reversed = False # If true then direction is reversed ie digital output HIGH
self.microswitch_hit_event = threading.Event() # Set when the microswitch is hit, cleared when start stepping
self.stop_step_event = threading.Event() # Set it to stop stepping. Cleared when start stepping.
self.lock_step_frequency = threading.Lock()
# Setup GPIO
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
GPIO.setup(self.pin_step, GPIO.OUT, initial=GPIO.LOW)
self.step_pwm = GPIO.PWM(self.pin_step, self.default_step_frequency)
GPIO.setup(self.pin_direction, GPIO.OUT, initial=GPIO.LOW)
# Setup interrupts for limit switches if used
self.ignore_interrupt = False
if self.pin_calibration_microswitch is not None and self.pin_safety_microswitch is not None:
GPIO.setup(self.pin_calibration_microswitch, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
GPIO.setup(self.pin_safety_microswitch, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
# Setup microswitch interrupt
GPIO.add_event_detect(self.pin_calibration_microswitch, GPIO.RISING, callback=self.microswitch_callback,
bouncetime=self.microswitch_bouncetime)
GPIO.add_event_detect(self.pin_safety_microswitch, GPIO.RISING, callback=self.microswitch_callback,
bouncetime=self.microswitch_bouncetime)
def enable_interrupts(self):
self.ignore_interrupt = False
def disable_interrupts(self):
self.ignore_interrupt = True
def start_step(self, count=None):
"""Start stepping
Args:
count: the number of steps to make. Since using the built in pwm this is only approximated by setting timer,
instead of actually counting the steps."""
self.stop_step_event.clear()
self.microswitch_hit_event.clear()
if count is not None:
# Move a set amount of steps with the default speed if count is given
self.step_frequency = self.default_step_frequency
threading.Timer(self.step_frequency / count, self.stop_step).start()
self.step_pwm.start(50)
def stop_step(self):
"""Stop stepping"""
self.stop_step_event.set()
self.step_pwm.stop()
self.microswitch_hit_event.clear()
def set_duty_cycle(self, value):
"""Set pwm duty cycle"""
self.step_pwm.ChangeDutyCycle(value)
def reverse(self, setting):
"""Reverse motor direction
Args:
setting: optional set to True or False to choose a direction, if None the direction is inverted
"""
if setting is not None:
if setting:
GPIO.output(self.pin_direction, GPIO.LOW)
else:
GPIO.output(self.pin_direction, GPIO.HIGH)
self.reversed = setting
else:
if self.reversed:
GPIO.output(self.pin_direction, GPIO.HIGH)
else:
GPIO.output(self.pin_direction, GPIO.LOW)
self.reversed = not self.reversed
def calibrate(self):
"""Calibrate motor to zero position.
The motor is moved all the way to one side until the microswitch is hit."""
# check if switch is already pressed, if so then don't move -> the motor is already on its zero position
if GPIO.input(self.pin_calibration_microswitch) == GPIO.HIGH or GPIO.input(
self.pin_safety_microswitch) == GPIO.HIGH:
self.stop_step_event.set()
return
if self.pin_calibration_microswitch is not None:
self.reverse(False)
self.step_frequency = self.default_step_frequency
self.start_step()
if self.microswitch_hit_event.wait(self.calibration_timeout):
self.step_counter = 0
else:
self.stop_step()
messagebox.showerror('FOUT', 'Fout tijdens calibratie: Timeout (kalibreren duurt te lang)')
else:
messagebox.showerror('FOUT',
'Voor deze motor is geen eindschakelaar ingesteld en er kan niet worden gekalibreert')
def microswitch_callback(self, channel):
"""Interrupt callback. This function is called when the microswitch is pressed."""
if self.ignore_interrupt:
return
# Filter out interrupts caused by random noise by checking again after 10ms
time.sleep(0.01)
if GPIO.input(self.pin_calibration_microswitch) == GPIO.HIGH or GPIO.input(
self.pin_safety_microswitch) == GPIO.HIGH:
self.microswitch_hit_event.set()
self.stop_step()
print("interrupt {} {}".format(self.name, channel))
class CalibrationError(BaseException):
pass | steppermotor.py | import threading
import time
import RPi.GPIO as GPIO
from tkinter import messagebox
class StepperMotor:
def __init__(self, pin_step, pin_direction, pin_calibration_microswitch, pin_safety_microswitch,
step_frequency, microswitch_bouncetime=300, calibration_timeout=20, name=""):
"""Interfaces with the steppermotors and limit switches.
Args:
pin_step: GPIO pin number for step pulse output
pin_direction: GPIO pin number for direction output
pin_calibration_microswitch: GPIO pin number for microswitch input in the normal direction
pin_safety_microswitch: GPIO pin number for microswitch input in reverse direction
step_frequency: Step frequency in steps per second
microswitch_bouncetime: Microswitch debounce time in ms
calibration_timeout: Calibration timeout in seconds
name: optional name for debugging purposes
"""
self.pin_step = pin_step
self.pin_direction = pin_direction
self.pin_calibration_microswitch = pin_calibration_microswitch # GPIO pin for microswitch input
self.pin_safety_microswitch = pin_safety_microswitch
self.step_frequency = step_frequency
self.default_step_frequency = step_frequency
self.calibration_timeout = calibration_timeout
self.microswitch_bouncetime = microswitch_bouncetime
self.name = name
self.reversed = False # If true then direction is reversed ie digital output HIGH
self.microswitch_hit_event = threading.Event() # Set when the microswitch is hit, cleared when start stepping
self.stop_step_event = threading.Event() # Set it to stop stepping. Cleared when start stepping.
self.lock_step_frequency = threading.Lock()
# Setup GPIO
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
GPIO.setup(self.pin_step, GPIO.OUT, initial=GPIO.LOW)
self.step_pwm = GPIO.PWM(self.pin_step, self.default_step_frequency)
GPIO.setup(self.pin_direction, GPIO.OUT, initial=GPIO.LOW)
# Setup interrupts for limit switches if used
self.ignore_interrupt = False
if self.pin_calibration_microswitch is not None and self.pin_safety_microswitch is not None:
GPIO.setup(self.pin_calibration_microswitch, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
GPIO.setup(self.pin_safety_microswitch, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
# Setup microswitch interrupt
GPIO.add_event_detect(self.pin_calibration_microswitch, GPIO.RISING, callback=self.microswitch_callback,
bouncetime=self.microswitch_bouncetime)
GPIO.add_event_detect(self.pin_safety_microswitch, GPIO.RISING, callback=self.microswitch_callback,
bouncetime=self.microswitch_bouncetime)
def enable_interrupts(self):
self.ignore_interrupt = False
def disable_interrupts(self):
self.ignore_interrupt = True
def start_step(self, count=None):
"""Start stepping
Args:
count: the number of steps to make. Since using the built in pwm this is only approximated by setting timer,
instead of actually counting the steps."""
self.stop_step_event.clear()
self.microswitch_hit_event.clear()
if count is not None:
# Move a set amount of steps with the default speed if count is given
self.step_frequency = self.default_step_frequency
threading.Timer(self.step_frequency / count, self.stop_step).start()
self.step_pwm.start(50)
def stop_step(self):
"""Stop stepping"""
self.stop_step_event.set()
self.step_pwm.stop()
self.microswitch_hit_event.clear()
def set_duty_cycle(self, value):
"""Set pwm duty cycle"""
self.step_pwm.ChangeDutyCycle(value)
def reverse(self, setting):
"""Reverse motor direction
Args:
setting: optional set to True or False to choose a direction, if None the direction is inverted
"""
if setting is not None:
if setting:
GPIO.output(self.pin_direction, GPIO.LOW)
else:
GPIO.output(self.pin_direction, GPIO.HIGH)
self.reversed = setting
else:
if self.reversed:
GPIO.output(self.pin_direction, GPIO.HIGH)
else:
GPIO.output(self.pin_direction, GPIO.LOW)
self.reversed = not self.reversed
def calibrate(self):
"""Calibrate motor to zero position.
The motor is moved all the way to one side until the microswitch is hit."""
# check if switch is already pressed, if so then don't move -> the motor is already on its zero position
if GPIO.input(self.pin_calibration_microswitch) == GPIO.HIGH or GPIO.input(
self.pin_safety_microswitch) == GPIO.HIGH:
self.stop_step_event.set()
return
if self.pin_calibration_microswitch is not None:
self.reverse(False)
self.step_frequency = self.default_step_frequency
self.start_step()
if self.microswitch_hit_event.wait(self.calibration_timeout):
self.step_counter = 0
else:
self.stop_step()
messagebox.showerror('FOUT', 'Fout tijdens calibratie: Timeout (kalibreren duurt te lang)')
else:
messagebox.showerror('FOUT',
'Voor deze motor is geen eindschakelaar ingesteld en er kan niet worden gekalibreert')
def microswitch_callback(self, channel):
"""Interrupt callback. This function is called when the microswitch is pressed."""
if self.ignore_interrupt:
return
# Filter out interrupts caused by random noise by checking again after 10ms
time.sleep(0.01)
if GPIO.input(self.pin_calibration_microswitch) == GPIO.HIGH or GPIO.input(
self.pin_safety_microswitch) == GPIO.HIGH:
self.microswitch_hit_event.set()
self.stop_step()
print("interrupt {} {}".format(self.name, channel))
class CalibrationError(BaseException):
pass | 0.810891 | 0.290874 |
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
def from_env(env_options, default=None):
value = None
if isinstance(env_options, str):
value = os.environ.get(env_options, None)
else:
for option in env_options:
if option in os.environ:
value = os.environ[option]
break
if value is None:
if default is not None:
return default
else:
raise ValueError(
"Environment variable(s) '{}' must be set or have a default".format(env_options))
return value
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '<KEY>'
DEBUG = False
ALLOWED_HOSTS = []
AUTH_USER_MODEL = 'accounts.OreUser'
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites',
'compressor',
'crispy_forms',
'reversion',
'actstream',
'rest_framework',
'ore.core',
'ore.accounts',
'ore.organizations',
'ore.projects',
'ore.teams',
'ore.versions',
'ore.flags',
'ore.discourse_sso',
'ore.discourse_discuss',
)
MIDDLEWARE_CLASSES = (
'reversion.middleware.RevisionMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'ore.urls'
WSGI_APPLICATION = 'ore.wsgi.application'
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# compressor finder
'compressor.finders.CompressorFinder',
)
# Default location for static files
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
# Compression
COMPRESS_PRECOMPILERS = (
('text/x-scss', 'django_libsass.SassCompiler'),
)
# Templates
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
"django.core.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.core.context_processors.debug",
"django.core.context_processors.i18n",
"django.core.context_processors.media",
"django.core.context_processors.static",
"django.core.context_processors.tz",
"django.contrib.messages.context_processors.messages",
"ore.core.context_processors.build_stamp",
]
}
},
]
LOGIN_REDIRECT_URL = '/'
# Activity Stream
ACTSTREAM_SETTINGS = {
'MANAGER': 'actstream.managers.ActionManager',
'FETCH_RELATIONS': True,
'USE_JSONFIELD': False,
}
# Crispy Forms
CRISPY_TEMPLATE_PACK = 'bootstrap3'
# Django Sites Framework
SITE_ID = 1
# Prohibited names (for namespaces, and projects, and versions)
PROHIBITED_NAMES = (
'manage',
'new',
'create',
'delete',
'flag',
'explore',
'describe',
'rename',
'upload',
'versions',
'version',
'projects',
'project',
'admin',
'administrator',
'static',
'settings',
'config',
'setting',
'login',
'logout',
'log-in',
'log-out',
'user',
'users',
'accounts',
'account',
'organization',
'organizations',
'org',
'orgs',
'staff',
'sponge',
'spongepowered',
'spongeproject',
'platform',
'admins',
'ore',
)
BUILD_STAMP_PATH = os.path.join(os.path.dirname(BASE_DIR), 'build_stamp.txt')
BUILD_STAMP = None
if os.path.exists(BUILD_STAMP_PATH):
with open(BUILD_STAMP_PATH, 'r') as f:
BUILD_STAMP = f.read().strip()
DISCOURSE_SSO_ENABLED = from_env('DISCOURSE_SSO_ENABLED', False) == 'true'
if DISCOURSE_SSO_ENABLED:
DISCOURSE_SSO_URL = from_env(
'DISCOURSE_SSO_URL', 'https://forums.spongepowered.org/session/sso_provider')
DISCOURSE_SSO_SECRET = from_env('DISCOURSE_SSO_SECRET')
DISCOURSE_DISCUSS_ENABLED = from_env(
'DISCOURSE_DISCUSS_ENABLED', False) == 'true'
if DISCOURSE_DISCUSS_ENABLED:
DISCOURSE_DISCUSS_API = from_env(
'DISCOURSE_DISCUSS_API', 'https://forums.spongepowered.org')
DISCOURSE_DISCUSS_CATEGORY = from_env(
'DISCOURSE_DISCUSS_CATEGORY', 'ore')
DISCOURSE_DISCUSS_API_KEY = from_env('DISCOURSE_DISCUSS_API_KEY')
DISCOURSE_DISCUSS_ORE_SITE_BASE = from_env(
'DISCOURSE_DISCUSS_ORE_SITE_BASE', 'https://ore-staging.spongepowered.org') | ore/settings/base.py | import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
def from_env(env_options, default=None):
value = None
if isinstance(env_options, str):
value = os.environ.get(env_options, None)
else:
for option in env_options:
if option in os.environ:
value = os.environ[option]
break
if value is None:
if default is not None:
return default
else:
raise ValueError(
"Environment variable(s) '{}' must be set or have a default".format(env_options))
return value
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '<KEY>'
DEBUG = False
ALLOWED_HOSTS = []
AUTH_USER_MODEL = 'accounts.OreUser'
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites',
'compressor',
'crispy_forms',
'reversion',
'actstream',
'rest_framework',
'ore.core',
'ore.accounts',
'ore.organizations',
'ore.projects',
'ore.teams',
'ore.versions',
'ore.flags',
'ore.discourse_sso',
'ore.discourse_discuss',
)
MIDDLEWARE_CLASSES = (
'reversion.middleware.RevisionMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'ore.urls'
WSGI_APPLICATION = 'ore.wsgi.application'
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# compressor finder
'compressor.finders.CompressorFinder',
)
# Default location for static files
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
# Compression
COMPRESS_PRECOMPILERS = (
('text/x-scss', 'django_libsass.SassCompiler'),
)
# Templates
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
"django.core.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.core.context_processors.debug",
"django.core.context_processors.i18n",
"django.core.context_processors.media",
"django.core.context_processors.static",
"django.core.context_processors.tz",
"django.contrib.messages.context_processors.messages",
"ore.core.context_processors.build_stamp",
]
}
},
]
LOGIN_REDIRECT_URL = '/'
# Activity Stream
ACTSTREAM_SETTINGS = {
'MANAGER': 'actstream.managers.ActionManager',
'FETCH_RELATIONS': True,
'USE_JSONFIELD': False,
}
# Crispy Forms
CRISPY_TEMPLATE_PACK = 'bootstrap3'
# Django Sites Framework
SITE_ID = 1
# Prohibited names (for namespaces, and projects, and versions)
PROHIBITED_NAMES = (
'manage',
'new',
'create',
'delete',
'flag',
'explore',
'describe',
'rename',
'upload',
'versions',
'version',
'projects',
'project',
'admin',
'administrator',
'static',
'settings',
'config',
'setting',
'login',
'logout',
'log-in',
'log-out',
'user',
'users',
'accounts',
'account',
'organization',
'organizations',
'org',
'orgs',
'staff',
'sponge',
'spongepowered',
'spongeproject',
'platform',
'admins',
'ore',
)
BUILD_STAMP_PATH = os.path.join(os.path.dirname(BASE_DIR), 'build_stamp.txt')
BUILD_STAMP = None
if os.path.exists(BUILD_STAMP_PATH):
with open(BUILD_STAMP_PATH, 'r') as f:
BUILD_STAMP = f.read().strip()
DISCOURSE_SSO_ENABLED = from_env('DISCOURSE_SSO_ENABLED', False) == 'true'
if DISCOURSE_SSO_ENABLED:
DISCOURSE_SSO_URL = from_env(
'DISCOURSE_SSO_URL', 'https://forums.spongepowered.org/session/sso_provider')
DISCOURSE_SSO_SECRET = from_env('DISCOURSE_SSO_SECRET')
DISCOURSE_DISCUSS_ENABLED = from_env(
'DISCOURSE_DISCUSS_ENABLED', False) == 'true'
if DISCOURSE_DISCUSS_ENABLED:
DISCOURSE_DISCUSS_API = from_env(
'DISCOURSE_DISCUSS_API', 'https://forums.spongepowered.org')
DISCOURSE_DISCUSS_CATEGORY = from_env(
'DISCOURSE_DISCUSS_CATEGORY', 'ore')
DISCOURSE_DISCUSS_API_KEY = from_env('DISCOURSE_DISCUSS_API_KEY')
DISCOURSE_DISCUSS_ORE_SITE_BASE = from_env(
'DISCOURSE_DISCUSS_ORE_SITE_BASE', 'https://ore-staging.spongepowered.org') | 0.371821 | 0.069415 |
from __future__ import division
from collections import defaultdict
import itertools
import numpy as np
from bbox_tools import IOU
def calc_detection_voc_prec_rec(
    pred_bboxes, pred_labels, pred_scores, gt_bboxes, gt_labels, gt_difcs=None,
    iou_thresh=0.5
):
    """Accumulate per-class precision/recall curves over a dataset (VOC style).

    Args:
        pred_bboxes: iterable of per-image predicted boxes, each an (R, 4) array.
        pred_labels: iterable of per-image predicted labels, each an (R,) array.
        pred_scores: iterable of per-image confidence scores, each an (R,) array.
        gt_bboxes: iterable of per-image ground-truth boxes, each a (K, 4) array.
        gt_labels: iterable of per-image ground-truth labels, each a (K,) array.
        gt_difcs: optional iterable of per-image boolean "difficult" flags,
            each a (K,) array. When None, every ground truth counts as easy.
        iou_thresh: minimum IoU for a prediction to match a ground-truth box.

    Returns:
        (prec, rec): lists indexed by class label. Each element is a numpy
        array of cumulative precision / recall (sorted by descending score),
        or None for classes with no recorded data.
    """
    pred_bboxes = iter(pred_bboxes)
    pred_labels = iter(pred_labels)
    pred_scores = iter(pred_scores)
    gt_bboxes = iter(gt_bboxes)
    gt_labels = iter(gt_labels)
    if gt_difcs is None:
        gt_difcs = itertools.repeat(None)
    else:
        gt_difcs = iter(gt_difcs)
    n_pos = defaultdict(int)    # per class: count of easy (non-difficult) gt boxes
    score = defaultdict(list)   # per class: confidence of every prediction
    match = defaultdict(list)   # per class: 1 = TP, 0 = FP, -1 = matched a difficult gt
    for pred_bbox, pred_label, pred_score, gt_bbox, gt_label, gt_difc in \
            zip(pred_bboxes, pred_labels, pred_scores, gt_bboxes, gt_labels, gt_difcs):
        if gt_difc is None:
            # BUGFIX: this used to assign to "gt_difcs" (the iterator),
            # leaving gt_difc as None and crashing at gt_difc[gt_mask] below.
            gt_difc = np.zeros(gt_bbox.shape[0], dtype=bool)
        for l in np.unique(np.concatenate((pred_label, gt_label)).astype(int)):
            # Predictions of class l, sorted by descending confidence.
            pr_mask = (pred_label == l)
            pred_bbox_l = pred_bbox[pr_mask]
            pred_score_l = pred_score[pr_mask]
            order = pred_score_l.argsort()[::-1]
            pred_bbox_l = pred_bbox_l[order]
            pred_score_l = pred_score_l[order]
            # Ground truths of class l.
            gt_mask = (gt_label == l)
            gt_bbox_l = gt_bbox[gt_mask]
            gt_difc_l = gt_difc[gt_mask]  # one "difficult" flag per gt box
            n_pos[l] += np.logical_not(gt_difc_l).sum()
            score[l].extend(pred_score_l)
            if len(pred_bbox_l) == 0:
                continue
            if len(gt_bbox_l) == 0:
                # No gt of this class in the image: every prediction is a FP.
                match[l].extend((0,) * pred_bbox_l.shape[0])
                continue
            # VOC box coordinates are inclusive of the max corner; widen by
            # one before IoU. Done on copies so the inputs stay intact.
            pred_bbox_l = pred_bbox_l.copy()
            pred_bbox_l[:, 2:] += 1
            gt_bbox_l = gt_bbox_l.copy()
            gt_bbox_l[:, 2:] += 1
            iou = IOU(pred_bbox_l, gt_bbox_l)
            gt_index = iou.argmax(axis=1)
            # Predictions whose best IoU is below the threshold match nothing.
            gt_index[iou.max(axis=1) < iou_thresh] = -1
            del iou
            selec = np.zeros(gt_bbox_l.shape[0], dtype=bool)  # gt already claimed?
            for idx in gt_index:
                if idx >= 0:
                    if gt_difc_l[idx]:
                        # Matches against difficult gt are ignored in scoring.
                        match[l].append(-1)
                    else:
                        # First (highest-scoring) match is a TP; duplicates
                        # on the same gt are FPs.
                        if not selec[idx]:
                            match[l].append(1)
                        else:
                            match[l].append(0)
                        selec[idx] = True
                    # NOTE: selec is only updated for non-difficult gt,
                    # mirroring the original control flow.
                else:
                    match[l].append(0)
    n_fg_class = max(n_pos.keys()) + 1  # highest class label + 1
    prec = [None] * n_fg_class
    rec = [None] * n_fg_class
    for l in n_pos.keys():
        score_l = np.array(score[l])
        match_l = np.array(match[l], dtype=np.int8)
        order = score_l.argsort()[::-1]
        match_l = match_l[order]
        tp = np.cumsum(match_l == 1)  # cumulative true positives
        fp = np.cumsum(match_l == 0)  # cumulative false positives
        prec[l] = tp / (fp + tp)  # precision at every score cutoff
        if n_pos[l] > 0:
            rec[l] = tp / n_pos[l]  # recall; denominator = count of easy gt boxes
    return prec, rec
def calc_detection_voc_ap(prec, rec, voc07=False):
    """Compute per-class average precision (AP) from precision/recall curves.

    Args:
        prec: list of per-class precision arrays (None for absent classes).
        rec: list of per-class recall arrays (None for absent classes).
        voc07: if True, use the 11-point interpolation of the VOC 2007
            devkit; otherwise integrate the interpolated PR curve.

    Returns:
        numpy array of APs, one per class; NaN where prec/rec is missing.
    """
    ap = np.full(len(prec), np.nan)
    for cls, (p, r) in enumerate(zip(prec, rec)):
        if p is None or r is None:
            continue
        # Replace NaN precision entries with 0 before any aggregation.
        p = np.nan_to_num(p)
        if voc07:
            # 11-point metric: average of the max precision at recall >= t.
            total = 0.0
            for t in np.arange(0.0, 1.1, 0.1):
                reachable = r >= t
                if reachable.any():
                    total += np.max(p[reachable]) / 11
            ap[cls] = total
        else:
            # Pad the curve, make the precision envelope monotonically
            # non-increasing, then sum rectangle areas at recall changes.
            mpre = np.concatenate(([0], p, [0]))
            mrec = np.concatenate(([0], r, [1]))
            mpre = np.maximum.accumulate(mpre[::-1])[::-1]
            changed = np.where(mrec[1:] != mrec[:-1])[0]
            ap[cls] = np.sum((mrec[changed + 1] - mrec[changed]) * mpre[changed + 1])
    return ap
def eval_detection_voc(
        pred_bboxes, pred_labels, pred_scores, gt_bboes, gt_labels, gt_difcs=None,
        iou_thresh=0.5, voc07 = False):
    """Run the full VOC detection evaluation: per-class AP and mAP.

    Builds per-class precision/recall curves, then averages them into APs.
    Returns a dict with 'ap' (per-class array, NaN for absent classes) and
    'map' (mean over the non-NaN entries).

    NOTE(review): "gt_bboes" looks like a typo for "gt_bboxes"; renaming it
    would break keyword callers, so it is left unchanged here.
    """
    prec, rec = calc_detection_voc_prec_rec(
        pred_bboxes, pred_labels, pred_scores,
        gt_bboes, gt_labels, gt_difcs, iou_thresh=iou_thresh
    )
    ap = calc_detection_voc_ap(prec, rec, voc07=voc07)
return {'ap': ap, 'map': np.nanmean(ap)} | Faster RCNN/eval_tools.py | from __future__ import division
from collections import defaultdict
import itertools
import numpy as np
from bbox_tools import IOU
def calc_detection_voc_prec_rec(
    pred_bboxes, pred_labels, pred_scores, gt_bboxes, gt_labels, gt_difcs=None,
    iou_thresh=0.5
):
    """Accumulate per-class precision/recall curves over a dataset (VOC style).

    Args:
        pred_bboxes: iterable of per-image predicted boxes, each an (R, 4) array.
        pred_labels: iterable of per-image predicted labels, each an (R,) array.
        pred_scores: iterable of per-image confidence scores, each an (R,) array.
        gt_bboxes: iterable of per-image ground-truth boxes, each a (K, 4) array.
        gt_labels: iterable of per-image ground-truth labels, each a (K,) array.
        gt_difcs: optional iterable of per-image boolean "difficult" flags,
            each a (K,) array. When None, every ground truth counts as easy.
        iou_thresh: minimum IoU for a prediction to match a ground-truth box.

    Returns:
        (prec, rec): lists indexed by class label. Each element is a numpy
        array of cumulative precision / recall (sorted by descending score),
        or None for classes with no recorded data.
    """
    pred_bboxes = iter(pred_bboxes)
    pred_labels = iter(pred_labels)
    pred_scores = iter(pred_scores)
    gt_bboxes = iter(gt_bboxes)
    gt_labels = iter(gt_labels)
    if gt_difcs is None:
        gt_difcs = itertools.repeat(None)
    else:
        gt_difcs = iter(gt_difcs)
    n_pos = defaultdict(int)    # per class: count of easy (non-difficult) gt boxes
    score = defaultdict(list)   # per class: confidence of every prediction
    match = defaultdict(list)   # per class: 1 = TP, 0 = FP, -1 = matched a difficult gt
    for pred_bbox, pred_label, pred_score, gt_bbox, gt_label, gt_difc in \
            zip(pred_bboxes, pred_labels, pred_scores, gt_bboxes, gt_labels, gt_difcs):
        if gt_difc is None:
            # BUGFIX: this used to assign to "gt_difcs" (the iterator),
            # leaving gt_difc as None and crashing at gt_difc[gt_mask] below.
            gt_difc = np.zeros(gt_bbox.shape[0], dtype=bool)
        for l in np.unique(np.concatenate((pred_label, gt_label)).astype(int)):
            # Predictions of class l, sorted by descending confidence.
            pr_mask = (pred_label == l)
            pred_bbox_l = pred_bbox[pr_mask]
            pred_score_l = pred_score[pr_mask]
            order = pred_score_l.argsort()[::-1]
            pred_bbox_l = pred_bbox_l[order]
            pred_score_l = pred_score_l[order]
            # Ground truths of class l.
            gt_mask = (gt_label == l)
            gt_bbox_l = gt_bbox[gt_mask]
            gt_difc_l = gt_difc[gt_mask]  # one "difficult" flag per gt box
            n_pos[l] += np.logical_not(gt_difc_l).sum()
            score[l].extend(pred_score_l)
            if len(pred_bbox_l) == 0:
                continue
            if len(gt_bbox_l) == 0:
                # No gt of this class in the image: every prediction is a FP.
                match[l].extend((0,) * pred_bbox_l.shape[0])
                continue
            # VOC box coordinates are inclusive of the max corner; widen by
            # one before IoU. Done on copies so the inputs stay intact.
            pred_bbox_l = pred_bbox_l.copy()
            pred_bbox_l[:, 2:] += 1
            gt_bbox_l = gt_bbox_l.copy()
            gt_bbox_l[:, 2:] += 1
            iou = IOU(pred_bbox_l, gt_bbox_l)
            gt_index = iou.argmax(axis=1)
            # Predictions whose best IoU is below the threshold match nothing.
            gt_index[iou.max(axis=1) < iou_thresh] = -1
            del iou
            selec = np.zeros(gt_bbox_l.shape[0], dtype=bool)  # gt already claimed?
            for idx in gt_index:
                if idx >= 0:
                    if gt_difc_l[idx]:
                        # Matches against difficult gt are ignored in scoring.
                        match[l].append(-1)
                    else:
                        # First (highest-scoring) match is a TP; duplicates
                        # on the same gt are FPs.
                        if not selec[idx]:
                            match[l].append(1)
                        else:
                            match[l].append(0)
                        selec[idx] = True
                    # NOTE: selec is only updated for non-difficult gt,
                    # mirroring the original control flow.
                else:
                    match[l].append(0)
    n_fg_class = max(n_pos.keys()) + 1  # highest class label + 1
    prec = [None] * n_fg_class
    rec = [None] * n_fg_class
    for l in n_pos.keys():
        score_l = np.array(score[l])
        match_l = np.array(match[l], dtype=np.int8)
        order = score_l.argsort()[::-1]
        match_l = match_l[order]
        tp = np.cumsum(match_l == 1)  # cumulative true positives
        fp = np.cumsum(match_l == 0)  # cumulative false positives
        prec[l] = tp / (fp + tp)  # precision at every score cutoff
        if n_pos[l] > 0:
            rec[l] = tp / n_pos[l]  # recall; denominator = count of easy gt boxes
    return prec, rec
def calc_detection_voc_ap(prec, rec, voc07=False):
    """Compute per-class average precision (AP) from precision/recall curves.

    Args:
        prec: list of per-class precision arrays (None for absent classes).
        rec: list of per-class recall arrays (None for absent classes).
        voc07: if True, use the 11-point interpolation of the VOC 2007
            devkit; otherwise integrate the interpolated PR curve.

    Returns:
        numpy array of APs, one per class; NaN where prec/rec is missing.
    """
    ap = np.full(len(prec), np.nan)
    for cls, (p, r) in enumerate(zip(prec, rec)):
        if p is None or r is None:
            continue
        # Replace NaN precision entries with 0 before any aggregation.
        p = np.nan_to_num(p)
        if voc07:
            # 11-point metric: average of the max precision at recall >= t.
            total = 0.0
            for t in np.arange(0.0, 1.1, 0.1):
                reachable = r >= t
                if reachable.any():
                    total += np.max(p[reachable]) / 11
            ap[cls] = total
        else:
            # Pad the curve, make the precision envelope monotonically
            # non-increasing, then sum rectangle areas at recall changes.
            mpre = np.concatenate(([0], p, [0]))
            mrec = np.concatenate(([0], r, [1]))
            mpre = np.maximum.accumulate(mpre[::-1])[::-1]
            changed = np.where(mrec[1:] != mrec[:-1])[0]
            ap[cls] = np.sum((mrec[changed + 1] - mrec[changed]) * mpre[changed + 1])
    return ap
def eval_detection_voc(
        pred_bboxes, pred_labels, pred_scores, gt_bboes, gt_labels, gt_difcs=None,
        iou_thresh=0.5, voc07 = False):
    """Run the full VOC detection evaluation: per-class AP and mAP.

    Builds per-class precision/recall curves, then averages them into APs.
    Returns a dict with 'ap' (per-class array, NaN for absent classes) and
    'map' (mean over the non-NaN entries).

    NOTE(review): "gt_bboes" looks like a typo for "gt_bboxes"; renaming it
    would break keyword callers, so it is left unchanged here.
    """
    prec, rec = calc_detection_voc_prec_rec(
        pred_bboxes, pred_labels, pred_scores,
        gt_bboes, gt_labels, gt_difcs, iou_thresh=iou_thresh
    )
    ap = calc_detection_voc_ap(prec, rec, voc07=voc07)
return {'ap': ap, 'map': np.nanmean(ap)} | 0.280715 | 0.266259 |
import struct
import copy
from test_framework.mininode import ser_vector, deser_vector, COutPoint, deser_uint256, deser_compact_size, CTxOut
class msg_getutxos(object):
    """P2P ``getutxos`` message (BIP 64): query the UTXO set for outpoints."""
    command = b"getutxos"

    def __init__(self, checkmempool=False, outpoints=None):
        """checkmempool: also consider mempool spends; outpoints: COutPoints to query."""
        self.checkmempool = checkmempool
        # BUGFIX: was a mutable default argument ([]), shared across instances.
        self.outpoints = [] if outpoints is None else outpoints

    def deserialize(self, f):
        """Read the message from stream *f*."""
        self.checkmempool = struct.unpack("<?", f.read(1))[0]
        self.outpoints = deser_vector(f, COutPoint)

    def serialize(self):
        """Return the wire-format bytes of this message."""
        r = b""
        r += struct.pack("<?", self.checkmempool)
        r += ser_vector(self.outpoints)
        return r

    def __repr__(self):
        return "msg_getutxos(checkmempool=%s, outpoints=%s)" % (self.checkmempool, repr(self.outpoints))
class BIP64Coin(object):
    """A single UTXO entry carried in a BIP 64 ``utxos`` reply."""

    def __init__(self, bip64coin=None):
        """Default-construct, or deep-copy from another BIP64Coin-like object."""
        if bip64coin is None:
            # BUGFIX: the original had trailing commas here, which made
            # txversion/height one-element tuples instead of ints.
            self.txversion = 0
            self.height = 0
            self.out = None
        else:
            self.txversion = bip64coin.txversion
            # BUGFIX: was "bip64.height" — a NameError; the parameter is
            # named bip64coin.
            self.height = bip64coin.height
            self.out = copy.deepcopy(bip64coin.out)

    def deserialize(self, f):
        """Read txversion, height and the CTxOut from stream *f*."""
        self.txversion = struct.unpack("<I", f.read(4))[0]
        self.height = struct.unpack("<I", f.read(4))[0]
        self.out = CTxOut()
        self.out.deserialize(f)

    def serialize(self):
        """Return the wire-format bytes of this coin entry."""
        r = b""
        r += struct.pack("<I", self.txversion)
        r += struct.pack("<I", self.height)
        r += self.out.serialize()
        return r

    def __repr__(self):
        return "BIP64Coin(txversion=%i height=%i out=%s)" % (self.txversion, self.height, repr(self.out))
def deser_byte_vector(f):
    """Read a compact-size-prefixed run of single bytes as a list of ints."""
    count = deser_compact_size(f)
    return [struct.unpack("<B", f.read(1))[0] for _ in range(count)]
def ser_byte_vector(l):
    """Serialize a list of byte values with a compact-size length prefix.

    BUGFIX: ser_compact_size was never imported at module level (only
    deser_compact_size is), so the original raised NameError on first use;
    import it locally from the same framework module.
    """
    from test_framework.mininode import ser_compact_size
    r = ser_compact_size(len(l))
    for i in l:
        r += struct.pack("B", i)
    return r
class msg_utxos(object):
    """P2P ``utxos`` message (BIP 64): the reply to ``getutxos``."""
    command = b"utxos"

    def __init__(self, height=0, hash=0, bitmap=None, result=None):
        """height/hash: chain tip; bitmap: per-outpoint hit flags; result: BIP64Coin list."""
        self.height = height
        self.hash = hash
        # BUGFIX: were mutable default arguments ([]), shared across instances.
        self.bitmap = [] if bitmap is None else bitmap
        self.result = [] if result is None else result

    def deserialize(self, f):
        """Read the message from stream *f*."""
        self.height = struct.unpack("<I", f.read(4))[0]
        self.hash = deser_uint256(f)
        self.bitmap = deser_byte_vector(f)
        self.result = deser_vector(f, BIP64Coin)

    def serialize(self):
        """Return the wire-format bytes of this message."""
        # BUGFIX: ser_uint256 is not in the module-level import list
        # (only deser_uint256 is); import it from the framework module.
        from test_framework.mininode import ser_uint256
        r = b""
        # BUGFIX: was "self.pack(...)" — an AttributeError; struct.pack intended.
        r += struct.pack("<I", self.height)
        r += ser_uint256(self.hash)
        r += ser_byte_vector(self.bitmap)
        # BUGFIX: ser_vector takes the list as its first argument; the
        # original passed the BIP64Coin class first.
        r += ser_vector(self.result)
        return r
def __repr__(self):
return "msg_utxos(height=%i hash=%064x bitmap=%s result=%s)" % (self.height, self.hash, repr(self.bitmap), repr(self.result)) | qa/rpc-tests/test_framework/bip64.py | import struct
import copy
from test_framework.mininode import ser_vector, deser_vector, COutPoint, deser_uint256, deser_compact_size, CTxOut
class msg_getutxos(object):
    """P2P ``getutxos`` message (BIP 64): query the UTXO set for outpoints."""
    command = b"getutxos"

    def __init__(self, checkmempool=False, outpoints=None):
        """checkmempool: also consider mempool spends; outpoints: COutPoints to query."""
        self.checkmempool = checkmempool
        # BUGFIX: was a mutable default argument ([]), shared across instances.
        self.outpoints = [] if outpoints is None else outpoints

    def deserialize(self, f):
        """Read the message from stream *f*."""
        self.checkmempool = struct.unpack("<?", f.read(1))[0]
        self.outpoints = deser_vector(f, COutPoint)

    def serialize(self):
        """Return the wire-format bytes of this message."""
        r = b""
        r += struct.pack("<?", self.checkmempool)
        r += ser_vector(self.outpoints)
        return r

    def __repr__(self):
        return "msg_getutxos(checkmempool=%s, outpoints=%s)" % (self.checkmempool, repr(self.outpoints))
class BIP64Coin(object):
    """A single UTXO entry carried in a BIP 64 ``utxos`` reply."""

    def __init__(self, bip64coin=None):
        """Default-construct, or deep-copy from another BIP64Coin-like object."""
        if bip64coin is None:
            # BUGFIX: the original had trailing commas here, which made
            # txversion/height one-element tuples instead of ints.
            self.txversion = 0
            self.height = 0
            self.out = None
        else:
            self.txversion = bip64coin.txversion
            # BUGFIX: was "bip64.height" — a NameError; the parameter is
            # named bip64coin.
            self.height = bip64coin.height
            self.out = copy.deepcopy(bip64coin.out)

    def deserialize(self, f):
        """Read txversion, height and the CTxOut from stream *f*."""
        self.txversion = struct.unpack("<I", f.read(4))[0]
        self.height = struct.unpack("<I", f.read(4))[0]
        self.out = CTxOut()
        self.out.deserialize(f)

    def serialize(self):
        """Return the wire-format bytes of this coin entry."""
        r = b""
        r += struct.pack("<I", self.txversion)
        r += struct.pack("<I", self.height)
        r += self.out.serialize()
        return r

    def __repr__(self):
        return "BIP64Coin(txversion=%i height=%i out=%s)" % (self.txversion, self.height, repr(self.out))
def deser_byte_vector(f):
    """Read a compact-size count, then that many single bytes, from stream *f*."""
    remaining = deser_compact_size(f)
    out = []
    while remaining > 0:
        out.append(struct.unpack("<B", f.read(1))[0])
        remaining -= 1
    return out
def ser_byte_vector(l):
    """Serialize *l* (ints in [0, 255]) as a compact-size length prefix plus bytes."""
    encoded = ser_compact_size(len(l))
    for byte_value in l:
        encoded += struct.pack("B", byte_value)
    return encoded
class msg_utxos(object):
command = b"utxos"
def __init__(self, height=0, hash=0, bitmap=None, result=None):
    """Initialize a ``utxos`` reply.

    :param height: chain height of the UTXO snapshot
    :param hash: block hash (uint256 as int) at that height
    :param bitmap: per-outpoint hit bytes (fresh empty list by default)
    :param result: list of BIP64Coin hits (fresh empty list by default)
    """
    self.height = height
    self.hash = hash
    # None sentinels replace the shared mutable defaults ([]) of the
    # original signature, which aliased one list across all instances.
    self.bitmap = bitmap if bitmap is not None else []
    self.result = result if result is not None else []
def deserialize(self, f):
    """Populate this reply by decoding the wire format from stream *f*."""
    raw_height = f.read(4)
    self.height = struct.unpack("<I", raw_height)[0]
    self.hash = deser_uint256(f)
    self.bitmap = deser_byte_vector(f)
    self.result = deser_vector(f, BIP64Coin)
def serialize(self):
    """Encode the ``utxos`` reply: height || hash || bitmap || coin vector.

    Fixes two defects in the original:
      * ``self.pack(...)`` -> ``struct.pack(...)`` (msg_utxos has no ``pack``
        attribute, so serialize always raised AttributeError).
      * ``ser_vector(BIP64Coin, self.result)`` -> ``ser_vector(self.result)``,
        matching the one-argument call used by ``msg_getutxos.serialize``.
    """
    r = b""
    r += struct.pack("<I", self.height)
    r += ser_uint256(self.hash)
    r += ser_byte_vector(self.bitmap)
    r += ser_vector(self.result)
    return r
def __repr__(self):
return "msg_utxos(height=%i hash=%064x bitmap=%s result=%s)" % (self.height, self.hash, repr(self.bitmap), repr(self.result)) | 0.445288 | 0.10235 |
import threading
import time
from pathlib import Path
from typing import List
from daipecore.container.WatcherLogger import WatcherLogger
class ConfigsWatcherThread(threading.Thread):
def __init__(self, configs_dir: str, watcher_logger: WatcherLogger, callback, polling_interval=1):
    """Store watcher configuration and install a thread excepthook.

    Note: assigning ``threading.excepthook`` is process-global, so the most
    recently constructed watcher's logger receives uncaught thread errors.
    """
    threading.Thread.__init__(self)
    self._configs_dir = configs_dir
    self._watcher_logger = watcher_logger
    self._callback = callback
    self._polling_interval = polling_interval

    def excepthook(args):
        # Closes over watcher_logger so failures are reported even though
        # the hook runs outside any instance context.
        watcher_logger.error("Configs watcher failed")
        watcher_logger.error(str(args))

    threading.excepthook = excepthook
def run(self):
    """Poll the configs dir forever; invoke the callback on delete/modify/create."""
    # Local import keeps the datetime dependency inside the worker thread.
    from datetime import datetime as dt
    self._watcher_logger.info(f"Watching of {self._configs_dir} started")

    def get_config_file_paths(base_path: str):
        # Non-recursive: only *.yaml / *.yml directly inside the configs dir.
        return [Path(p) for p in Path(base_path).glob("*") if p.suffix in {".yaml", ".yml"}]

    def get_files_with_timestamp(paths: List[Path]):
        # Map "path string" -> mtime. NOTE(review): path.stat() can raise
        # FileNotFoundError if a file vanishes between glob and stat — confirm
        # this race is acceptable for the deployment.
        return {str(path): path.stat().st_mtime for path in paths}

    files_with_timestamp_previous = get_files_with_timestamp(get_config_file_paths(self._configs_dir))
    while True:
        files_with_timestamp_new = get_files_with_timestamp(get_config_file_paths(self._configs_dir))
        # Detect deletions and modifications; break fires at most one callback
        # per poll cycle for this category of change.
        for path, timestamp in files_with_timestamp_previous.items():
            if path not in files_with_timestamp_new:
                self._watcher_logger.info(f"Existing file deleted: {path}")
                self._callback()
                break
            if files_with_timestamp_new[path] > timestamp:
                time_string = dt.fromtimestamp(files_with_timestamp_new[path]).strftime("%m.%d.%Y_%H:%M:%S")
                self._watcher_logger.info(f"File changed: {path}, timestamp: {time_string}")
                self._callback()
                break
        # Newly created files: keys present now but not on the previous scan.
        new_files_only = set(files_with_timestamp_new.keys()) - set(files_with_timestamp_previous.keys())
        if new_files_only != set():
            self._watcher_logger.info(f"New file(s) found: {new_files_only}")
            self._callback()
        files_with_timestamp_previous = files_with_timestamp_new
time.sleep(self._polling_interval) | src/daipecore/container/ConfigsWatcherThread.py | import threading
import time
from pathlib import Path
from typing import List
from daipecore.container.WatcherLogger import WatcherLogger
class ConfigsWatcherThread(threading.Thread):
def __init__(self, configs_dir: str, watcher_logger: WatcherLogger, callback, polling_interval=1):
    """Create a polling watcher thread over *configs_dir*.

    :param configs_dir: directory whose *.yaml/*.yml files are polled
    :param watcher_logger: logger used for watch events and failures
    :param callback: zero-arg callable fired on any detected change
    :param polling_interval: seconds between polls (default 1)
    """
    threading.Thread.__init__(self)
    self._configs_dir = configs_dir
    self._watcher_logger = watcher_logger
    self._callback = callback
    self._polling_interval = polling_interval
    # Report uncaught exceptions from threads via the watcher's logger.
    # NOTE(review): threading.excepthook is process-global (Python 3.8+);
    # constructing several watchers overwrites earlier hooks — confirm intended.
    def excepthook(args):
        watcher_logger.error("Configs watcher failed")
        watcher_logger.error(str(args))
    threading.excepthook = excepthook
def run(self):
    """Main polling loop: watch the configs directory until the process exits.

    Fires ``self._callback`` when a watched YAML file is deleted, modified,
    or newly created.
    """
    # Imported in the thread body rather than at module level.
    from datetime import datetime as dt
    self._watcher_logger.info(f"Watching of {self._configs_dir} started")

    def get_config_file_paths(base_path: str):
        # Only top-level YAML files are watched (glob("*") is not recursive).
        return [Path(p) for p in Path(base_path).glob("*") if p.suffix in {".yaml", ".yml"}]

    def get_files_with_timestamp(paths: List[Path]):
        # NOTE(review): stat() may raise if a file is removed between glob
        # and stat — verify callers tolerate the thread dying via excepthook.
        return {str(path): path.stat().st_mtime for path in paths}

    files_with_timestamp_previous = get_files_with_timestamp(get_config_file_paths(self._configs_dir))
    while True:
        files_with_timestamp_new = get_files_with_timestamp(get_config_file_paths(self._configs_dir))
        # Compare against the previous snapshot: deletion or newer mtime
        # triggers the callback; break limits it to once per scan.
        for path, timestamp in files_with_timestamp_previous.items():
            if path not in files_with_timestamp_new:
                self._watcher_logger.info(f"Existing file deleted: {path}")
                self._callback()
                break
            if files_with_timestamp_new[path] > timestamp:
                time_string = dt.fromtimestamp(files_with_timestamp_new[path]).strftime("%m.%d.%Y_%H:%M:%S")
                self._watcher_logger.info(f"File changed: {path}, timestamp: {time_string}")
                self._callback()
                break
        # Creation check: any key in the new snapshot that wasn't seen before.
        new_files_only = set(files_with_timestamp_new.keys()) - set(files_with_timestamp_previous.keys())
        if new_files_only != set():
            self._watcher_logger.info(f"New file(s) found: {new_files_only}")
            self._callback()
        files_with_timestamp_previous = files_with_timestamp_new
time.sleep(self._polling_interval) | 0.66072 | 0.104798 |
import unittest
from mock import MagicMock
from state import BaseState, CorridorState
class BaseStateTests(unittest.TestCase):
"""Unit tests for the BaseState class."""
def setUp(self):
self.mock_robot = MagicMock()
self.state = BaseState(self.mock_robot)
def test_init(self):
self.assertEqual(self.state.robot, self.mock_robot)
self.assertFalse(self.state.is_oriented)
def test_run(self):
"""Verify that BaseState can't be used to run the robot."""
with self.assertRaises(NotImplementedError):
self.state.run()
class CorridorStateTests(unittest.TestCase):
"""Unit tests for the CorridorState class."""
def setUp(self):
self.mock_robot = MagicMock()
self.state = CorridorState(self.mock_robot)
def test_find_perpendicular(self):
"""Find not the minimum, but the center of the "dip"."""
test_cases = [
([6, 7, 8, 7, 6, 5, 4, 3, 4, 5], 7), # Right, looking right
([7, 6, 6, 6, 7, 8, 8, 7, 6, 5], 2), # Right, looking left
([4, 5, 6, 7, 8, 7, 6, 6, 6, 7], 7), # Left, looking right
([5, 4, 3, 3, 4, 5, 6, 6, 7, 6], 2), # Left, looking left
([8, 7, 6, 5, 4, 3, 3, 4, 5, 6], 5), # Looking straight at a wall
([3, 4, 5, 6, 7, 8, 8, 8, 7, 6], 9), # Looking down the corridor
([7, 7, 8, 8, 8, 8, 7, 6, 6, 6], 8), # Looking right, flat at end
([6, 6, 6, 7, 8, 8, 8, 8, 7, 7], 1) # Looking left, flat at start
]
for test_case in test_cases:
self.assertEqual(self.state._find_perpendicular(test_case[0]),
test_case[1])
def test_find_p_heading(self):
orig_find_perpendicular = self.state._find_perpendicular
self.state._find_perpendicular = MagicMock()
heading_histogram = [0.2, 0.6, 0.2]
test_cases = [
(0, [0] * 17 + heading_histogram + [0] * 16), # Centered at 270
(11, [0] * 10 + heading_histogram + [0] * 23) # Centered at 020
]
for test_case in test_cases:
self.state._find_perpendicular.return_value = test_case[0]
self.assertEqual(self.state._find_p_heading(), test_case[1])
self.state._find_perpendicular = orig_find_perpendicular
def test_rotate_p_heading(self):
p_histogram = [0.2, 0.6, 0.2]
self.state.p_heading = [0] * 17 + p_histogram + [0] * 16
test_cases = [
(-30, [0] * 14 + p_histogram + [0] * 19),
(30, [0] * 20 + p_histogram + [0] * 13)
]
for test_case in test_cases:
self.assertEqual(
self.state._rotate_p_heading(test_case[0]),
test_case[1]
)
def test_get_wall_direction(self):
p_histogram = [0.2, 0.6, 0.2]
test_cases = [
([0] * 18 + p_histogram + [0] * 15, [270, 280, 290]),
([0] * 9 + p_histogram + [0] * 24, [0, 10, 20])
]
for test_case in test_cases:
wall_direction = self.state._get_wall_direction(test_case[0])
self.assertTrue(
wall_direction in test_case[1],
'{0} not in {1}'.format(wall_direction, test_case[1])
)
def test_get_corridor_direction(self):
p_histogram = [0.2, 0.6, 0.2]
test_cases = [
([0] * 18 + p_histogram + [0] * 15, [0, 10, 20]),
([0] * 9 + p_histogram + [0] * 24, [270, 280, 290])
]
for test_case in test_cases:
corridor_direction = self.state._get_corridor_direction(test_case[0])
self.assertTrue(
corridor_direction in test_case[1],
'{0} not in {1}'.format(corridor_direction, test_case[1])
) | src/tests/state_test.py |
import unittest
from mock import MagicMock
from state import BaseState, CorridorState
class BaseStateTests(unittest.TestCase):
"""Unit tests for the BaseState class."""
def setUp(self):
self.mock_robot = MagicMock()
self.state = BaseState(self.mock_robot)
def test_init(self):
self.assertEqual(self.state.robot, self.mock_robot)
self.assertFalse(self.state.is_oriented)
def test_run(self):
"""Verify that BaseState can't be used to run the robot."""
with self.assertRaises(NotImplementedError):
self.state.run()
class CorridorStateTests(unittest.TestCase):
"""Unit tests for the CorridorState class."""
def setUp(self):
self.mock_robot = MagicMock()
self.state = CorridorState(self.mock_robot)
def test_find_perpendicular(self):
"""Find not the minimum, but the center of the "dip"."""
test_cases = [
([6, 7, 8, 7, 6, 5, 4, 3, 4, 5], 7), # Right, looking right
([7, 6, 6, 6, 7, 8, 8, 7, 6, 5], 2), # Right, looking left
([4, 5, 6, 7, 8, 7, 6, 6, 6, 7], 7), # Left, looking right
([5, 4, 3, 3, 4, 5, 6, 6, 7, 6], 2), # Left, looking left
([8, 7, 6, 5, 4, 3, 3, 4, 5, 6], 5), # Looking straight at a wall
([3, 4, 5, 6, 7, 8, 8, 8, 7, 6], 9), # Looking down the corridor
([7, 7, 8, 8, 8, 8, 7, 6, 6, 6], 8), # Looking right, flat at end
([6, 6, 6, 7, 8, 8, 8, 8, 7, 7], 1) # Looking left, flat at start
]
for test_case in test_cases:
self.assertEqual(self.state._find_perpendicular(test_case[0]),
test_case[1])
def test_find_p_heading(self):
orig_find_perpendicular = self.state._find_perpendicular
self.state._find_perpendicular = MagicMock()
heading_histogram = [0.2, 0.6, 0.2]
test_cases = [
(0, [0] * 17 + heading_histogram + [0] * 16), # Centered at 270
(11, [0] * 10 + heading_histogram + [0] * 23) # Centered at 020
]
for test_case in test_cases:
self.state._find_perpendicular.return_value = test_case[0]
self.assertEqual(self.state._find_p_heading(), test_case[1])
self.state._find_perpendicular = orig_find_perpendicular
def test_rotate_p_heading(self):
p_histogram = [0.2, 0.6, 0.2]
self.state.p_heading = [0] * 17 + p_histogram + [0] * 16
test_cases = [
(-30, [0] * 14 + p_histogram + [0] * 19),
(30, [0] * 20 + p_histogram + [0] * 13)
]
for test_case in test_cases:
self.assertEqual(
self.state._rotate_p_heading(test_case[0]),
test_case[1]
)
def test_get_wall_direction(self):
p_histogram = [0.2, 0.6, 0.2]
test_cases = [
([0] * 18 + p_histogram + [0] * 15, [270, 280, 290]),
([0] * 9 + p_histogram + [0] * 24, [0, 10, 20])
]
for test_case in test_cases:
wall_direction = self.state._get_wall_direction(test_case[0])
self.assertTrue(
wall_direction in test_case[1],
'{0} not in {1}'.format(wall_direction, test_case[1])
)
def test_get_corridor_direction(self):
p_histogram = [0.2, 0.6, 0.2]
test_cases = [
([0] * 18 + p_histogram + [0] * 15, [0, 10, 20]),
([0] * 9 + p_histogram + [0] * 24, [270, 280, 290])
]
for test_case in test_cases:
corridor_direction = self.state._get_corridor_direction(test_case[0])
self.assertTrue(
corridor_direction in test_case[1],
'{0} not in {1}'.format(corridor_direction, test_case[1])
) | 0.70028 | 0.686933 |
import argparse
import os
import numpy as np
import torch
import torch.nn.functional as F
import tqdm
from pytorch_pretrained_bert import GPT2Tokenizer
def main():
parser = argparse.ArgumentParser(description='PyTorch Transformer Language Model')
parser.add_argument('--work_dir', type=str, required=True,
help='path to the work_dir')
parser.add_argument('--context', type=str, default='',
help='Conditional generation context')
parser.add_argument('--top_k', type=int, default=0,
help='Limit sampling to top K probabilities. If 0, use all.')
parser.add_argument('--top_p', type=float, default=0,
help='Limit sampling to p nucleus sampling. If 0, use all.')
parser.add_argument('--length', type=int, default=200,
help='what sequence length to generate')
parser.add_argument('--max_context', type=int, default=384,
help='Maximum context length the model uses during generation')
parser.add_argument('--batch_size', type=int, default=10,
help='what sequence length to generate')
parser.add_argument("--temperature", type=float, default=1.0)
args = parser.parse_args()
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# Load the best saved model.
with open(os.path.join(args.work_dir, 'model-best.pt'), 'rb') as f:
model = torch.load(f, map_location='cuda' if torch.cuda.is_available() else 'cpu')
if not torch.cuda.is_available():
model = model.float()
tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
NL = tokenizer.encode('\n')
model = model.to(device)
model.eval()
## Init
data = torch.tensor(NL*4 + tokenizer.encode(args.context)).to(device)
# Turn into a batch.
data.unsqueeze_(1)
data = data.repeat_interleave(args.batch_size, dim=1)
if not hasattr(model, 'init_mems'):
model = model.module
mems = model.init_mems()
for i in tqdm.trange(args.length):
## Grab a sample from the last frame, append to result list, append to `data`
# TODO: using mems breaks generation. Find a way to fix?
pred_hid, mems_ = predict(model, data[-args.max_context:], mems)
softmax = hidden_to_softmax(model, pred_hid[-1], top_k=args.top_k, temperature=args.temperature, top_p=args.top_p)
new_sample = torch.multinomial(softmax, num_samples=1).unsqueeze(-1).squeeze(2)
data = torch.cat((data, new_sample.t()), dim=0)
for i in range(data.size(1)):
print('=' * 40, 'sample', i + 1, '=' * 40)
# Chop off the newlines before printing
print(tokenizer.decode(data[4:, i].tolist()))
def predict(model, data, mems):
tgt_len = data.size(0)
with torch.no_grad():
hidden, new_mems = model._forward(data, mems=mems)
pred_hid = hidden[-tgt_len:]
return pred_hid, new_mems
def hidden_to_softmax(model, hidden, temperature=1, top_k=0, top_p=0):
"""Turn a hidden projection into log softmax.
Adapted from utils/proj_adaptive_softmax.py
"""
# pas stands for ProjectedAdaptiveSoftmax
pas = model.crit
logits = pas._compute_logit(hidden, pas.out_layers[0].weight,
pas.out_layers[0].bias, pas.out_projs[0])
logits = top_k_top_p_filtering(logits, top_k=top_k, top_p=top_p)
logits /= temperature
softmax = F.softmax(logits, dim=-1)
return softmax
def top_k_top_p_filtering(logits, top_k=0, top_p=0.0, filter_value=-float('Inf')):
""" Filter a distribution of logits using top-k and/or nucleus (top-p) filtering
https://gist.github.com/thomwolf/1a5a29f6962089e871b94cbd09daf317
Args:
logits: logits distribution shape (..., vocabulary size)
top_k >0: keep only top k tokens with highest probability (top-k filtering).
top_p >0.0: keep the top tokens with cumulative probability >= top_p (nucleus filtering).
"""
top_k = min(top_k, logits.size(-1)) # Safety check
if top_k > 0:
# Remove all tokens with a probability less than the last token of the top-k
indices_to_remove = logits < torch.topk(logits, top_k)[0][..., -1, None]
logits[indices_to_remove] = filter_value
if top_p > 0.0:
sorted_logits, sorted_indices = torch.sort(logits, descending=True)
cumulative_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)
# Remove tokens with cumulative probability above the threshold
sorted_indices_to_remove = cumulative_probs >= top_p
# Shift the indices to the right to keep also the first token above the threshold
sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone()
sorted_indices_to_remove[..., 0] = 0
indices_to_remove = torch.zeros_like(logits, dtype=torch.uint8).scatter_(
dim=-1, index=sorted_indices, src=sorted_indices_to_remove )
logits[indices_to_remove] = filter_value
return logits
if __name__ == '__main__':
main() | transformer_xl/generate.py | import argparse
import os
import numpy as np
import torch
import torch.nn.functional as F
import tqdm
from pytorch_pretrained_bert import GPT2Tokenizer
def main():
parser = argparse.ArgumentParser(description='PyTorch Transformer Language Model')
parser.add_argument('--work_dir', type=str, required=True,
help='path to the work_dir')
parser.add_argument('--context', type=str, default='',
help='Conditional generation context')
parser.add_argument('--top_k', type=int, default=0,
help='Limit sampling to top K probabilities. If 0, use all.')
parser.add_argument('--top_p', type=float, default=0,
help='Limit sampling to p nucleus sampling. If 0, use all.')
parser.add_argument('--length', type=int, default=200,
help='what sequence length to generate')
parser.add_argument('--max_context', type=int, default=384,
help='Maximum context length the model uses during generation')
parser.add_argument('--batch_size', type=int, default=10,
help='what sequence length to generate')
parser.add_argument("--temperature", type=float, default=1.0)
args = parser.parse_args()
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# Load the best saved model.
with open(os.path.join(args.work_dir, 'model-best.pt'), 'rb') as f:
model = torch.load(f, map_location='cuda' if torch.cuda.is_available() else 'cpu')
if not torch.cuda.is_available():
model = model.float()
tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
NL = tokenizer.encode('\n')
model = model.to(device)
model.eval()
## Init
data = torch.tensor(NL*4 + tokenizer.encode(args.context)).to(device)
# Turn into a batch.
data.unsqueeze_(1)
data = data.repeat_interleave(args.batch_size, dim=1)
if not hasattr(model, 'init_mems'):
model = model.module
mems = model.init_mems()
for i in tqdm.trange(args.length):
## Grab a sample from the last frame, append to result list, append to `data`
# TODO: using mems breaks generation. Find a way to fix?
pred_hid, mems_ = predict(model, data[-args.max_context:], mems)
softmax = hidden_to_softmax(model, pred_hid[-1], top_k=args.top_k, temperature=args.temperature, top_p=args.top_p)
new_sample = torch.multinomial(softmax, num_samples=1).unsqueeze(-1).squeeze(2)
data = torch.cat((data, new_sample.t()), dim=0)
for i in range(data.size(1)):
print('=' * 40, 'sample', i + 1, '=' * 40)
# Chop off the newlines before printing
print(tokenizer.decode(data[4:, i].tolist()))
def predict(model, data, mems):
tgt_len = data.size(0)
with torch.no_grad():
hidden, new_mems = model._forward(data, mems=mems)
pred_hid = hidden[-tgt_len:]
return pred_hid, new_mems
def hidden_to_softmax(model, hidden, temperature=1, top_k=0, top_p=0):
"""Turn a hidden projection into log softmax.
Adapted from utils/proj_adaptive_softmax.py
"""
# pas stands for ProjectedAdaptiveSoftmax
pas = model.crit
logits = pas._compute_logit(hidden, pas.out_layers[0].weight,
pas.out_layers[0].bias, pas.out_projs[0])
logits = top_k_top_p_filtering(logits, top_k=top_k, top_p=top_p)
logits /= temperature
softmax = F.softmax(logits, dim=-1)
return softmax
def top_k_top_p_filtering(logits, top_k=0, top_p=0.0, filter_value=-float('Inf')):
""" Filter a distribution of logits using top-k and/or nucleus (top-p) filtering
https://gist.github.com/thomwolf/1a5a29f6962089e871b94cbd09daf317
Args:
logits: logits distribution shape (..., vocabulary size)
top_k >0: keep only top k tokens with highest probability (top-k filtering).
top_p >0.0: keep the top tokens with cumulative probability >= top_p (nucleus filtering).
"""
top_k = min(top_k, logits.size(-1)) # Safety check
if top_k > 0:
# Remove all tokens with a probability less than the last token of the top-k
indices_to_remove = logits < torch.topk(logits, top_k)[0][..., -1, None]
logits[indices_to_remove] = filter_value
if top_p > 0.0:
sorted_logits, sorted_indices = torch.sort(logits, descending=True)
cumulative_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)
# Remove tokens with cumulative probability above the threshold
sorted_indices_to_remove = cumulative_probs >= top_p
# Shift the indices to the right to keep also the first token above the threshold
sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone()
sorted_indices_to_remove[..., 0] = 0
indices_to_remove = torch.zeros_like(logits, dtype=torch.uint8).scatter_(
dim=-1, index=sorted_indices, src=sorted_indices_to_remove )
logits[indices_to_remove] = filter_value
return logits
if __name__ == '__main__':
main() | 0.674801 | 0.232299 |
import mock
import six
from senlin.api.middleware import fault
from senlin.api.openstack.v1 import policy_types
from senlin.common import exception as senlin_exc
from senlin.common import policy
from senlin.rpc import client as rpc_client
from senlin.tests.apiv1 import shared
from senlin.tests.common import base
@mock.patch.object(policy.Enforcer, 'enforce')
class PolicyTypeControllerTest(shared.ControllerTest, base.SenlinTestCase):
def setUp(self):
super(PolicyTypeControllerTest, self).setUp()
class DummyConfig(object):
bind_port = 8778
cfgopts = DummyConfig()
self.controller = policy_types.PolicyTypeController(options=cfgopts)
def test_policy_type_list(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'index', True)
req = self._get('/policy_types')
engine_response = [{'name': 'os.heat.stack'},
{'name': 'os.nova.server'}]
mock_call = self.patchobject(rpc_client.EngineClient, 'call',
return_value=engine_response)
response = self.controller.index(req, tenant_id=self.tenant)
self.assertEqual({'policy_types': engine_response}, response)
mock_call.assert_called_once_with(req.context,
('policy_type_list', {}))
def test_policy_type_list_err_denied_policy(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'index', False)
req = self._get('/policy_types')
resp = shared.request_with_middleware(fault.FaultWrapper,
self.controller.index,
req, tenant_id=self.tenant)
self.assertEqual(403, resp.status_int)
self.assertIn('403 Forbidden', six.text_type(resp))
def test_policy_type_schema(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'schema', True)
type_name = 'SimplePolicy'
req = self._get('/policy_types/%(type)s' % {'type': type_name})
engine_response = {
'policy_type': type_name,
'spec': {
'Foo': {'type': 'String', 'required': False},
'Bar': {'type': 'Integer', 'required': False},
},
}
mock_call = self.patchobject(rpc_client.EngineClient, 'call',
return_value=engine_response)
response = self.controller.schema(req, tenant_id=self.tenant,
type_name=type_name)
mock_call.assert_called_once_with(
req.context,
('policy_type_schema', {'type_name': type_name}))
self.assertEqual(engine_response, response)
def test_policy_type_schema_not_found(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'schema', True)
type_name = 'BogusPolicyType'
req = self._get('/policy_types/%(type)s' % {'type': type_name})
error = senlin_exc.PolicyTypeNotFound(policy_type=type_name)
mock_call = self.patchobject(rpc_client.EngineClient, 'call')
mock_call.side_effect = shared.to_remote_error(error)
resp = shared.request_with_middleware(fault.FaultWrapper,
self.controller.schema,
req, tenant_id=self.tenant,
type_name=type_name)
mock_call.assert_called_once_with(
req.context,
('policy_type_schema', {'type_name': type_name}))
self.assertEqual(404, resp.json['code'])
self.assertEqual('PolicyTypeNotFound', resp.json['error']['type'])
def test_policy_type_schema_err_denied_policy(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'schema', False)
type_name = 'BogusPolicyType'
req = self._get('/policy_types/%(type)s' % {'type': type_name})
resp = shared.request_with_middleware(fault.FaultWrapper,
self.controller.schema,
req, tenant_id=self.tenant,
type_name=type_name)
self.assertEqual(403, resp.status_int)
self.assertIn('403 Forbidden', six.text_type(resp)) | senlin/tests/apiv1/test_policy_types.py |
import mock
import six
from senlin.api.middleware import fault
from senlin.api.openstack.v1 import policy_types
from senlin.common import exception as senlin_exc
from senlin.common import policy
from senlin.rpc import client as rpc_client
from senlin.tests.apiv1 import shared
from senlin.tests.common import base
@mock.patch.object(policy.Enforcer, 'enforce')
class PolicyTypeControllerTest(shared.ControllerTest, base.SenlinTestCase):
def setUp(self):
super(PolicyTypeControllerTest, self).setUp()
class DummyConfig(object):
bind_port = 8778
cfgopts = DummyConfig()
self.controller = policy_types.PolicyTypeController(options=cfgopts)
def test_policy_type_list(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'index', True)
req = self._get('/policy_types')
engine_response = [{'name': 'os.heat.stack'},
{'name': 'os.nova.server'}]
mock_call = self.patchobject(rpc_client.EngineClient, 'call',
return_value=engine_response)
response = self.controller.index(req, tenant_id=self.tenant)
self.assertEqual({'policy_types': engine_response}, response)
mock_call.assert_called_once_with(req.context,
('policy_type_list', {}))
def test_policy_type_list_err_denied_policy(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'index', False)
req = self._get('/policy_types')
resp = shared.request_with_middleware(fault.FaultWrapper,
self.controller.index,
req, tenant_id=self.tenant)
self.assertEqual(403, resp.status_int)
self.assertIn('403 Forbidden', six.text_type(resp))
def test_policy_type_schema(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'schema', True)
type_name = 'SimplePolicy'
req = self._get('/policy_types/%(type)s' % {'type': type_name})
engine_response = {
'policy_type': type_name,
'spec': {
'Foo': {'type': 'String', 'required': False},
'Bar': {'type': 'Integer', 'required': False},
},
}
mock_call = self.patchobject(rpc_client.EngineClient, 'call',
return_value=engine_response)
response = self.controller.schema(req, tenant_id=self.tenant,
type_name=type_name)
mock_call.assert_called_once_with(
req.context,
('policy_type_schema', {'type_name': type_name}))
self.assertEqual(engine_response, response)
def test_policy_type_schema_not_found(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'schema', True)
type_name = 'BogusPolicyType'
req = self._get('/policy_types/%(type)s' % {'type': type_name})
error = senlin_exc.PolicyTypeNotFound(policy_type=type_name)
mock_call = self.patchobject(rpc_client.EngineClient, 'call')
mock_call.side_effect = shared.to_remote_error(error)
resp = shared.request_with_middleware(fault.FaultWrapper,
self.controller.schema,
req, tenant_id=self.tenant,
type_name=type_name)
mock_call.assert_called_once_with(
req.context,
('policy_type_schema', {'type_name': type_name}))
self.assertEqual(404, resp.json['code'])
self.assertEqual('PolicyTypeNotFound', resp.json['error']['type'])
def test_policy_type_schema_err_denied_policy(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'schema', False)
type_name = 'BogusPolicyType'
req = self._get('/policy_types/%(type)s' % {'type': type_name})
resp = shared.request_with_middleware(fault.FaultWrapper,
self.controller.schema,
req, tenant_id=self.tenant,
type_name=type_name)
self.assertEqual(403, resp.status_int)
self.assertIn('403 Forbidden', six.text_type(resp)) | 0.482429 | 0.11689 |
import logging, pickle, threading, urllib2
from multiprocessing.dummy import Pool
from flask import Flask, jsonify
from json import dumps
from flasgger import Swagger
from port_retrieval import get_nsi_topology, parse_ports
app = Flask(__name__)
Swagger(app)
topology_lock = threading.Lock()
#~ import logging_tree
#~ logging_tree.printout()
TOPOLOGY_FILENAME = 'topology.pkl'
@app.route("/nsi/domains")
def get_domains():
"""
Get list of NSI domains registered in NSI AG
---
tags:
- NSI domains
responses:
200:
description: Returns list of URN identifiers of NSI domains registered in NSI AG
schema:
type: array
items:
description: URI identifier of NSI domain, example urn:ogf:network:pionier.net.pl:2013:topology
type: string
"""
topology = get_nsi_topology(app.config["dds_service"])
with topology_lock:
with open(TOPOLOGY_FILENAME, 'w+b') as topology_file:
if topology:
pickle.dump(topology, topology_file)
else:
topology = pickle.load(topology_file)
return dumps(topology.keys(), indent=2), 200, {'Content-Type': 'application/json'}
@app.route("/nsi/domains/<string:domain>/ports")
def get_domain_ports(domain):
"""
Get list of ports in given NSI domain
---
tags:
- NSI domains
parameters:
- name: domain
in: path
description: URN identifier of NSI domain, example urn:ogf:network:pionier.net.pl:2013:topology
type: string
responses:
200:
description: list of URN identifiers of ports in the NSI domain
schema:
type: array
items:
description: port name
type: string
"""
ports = {}
try:
with topology_lock:
topology = pickle.load(open(TOPOLOGY_FILENAME, 'rb'))
if 'href' in topology[domain]:
ports = get_domain_ports(topology[domain]['href'])
topology[domain].update(ports)
with topology_lock:
pickle.dump(topology, open(TOPOLOGY_FILENAME, 'wb'))
else:
ports = topology[domain]
except:
import traceback
app.logger.error(traceback.format_exc())
return dumps(ports.keys(), indent=2), 200, {'Content-Type': 'application/json'}
@app.route("/nsi/domains/<string:domain>/ports/<string:port>")
def get_domain_port_vlans(domain, port):
"""
Get vlans for given port in the NSI domain
---
tags:
- NSI domains
parameters:
- name: domain
description: URN identifier of NSI domain, example urn:ogf:network:pionier.net.pl:2013:topology
in: path
type: string
required: true
- name: port
in: path
description: port name
type: string
required: true
responses:
200:
description: VLAN properties of the port
schema:
properties:
vlans_in:
type: string
description: list of incoming VLANs acceptable for NSI domain, example 4-2000,3000-4096
required: true
vlans_out:
type: string
description: list of outgoing VLANs acceptable for NSI domain, example 4-2000,3000-4096
required: true
"""
try:
with topology_lock:
topology = pickle.load(open(TOPOLOGY_FILENAME, 'rb'))
return jsonify(topology[domain][port])
except:
return jsonify([])
@app.route("/nsi/domains-full")
def get_domains_full():
"""
Get list of NSI domains and its ports
---
tags:
- NSI domains
responses:
200:
description: Returns list of NSI domains, its ports and vlan attributes
"""
topology = get_nsi_topology(app.config["dds_service"])
with topology_lock:
with open(TOPOLOGY_FILENAME, 'w+b') as topology_file:
if topology:
pickle.dump(topology, topology_file)
else:
topology = pickle.load(topology_file)
domain_url_list = []
for domain, desc in topology.items():
if 'href' in desc:
domain_url_list.append((domain, topology[domain]['href']))
del desc['href']
def fetch(domain_url):
try:
domain, url = domain_url
app.logger.info("Getting %s",url)
data = urllib2.urlopen(url, timeout=1).read()
app.logger.info("Data retrieved from %s", url)
return domain, data
except EnvironmentError as e:
app.logger.info("Failing get %s", url)
return domain, None
pool = Pool(20)
for domain, content in pool.imap_unordered(fetch, domain_url_list):
if content:
ports = parse_ports(content)
topology[domain].update(ports)
return jsonify(topology) | src/rest_api.py | import logging, pickle, threading, urllib2
from multiprocessing.dummy import Pool
from flask import Flask, jsonify
from json import dumps
from flasgger import Swagger
from port_retrieval import get_nsi_topology, parse_ports
app = Flask(__name__)
Swagger(app)
topology_lock = threading.Lock()
#~ import logging_tree
#~ logging_tree.printout()
TOPOLOGY_FILENAME = 'topology.pkl'
@app.route("/nsi/domains")
def get_domains():
"""
Get list of NSI domains registered in NSI AG
---
tags:
- NSI domains
responses:
200:
description: Returns list of URN identifiers of NSI domains registered in NSI AG
schema:
type: array
items:
description: URI identifier of NSI domain, example urn:ogf:network:pionier.net.pl:2013:topology
type: string
"""
topology = get_nsi_topology(app.config["dds_service"])
with topology_lock:
with open(TOPOLOGY_FILENAME, 'w+b') as topology_file:
if topology:
pickle.dump(topology, topology_file)
else:
topology = pickle.load(topology_file)
return dumps(topology.keys(), indent=2), 200, {'Content-Type': 'application/json'}
@app.route("/nsi/domains/<string:domain>/ports")
def get_domain_ports(domain):
    """
    Get list of ports in given NSI domain
    ---
    tags:
      - NSI domains
    parameters:
      - name: domain
        in: path
        description: URN identifier of NSI domain, example urn:ogf:network:pionier.net.pl:2013:topology
        type: string
    responses:
      200:
        description: list of URN identifiers of ports in the NSI domain
        schema:
          type: array
          items:
            description: port name
            type: string
    """
    ports = {}
    try:
        with topology_lock:
            with open(TOPOLOGY_FILENAME, 'rb') as topology_file:
                topology = pickle.load(topology_file)
            if 'href' in topology[domain]:
                # The cached entry still only carries a topology URL: fetch
                # and parse its port list now.  BUG FIX: the original
                # recursively called this *view function* with the URL as the
                # domain name, which could never match a topology key.
                content = urllib2.urlopen(topology[domain]['href'],
                                          timeout=1).read()
                ports = parse_ports(content)
                topology[domain].update(ports)
                # Persist the enriched entry.  BUG FIX: the lock is already
                # held here -- the original re-acquired the non-reentrant
                # threading.Lock at this point, deadlocking the thread.
                with open(TOPOLOGY_FILENAME, 'wb') as topology_file:
                    pickle.dump(topology, topology_file)
            else:
                ports = topology[domain]
    except Exception:
        # Best effort: an unreadable cache, unknown domain or fetch failure
        # yields an empty port list (logged, not propagated).
        import traceback
        app.logger.error(traceback.format_exc())
    return dumps(ports.keys(), indent=2), 200, {'Content-Type': 'application/json'}
@app.route("/nsi/domains/<string:domain>/ports/<string:port>")
def get_domain_port_vlans(domain, port):
    """
    Get vlans for given port in the NSI domain
    ---
    tags:
      - NSI domains
    parameters:
      - name: domain
        description: URN identifier of NSI domain, example urn:ogf:network:pionier.net.pl:2013:topology
        in: path
        type: string
        required: true
      - name: port
        in: path
        description: port name
        type: string
        required: true
    responses:
      200:
        description: VLAN properties of the port
        schema:
          properties:
            vlans_in:
              type: string
              description: list of incoming VLANs acceptable for NSI domain, example 4-2000,3000-4096
              required: true
            vlans_out:
              type: string
              description: list of outgoing VLANs acceptable for NSI domain, example 4-2000,3000-4096
              required: true
    """
    try:
        with topology_lock:
            # FIX: use a context manager so the cache file handle is closed
            # (the original leaked the handle from pickle.load(open(...))).
            with open(TOPOLOGY_FILENAME, 'rb') as topology_file:
                topology = pickle.load(topology_file)
        return jsonify(topology[domain][port])
    except (EnvironmentError, EOFError, KeyError, pickle.PickleError):
        # Unknown domain/port or missing/corrupt cache: keep the original
        # best-effort contract of returning an empty JSON list.  (Narrowed
        # from a bare except:, which also hid programming errors.)
        return jsonify([])
@app.route("/nsi/domains-full")
def get_domains_full():
    """
    Get list of NSI domains and its ports
    ---
    tags:
      - NSI domains
    responses:
      200:
        description: Returns list of NSI domains, its ports and vlan attributes
    """
    topology = get_nsi_topology(app.config["dds_service"])
    with topology_lock:
        if topology:
            # Fresh topology from the DDS service: refresh the on-disk cache.
            with open(TOPOLOGY_FILENAME, 'wb') as topology_file:
                pickle.dump(topology, topology_file)
        else:
            # DDS lookup failed/empty: fall back to the cached copy.
            # BUG FIX: the original 'w+b' mode truncated the cache before
            # pickle.load() ran, so this branch could never recover anything.
            with open(TOPOLOGY_FILENAME, 'rb') as topology_file:
                topology = pickle.load(topology_file)
    # Collect (domain, url) pairs for entries whose ports still need fetching.
    domain_url_list = []
    for domain, desc in topology.items():
        if 'href' in desc:
            domain_url_list.append((domain, desc['href']))
            del desc['href']

    def fetch(domain_url):
        """Fetch one domain's topology document; return (domain, data|None)."""
        domain, url = domain_url
        try:
            app.logger.info("Getting %s", url)
            data = urllib2.urlopen(url, timeout=1).read()
            app.logger.info("Data retrieved from %s", url)
            return domain, data
        except EnvironmentError:
            app.logger.info("Failing get %s", url)
            return domain, None

    # Thread pool (multiprocessing.dummy): the work is I/O bound.
    pool = Pool(20)
    try:
        for domain, content in pool.imap_unordered(fetch, domain_url_list):
            if content:
                topology[domain].update(parse_ports(content))
    finally:
        # FIX: the original never closed the pool, leaking 20 worker
        # threads per request.
        pool.close()
        pool.join()
    return jsonify(topology)
__author__ = "<NAME> (<EMAIL>)"
import json
import time
import urllib
import urllib2
#HTTP Method code
# Integer constants passed to http_call() to select the request style.
GET = 0
POST = 1
UPLOAD = 2
#Keyword for GET method
# API 2.0 method paths containing any of these substrings are sent as GET.
GET_KEYWORDS = ["get", "list", "batch"]
class APIError(StandardError):
    """Exception raised when the Renren API returns an error payload.

    code is the API's error_code; the message (error_msg) is stored by
    StandardError (Python 2 base exception class).
    """
    def __init__(self, code, message):
        self.code = code
        StandardError.__init__(self, message)
    def __unicode__(self):
        # self.message is populated by StandardError.__init__ (Python 2).
        return u"APIError: %s: %s" % (self.code, self.message)
    def __str__(self):
        # Byte-string form: UTF-8 encode the unicode representation.
        return unicode(self).encode("utf-8")
def encode_str(obj):
    """Encode an object into a utf-8 byte string.

    Python 2 semantics: unicode is encoded to UTF-8, byte strings pass
    through unchanged, and any other object is converted with str().
    """
    if isinstance(obj, basestring):
        return obj.encode("utf-8") if isinstance(obj, unicode) else obj
    return str(obj)
def encode_params(**kw):
    """Return a URL-encoded "k1=v1&k2=v2" string for the keyword parameters.

    Values are normalized with encode_str() and percent-quoted; keys are
    used as-is.
    """
    return "&".join(["%s=%s" % (k, urllib.quote(encode_str(v)))
                     for k, v in kw.iteritems()])
def guess_content_type(name):
    """Return the image MIME type guessed from the filename's extension.

    Unrecognised extensions fall back to "image/jpg", matching the
    original behaviour.
    """
    known_types = (
        (".jpg", "image/jpg"),
        (".jpeg", "image/jpeg"),
        (".png", "image/png"),
        (".gif", "image/gif"),
        (".bmp", "image/bmp"),
    )
    for extension, content_type in known_types:
        if name.endswith(extension):
            return content_type
    return "image/jpg"
def encode_multipart(filename=None, **kw):
    """Return a (body, boundary) tuple for a multipart/form-data request.

    The boundary is derived from the current time in milliseconds.  Keyword
    values with a ``read`` attribute are emitted as file parts (named by
    *filename*, or the object's own ``name`` when *filename* is None);
    all other values become plain form fields.
    """
    boundary = "----------%s" % hex(int(time.time() * 1000))
    params = []
    for k, v in kw.iteritems():
        params.append("--%s" % boundary)
        if hasattr(v, "read"):
            # File-like value: upload its full contents as a file part.
            content = v.read()
            if hasattr(v, "name") and filename is None:
                filename = v.name
            params.append("Content-Disposition: form-data; name=\"%s\";"
                          "filename=\"%s\"" % (k, filename))
            params.append("Content-Type: %s\r\n" %
                          guess_content_type(filename))
            params.append(content)
        else:
            # Plain form field.
            params.append("Content-Disposition: form-data; name=\"%s\"\r\n"
                          % k)
            params.append(encode_str(v))
    # Closing boundary marker.
    params.append("--%s--\r\n" % boundary)
    return "\r\n".join(params), boundary
def http_call(api_url, http_method=POST, **kw):
    """Send a HTTP request to the url and return the decoded JSON object.

    api_url     -- full URL of the API endpoint.
    http_method -- one of the module constants GET, POST, UPLOAD.
    kw          -- request parameters; for UPLOAD, file-like values are
                   sent as multipart file parts.

    Raises APIError when the server's JSON payload carries an error_code;
    transport failures (urllib2.HTTPError etc.) propagate unchanged.
    """
    boundary = None
    if http_method == UPLOAD:
        params, boundary = encode_multipart(**kw)
    else:
        params = encode_params(**kw)
    if http_method == GET:
        req = urllib2.Request("%s?%s" % (api_url, params))
    else:
        req = urllib2.Request(api_url, data=params)
    if http_method == UPLOAD:
        req.add_header("Content-Type",
                       "multipart/form-data; boundary=%s" % boundary)
    # FIX: close the response object (the original leaked it), and drop the
    # original `except urllib2.HTTPError as e: raise e` handler -- it was a
    # no-op that additionally discarded the traceback in Python 2.
    resp = urllib2.urlopen(req)
    try:
        content = resp.read()
    finally:
        resp.close()
    result = json.loads(content)
    # Error payloads are dicts carrying error_code/error_msg; successful
    # batch calls may return a plain list, which never signals an error.
    if type(result) is not list and result.get("error_code"):
        raise APIError(result.get("error_code", ""),
                       result.get("error_msg", ""))
    return result
class APIClient:
    """OAuth2 client for the Renren API.

    Implements the authorization-code flow (authorize URL, token exchange,
    token refresh).  Attribute access returns an API wrapper, so calls look
    like ``client.users.getInfo(...)`` once an access token is set.
    """
    #Oauth URI
    OAUTH_URI = "https://graph.renren.com/oauth/"
    def __init__(self, app_key, app_secret, redirect_uri,
                 response_type="code", version=2):
        # version selects the dispatch wrapper in __getattr__:
        # 2 -> APIWrapperV2 (API 2.0 REST paths), else APIWrapper (API 1.0).
        self.app_key = str(app_key)
        self.app_secret = str(app_secret)
        self.redirect_uri = redirect_uri
        self.response_type = response_type
        self.access_token = None
        self.version = version
    def get_authorize_url(self, redirect_uri=None, scope=None,
                          force_relogin=False):
        """Return the authorization URL the user should visit.

        redirect_uri  -- overrides the client's default redirect URI.
        scope         -- optional iterable of permission scope strings.
        force_relogin -- if True, force the user to re-enter credentials.
        """
        redirect = redirect_uri if redirect_uri else self.redirect_uri
        params = dict(client_id=self.app_key, redirect_uri=redirect,
                      response_type=self.response_type)
        if scope:
            params["scope"] = " ".join(scope)
        if force_relogin:
            params["x_renew"] = "true"
        return "%s%s?%s" % (APIClient.OAUTH_URI, "authorize",
                            encode_params(**params))
    def request_access_token(self, code, redirect_uri=None):
        """Exchange an authorization code for the access token dict.

        The dict includes access_token, expires_in, refresh_token,
        and scope.
        """
        redirect = redirect_uri if redirect_uri else self.redirect_uri
        return http_call("%s%s" % (APIClient.OAUTH_URI, "token"), POST,
                         grant_type="authorization_code", code=code,
                         client_id=self.app_key, redirect_uri=redirect,
                         client_secret=self.app_secret)
    def refresh_token(self, refresh_token):
        """Return the refreshed access token as a dict.

        The dict includes access_token, expires_in, refresh_token,
        and scope.
        """
        return http_call("%s%s" % (APIClient.OAUTH_URI, "token"), POST,
                         grant_type="refresh_token",
                         refresh_token=refresh_token,
                         client_id=self.app_key,
                         client_secret=self.app_secret)
    def set_access_token(self, access_token):
        """Set access token for the API client."""
        self.access_token = str(access_token)
    def __getattr__(self, attr):
        # Unknown attributes become API namespaces, e.g. ``client.users``.
        if self.version == 2:
            return APIWrapperV2(self, attr)
        return APIWrapper(self, attr)
class APIWrapper:
    """Wrapper Class for API 1.0.

    ``client.users.getInfo(**kw)`` becomes a POST to API_SERVER with
    method="users.getInfo" plus the protocol parameters below.
    """
    #API Server URI
    API_SERVER = "https://api.renren.com/restserver.do"
    #API Version
    API_VERSION = "1.0"
    def __init__(self, client, name):
        self.client = client
        self.name = name
    def __getattr__(self, attr):
        def request(**kw):
            """Send a HTTP Post request to the API server with specified
            method.
            """
            # Common protocol parameters required by every API 1.0 call.
            params = dict(kw, access_token=self.client.access_token,
                          method="%s.%s" % (self.name, attr),
                          call_id=str(int(time.time() * 1000)),
                          v=APIWrapper.API_VERSION)
            if not params.get("format"):
                params["format"] = "JSON"
            # File uploads need multipart encoding; everything else is POST.
            http_method = UPLOAD if attr == "upload" else POST
            return http_call(APIWrapper.API_SERVER, http_method, **params)
        return request
class APIWrapperV2():
    """Wrapper class for API 2.0.

    Attribute access accumulates a slash-separated method path, so
    ``client.user.get(...)`` calls ``<API_SERVER>/user/get``.
    """
    #API Server URI
    API_SERVER = "https://api.renren.com/v2"
    def __init__(self, client, name):
        self.client = client
        self.name = name
    def __getattr__(self, attr):
        # Extend the method path: client.feed.list -> "feed/list".
        return APIWrapperV2(self.client, "%s/%s" % (self.name, attr))
    def __call__(self, **kw):
        """Send a HTTP request to the API server for the accumulated
        method path, choosing GET/UPLOAD/POST from the path name.
        """
        params = dict(kw, access_token=self.client.access_token)
        http_method = POST
        if any(w in self.name for w in GET_KEYWORDS):
            http_method = GET
        elif "upload" in self.name:
            http_method = UPLOAD
        # FIX: the original return line was corrupted with stray text after
        # the closing parenthesis; restored the plain call.
        return http_call("%s/%s" % (APIWrapperV2.API_SERVER, self.name),
                         http_method, **params)
__author__ = "<NAME> (<EMAIL>)"
import json
import time
import urllib
import urllib2
#HTTP Method code
# Integer constants passed to http_call() to select the request style.
GET = 0
POST = 1
UPLOAD = 2
#Keyword for GET method
# API 2.0 method paths containing any of these substrings are sent as GET.
GET_KEYWORDS = ["get", "list", "batch"]
class APIError(StandardError):
    """Exception raised when the Renren API returns an error payload.

    code is the API's error_code; the message (error_msg) is stored by
    StandardError (Python 2 base exception class).
    """
    def __init__(self, code, message):
        self.code = code
        StandardError.__init__(self, message)
    def __unicode__(self):
        # self.message is populated by StandardError.__init__ (Python 2).
        return u"APIError: %s: %s" % (self.code, self.message)
    def __str__(self):
        # Byte-string form: UTF-8 encode the unicode representation.
        return unicode(self).encode("utf-8")
def encode_str(obj):
    """Encode an object into a utf-8 byte string.

    Python 2 semantics: unicode is encoded to UTF-8, byte strings pass
    through unchanged, and any other object is converted with str().
    """
    if isinstance(obj, basestring):
        return obj.encode("utf-8") if isinstance(obj, unicode) else obj
    return str(obj)
def encode_params(**kw):
    """Return a URL-encoded "k1=v1&k2=v2" string for the keyword parameters.

    Values are normalized with encode_str() and percent-quoted; keys are
    used as-is.
    """
    return "&".join(["%s=%s" % (k, urllib.quote(encode_str(v)))
                     for k, v in kw.iteritems()])
def guess_content_type(name):
    """Return the image MIME type guessed from the filename's extension.

    Unrecognised extensions fall back to "image/jpg", matching the
    original behaviour.
    """
    known_types = (
        (".jpg", "image/jpg"),
        (".jpeg", "image/jpeg"),
        (".png", "image/png"),
        (".gif", "image/gif"),
        (".bmp", "image/bmp"),
    )
    for extension, content_type in known_types:
        if name.endswith(extension):
            return content_type
    return "image/jpg"
def encode_multipart(filename=None, **kw):
    """Return a (body, boundary) tuple for a multipart/form-data request.

    The boundary is derived from the current time in milliseconds.  Keyword
    values with a ``read`` attribute are emitted as file parts (named by
    *filename*, or the object's own ``name`` when *filename* is None);
    all other values become plain form fields.
    """
    boundary = "----------%s" % hex(int(time.time() * 1000))
    params = []
    for k, v in kw.iteritems():
        params.append("--%s" % boundary)
        if hasattr(v, "read"):
            # File-like value: upload its full contents as a file part.
            content = v.read()
            if hasattr(v, "name") and filename is None:
                filename = v.name
            params.append("Content-Disposition: form-data; name=\"%s\";"
                          "filename=\"%s\"" % (k, filename))
            params.append("Content-Type: %s\r\n" %
                          guess_content_type(filename))
            params.append(content)
        else:
            # Plain form field.
            params.append("Content-Disposition: form-data; name=\"%s\"\r\n"
                          % k)
            params.append(encode_str(v))
    # Closing boundary marker.
    params.append("--%s--\r\n" % boundary)
    return "\r\n".join(params), boundary
def http_call(api_url, http_method=POST, **kw):
    """Send a HTTP request to the url and return the decoded JSON object.

    api_url     -- full URL of the API endpoint.
    http_method -- one of the module constants GET, POST, UPLOAD.
    kw          -- request parameters; for UPLOAD, file-like values are
                   sent as multipart file parts.

    Raises APIError when the server's JSON payload carries an error_code;
    transport failures (urllib2.HTTPError etc.) propagate unchanged.
    """
    boundary = None
    if http_method == UPLOAD:
        params, boundary = encode_multipart(**kw)
    else:
        params = encode_params(**kw)
    if http_method == GET:
        req = urllib2.Request("%s?%s" % (api_url, params))
    else:
        req = urllib2.Request(api_url, data=params)
    if http_method == UPLOAD:
        req.add_header("Content-Type",
                       "multipart/form-data; boundary=%s" % boundary)
    # FIX: close the response object (the original leaked it), and drop the
    # original `except urllib2.HTTPError as e: raise e` handler -- it was a
    # no-op that additionally discarded the traceback in Python 2.
    resp = urllib2.urlopen(req)
    try:
        content = resp.read()
    finally:
        resp.close()
    result = json.loads(content)
    # Error payloads are dicts carrying error_code/error_msg; successful
    # batch calls may return a plain list, which never signals an error.
    if type(result) is not list and result.get("error_code"):
        raise APIError(result.get("error_code", ""),
                       result.get("error_msg", ""))
    return result
class APIClient:
    """OAuth2 client for the Renren API.

    Implements the authorization-code flow (authorize URL, token exchange,
    token refresh).  Attribute access returns an API wrapper, so calls look
    like ``client.users.getInfo(...)`` once an access token is set.
    """
    #Oauth URI
    OAUTH_URI = "https://graph.renren.com/oauth/"
    def __init__(self, app_key, app_secret, redirect_uri,
                 response_type="code", version=2):
        # version selects the dispatch wrapper in __getattr__:
        # 2 -> APIWrapperV2 (API 2.0 REST paths), else APIWrapper (API 1.0).
        self.app_key = str(app_key)
        self.app_secret = str(app_secret)
        self.redirect_uri = redirect_uri
        self.response_type = response_type
        self.access_token = None
        self.version = version
    def get_authorize_url(self, redirect_uri=None, scope=None,
                          force_relogin=False):
        """Return the authorization URL the user should visit.

        redirect_uri  -- overrides the client's default redirect URI.
        scope         -- optional iterable of permission scope strings.
        force_relogin -- if True, force the user to re-enter credentials.
        """
        redirect = redirect_uri if redirect_uri else self.redirect_uri
        params = dict(client_id=self.app_key, redirect_uri=redirect,
                      response_type=self.response_type)
        if scope:
            params["scope"] = " ".join(scope)
        if force_relogin:
            params["x_renew"] = "true"
        return "%s%s?%s" % (APIClient.OAUTH_URI, "authorize",
                            encode_params(**params))
    def request_access_token(self, code, redirect_uri=None):
        """Exchange an authorization code for the access token dict.

        The dict includes access_token, expires_in, refresh_token,
        and scope.
        """
        redirect = redirect_uri if redirect_uri else self.redirect_uri
        return http_call("%s%s" % (APIClient.OAUTH_URI, "token"), POST,
                         grant_type="authorization_code", code=code,
                         client_id=self.app_key, redirect_uri=redirect,
                         client_secret=self.app_secret)
    def refresh_token(self, refresh_token):
        """Return the refreshed access token as a dict.

        The dict includes access_token, expires_in, refresh_token,
        and scope.
        """
        return http_call("%s%s" % (APIClient.OAUTH_URI, "token"), POST,
                         grant_type="refresh_token",
                         refresh_token=refresh_token,
                         client_id=self.app_key,
                         client_secret=self.app_secret)
    def set_access_token(self, access_token):
        """Set access token for the API client."""
        self.access_token = str(access_token)
    def __getattr__(self, attr):
        # Unknown attributes become API namespaces, e.g. ``client.users``.
        if self.version == 2:
            return APIWrapperV2(self, attr)
        return APIWrapper(self, attr)
class APIWrapper:
    """Wrapper Class for API 1.0.

    ``client.users.getInfo(**kw)`` becomes a POST to API_SERVER with
    method="users.getInfo" plus the protocol parameters below.
    """
    #API Server URI
    API_SERVER = "https://api.renren.com/restserver.do"
    #API Version
    API_VERSION = "1.0"
    def __init__(self, client, name):
        self.client = client
        self.name = name
    def __getattr__(self, attr):
        def request(**kw):
            """Send a HTTP Post request to the API server with specified
            method.
            """
            # Common protocol parameters required by every API 1.0 call.
            params = dict(kw, access_token=self.client.access_token,
                          method="%s.%s" % (self.name, attr),
                          call_id=str(int(time.time() * 1000)),
                          v=APIWrapper.API_VERSION)
            if not params.get("format"):
                params["format"] = "JSON"
            # File uploads need multipart encoding; everything else is POST.
            http_method = UPLOAD if attr == "upload" else POST
            return http_call(APIWrapper.API_SERVER, http_method, **params)
        return request
class APIWrapperV2():
    """Wrapper class for API 2.0.

    Attribute access accumulates a slash-separated method path, so
    ``client.user.get(...)`` calls ``<API_SERVER>/user/get``.
    """
    #API Server URI
    API_SERVER = "https://api.renren.com/v2"
    def __init__(self, client, name):
        self.client = client
        self.name = name
    def __getattr__(self, attr):
        # Extend the method path: client.feed.list -> "feed/list".
        return APIWrapperV2(self.client, "%s/%s" % (self.name, attr))
    def __call__(self, **kw):
        """Send a HTTP request to the API server for the accumulated
        method path, choosing GET/UPLOAD/POST from the path name.
        """
        params = dict(kw, access_token=self.client.access_token)
        http_method = POST
        if any(w in self.name for w in GET_KEYWORDS):
            http_method = GET
        elif "upload" in self.name:
            http_method = UPLOAD
        # FIX: the original return line was corrupted with stray text after
        # the closing parenthesis; restored the plain call.
        return http_call("%s/%s" % (APIWrapperV2.API_SERVER, self.name),
                         http_method, **params)