class IllegalOrderException(Exception):
pass
|
__all__ = ['wfpackage', 'wfsite']
|
# pylint: disable=C1001
class Config:
# Custom
DEBUG_FILE = '/tmp/gmw.log'
ENVIRONMENT = 'development'
# Application related
REPOSITORY_WORTH_SOLID = 6
REPOSITORY_WORTH_DEFAULT = 3
# Flask
DEBUG = True
PERMANENT_SESSION_LIFETIME = 1209600 # 14 days
SECRET_KEY = ''
TESTING = False
# SQLAlchemy
SQLALCHEMY_ECHO = False
SQLALCHEMY_POOL_RECYCLE = 3600
SQLALCHEMY_TRACK_MODIFICATIONS = False
# Celery
CELERY_TIMEZONE = 'Europe/Berlin'
CELERY_BROKER_URL = ''
# Oauth
GITHUB_AUTH = (None, None)
GITHUB_OAUTH = {}
class ConfigDevelopment(Config):
SQLALCHEMY_TRACK_MODIFICATIONS = True
SQLALCHEMY_ECHO = True
class ConfigTesting(Config):
SQLALCHEMY_DATABASE_URI = 'sqlite:///:memory:'
ENVIRONMENT = 'testing'
GITHUB_AUTH = ('Test', '')
SECRET_KEY = 'testing' # noqa
TESTING = True
class ConfigProduction(Config):
DEBUG = False
ENVIRONMENT = 'production'
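# Usage sketch (an assumption; this module defines no app factory of its
# own): a Flask app would typically select one of these classes at
# creation time.
#
#   from flask import Flask
#
#   def create_app(config_object=ConfigDevelopment):
#       app = Flask(__name__)
#       app.config.from_object(config_object)
#       return app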
|
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for panoptic_maskrcnn.py."""
import os
from absl.testing import parameterized
import tensorflow as tf
from official.vision.beta.configs import decoders as decoder_cfg
from official.vision.beta.configs import semantic_segmentation as segmentation_cfg
from official.vision.beta.projects.panoptic_maskrcnn.configs import panoptic_maskrcnn as cfg
from official.vision.beta.projects.panoptic_maskrcnn.tasks import panoptic_maskrcnn
class PanopticMaskRCNNTaskTest(tf.test.TestCase, parameterized.TestCase):
@parameterized.parameters(
(['all'],),
(['backbone'],),
(['segmentation_backbone'],),
(['segmentation_decoder'],),
(['backbone', 'segmentation_backbone'],),
(['segmentation_backbone', 'segmentation_decoder'],))
def test_model_initializing(self, init_checkpoint_modules):
shared_backbone = ('segmentation_backbone' not in init_checkpoint_modules)
shared_decoder = ('segmentation_decoder' not in init_checkpoint_modules and
shared_backbone)
task_config = cfg.PanopticMaskRCNNTask(
model=cfg.PanopticMaskRCNN(
num_classes=2,
input_size=[640, 640, 3],
segmentation_model=segmentation_cfg.SemanticSegmentationModel(
decoder=decoder_cfg.Decoder(type='fpn')),
shared_backbone=shared_backbone,
shared_decoder=shared_decoder))
task = panoptic_maskrcnn.PanopticMaskRCNNTask(task_config)
model = task.build_model()
ckpt = tf.train.Checkpoint(**model.checkpoint_items)
ckpt_save_dir = self.create_tempdir().full_path
ckpt.save(os.path.join(ckpt_save_dir, 'ckpt'))
if (init_checkpoint_modules == ['all'] or
'backbone' in init_checkpoint_modules):
task._task_config.init_checkpoint = ckpt_save_dir
if ('segmentation_backbone' in init_checkpoint_modules or
'segmentation_decoder' in init_checkpoint_modules):
task._task_config.segmentation_init_checkpoint = ckpt_save_dir
task._task_config.init_checkpoint_modules = init_checkpoint_modules
task.initialize(model)
if __name__ == '__main__':
tf.test.main()
|
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
ohlc_data = pd.read_csv('nifty-50/SBIN.csv', index_col=0, parse_dates=True)
daily = np.array(ohlc_data)
money = int(input('Enter amount you want to invest : '))
risk = int(input('Enter no of shares you want to buy/sell in each transaction : '))
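# Column convention assumed by the indexing below (inferred from the code,
# not documented in the CSV itself):
# data[i, 0] = Open, data[i, 1] = High, data[i, 2] = Low, data[i, 3] = Close.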
def doji_detection(data, cash, risk):
initial_amount = cash
transactions = 0
stocks = 0
    for i in range(1, len(data)):  # start at 1 so data[i - 1] never wraps to the last row
invest = stocks * data[i, 3]
# Bullish Doji
if abs(data[i, 3] - data[i, 0]) < 5 and data[i, 3] < data[i - 1, 3] and cash >= 5 * data[i, 3] and risk * data[i, 3] < cash:
# Buying shares at close
cash -= risk * data[i, 3]
invest += risk * data[i, 3]
stocks += risk
print('\n', i, ') BULL: Bought ', risk, ' at ', data[i, 3], '|| Cash Left = ', cash, '|| Invested = ', invest)
transactions += 1
# Bearish Doji
if abs(data[i, 3] - data[i, 0]) < 5 and data[i, 3] > data[i - 1, 3] and transactions > 0 and stocks > 0:
# Selling shares at closing
cash += risk * data[i, 3]
invest -= risk * data[i, 3]
stocks -= risk
print('\n', i, ') BEAR: Sold ', risk, ' at ', data[i, 3], '|| Cash Left = ', cash, '|| Invested = ', invest)
transactions += 1
        if i == len(data) - 1:  # print the summary after the last bar
gross = data[i, 3] * stocks + cash
print("\n\n\n\tTotal Amount : Rs. ", gross)
print("\tInitial Amount : Rs. ", initial_amount)
print("\tProfit / Loss : Rs. ", gross - initial_amount)
print('\n\tThanks for using Trading Bot !!!')
"""
def ohlc_plot(data, window, name):
Chosen = data[-window:, ]
for i in range(len(Chosen)):
plt.vlines(x=i, ymin=Chosen[i, 2], ymax=Chosen[i, 1], color='black', linewidth=1)
if Chosen[i, 3] > Chosen[i, 0]:
color_chosen = 'green'
plt.vlines(x=i, ymin=Chosen[i, 0], ymax=Chosen[i, 3], color=color_chosen, linewidth=4)
if Chosen[i, 3] < Chosen[i, 0]:
color_chosen = 'red'
plt.vlines(x=i, ymin=Chosen[i, 3], ymax=Chosen[i, 0], color=color_chosen, linewidth=4)
if Chosen[i, 3] == Chosen[i, 0]:
color_chosen = 'black'
plt.vlines(x=i, ymin=Chosen[i, 3], ymax=Chosen[i, 0], color=color_chosen, linewidth=4)
plt.grid()
plt.title('SBIN (2000-21)')
plt.show()
"""
# Using the function
result = doji_detection(daily, money, risk)
# print(result)
# ohlc_plot(daily, 40, '')
|
from scipy.cluster.vq import kmeans
import numpy as np
import pymc3 as pm
import theano.tensor as tt
cholesky = pm.distributions.dist_math.Cholesky(nofail=True, lower=True)
solve_lower = tt.slinalg.Solve(A_structure='lower_triangular')
solve_upper = tt.slinalg.Solve(A_structure='upper_triangular')
solve = tt.slinalg.Solve(A_structure='general')
def infer_shape(X, n_points=None):
if n_points is None:
try:
            n_points = int(X.shape[0])
except TypeError:
raise TypeError("Cannot infer 'shape', provide as an argument")
return n_points
def stabilize(K):
""" adds small diagonal to a covariance matrix """
return K + 1e-6 * tt.identity_like(K)
def kmeans_inducing_points(n_inducing, X):
# first whiten X
if isinstance(X, tt.TensorConstant):
X = X.value
elif isinstance(X, (np.ndarray, tuple, list)):
X = np.asarray(X)
else:
raise TypeError(("To use K-means initialization, "
"please provide X as a type that "
"can be cast to np.ndarray, instead "
"of {}".format(type(X))))
scaling = np.std(X, 0)
# if std of a column is very small (zero), don't normalize that column
scaling[scaling <= 1e-6] = 1.0
Xw = X / scaling
Xu, distortion = kmeans(Xw, n_inducing)
return Xu * scaling
def conditioned_vars(varnames):
""" Decorator for validating attrs that are conditioned on. """
def gp_wrapper(cls):
def make_getter(name):
def getter(self):
value = getattr(self, name, None)
if value is None:
raise AttributeError(("'{}' not set. Provide as argument "
"to condition, or call 'prior' "
"first".format(name.lstrip("_"))))
else:
return value
return getter
def make_setter(name):
def setter(self, val):
setattr(self, name, val)
return setter
for name in varnames:
getter = make_getter('_' + name)
setter = make_setter('_' + name)
setattr(cls, name, property(getter, setter))
return cls
return gp_wrapper
def plot_gp_dist(ax, samples, x, plot_samples=True, palette="Reds"):
""" A helper function for plotting 1D GP posteriors from trace """
import matplotlib.pyplot as plt
cmap = plt.get_cmap(palette)
percs = np.linspace(51, 99, 40)
colors = (percs - np.min(percs)) / (np.max(percs) - np.min(percs))
samples = samples.T
x = x.flatten()
for i, p in enumerate(percs[::-1]):
upper = np.percentile(samples, p, axis=1)
lower = np.percentile(samples, 100-p, axis=1)
color_val = colors[i]
ax.fill_between(x, upper, lower, color=cmap(color_val), alpha=0.8)
if plot_samples:
# plot a few samples
idx = np.random.randint(0, samples.shape[1], 30)
ax.plot(x, samples[:,idx], color=cmap(0.9), lw=1, alpha=0.1)
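# Minimal usage sketch for plot_gp_dist (illustrative shapes, not from this
# module): `samples` holds posterior draws of the GP evaluated at inputs `x`.
#
#   import matplotlib.pyplot as plt
#   fig, ax = plt.subplots()
#   # samples: (n_draws, n_x) array of function draws, x: (n_x,) inputs
#   plot_gp_dist(ax, samples, x)
#   plt.show()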
|
import numpy as np
import torch
import math
from torch import nn, optim
from torch.utils.data import DataLoader
from torch.utils.data import SubsetRandomSampler
import importlib
import copy
import argparse
from torchvision import transforms, datasets
from torch.autograd import Variable
from torch.optim import Optimizer
from torch.optim.sgd import SGD
from torch.nn.utils import clip_grad_norm_
class CIFAR10RandomLabels(datasets.CIFAR10):
def __init__(self, corrupt_prob=0.0, num_classes=10, **kwargs):
super(CIFAR10RandomLabels, self).__init__(**kwargs)
self.n_classes = num_classes
if corrupt_prob > 0:
self.corrupt_labels(corrupt_prob)
def corrupt_labels(self, corrupt_prob):
labels = np.array(self.targets)
np.random.seed(12345)
mask = np.random.rand(len(labels)) <= corrupt_prob
rnd_labels = np.random.choice(self.n_classes, mask.sum())
labels[mask] = rnd_labels
labels = [int(x) for x in labels]
self.targets = labels
class MNISTRandomLabels(datasets.MNIST):
def __init__(self, corrupt_prob=0.0, num_classes=10, **kwargs):
super(MNISTRandomLabels, self).__init__(**kwargs)
self.n_classes = num_classes
if corrupt_prob > 0:
self.corrupt_labels(corrupt_prob)
def corrupt_labels(self, corrupt_prob):
labels = np.array(self.targets)
np.random.seed(12345)
mask = np.random.rand(len(labels)) <= corrupt_prob
rnd_labels = np.random.choice(self.n_classes, mask.sum())
labels[mask] = rnd_labels
labels = [int(x) for x in labels]
self.targets = labels
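# Usage sketch (hypothetical values; mirrors how main() below builds its
# loaders): with corrupt_prob=0.5 roughly half the labels are replaced by
# uniform draws over num_classes, using the fixed seed set in corrupt_labels.
#
#   ds = CIFAR10RandomLabels(root='./data', train=True, download=True,
#                            transform=transforms.ToTensor(),
#                            corrupt_prob=0.5)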
class Langevin_SGD(Optimizer):
def __init__(self, params, lr, weight_decay=0, nesterov=False, beta=1, K=100, D=50, sigma=0.5, decay_rate = 0.96, decay_steps=2000):
if lr < 0.0:
raise ValueError("Invalid learning rate: {}".format(lr))
if weight_decay < 0.0:
raise ValueError("Invalid weight_decay value: {}".format(weight_decay))
defaults = dict(lr=lr, weight_decay=weight_decay)
self.beta = beta
self.K = K
self.D = D
self.lr = lr
self.sigma = sigma
self.decay_rate = decay_rate
self.decay_steps = decay_steps
self.steps = 0
super(Langevin_SGD, self).__init__(params, defaults)
def __setstate__(self, state):
        super(Langevin_SGD, self).__setstate__(state)
for group in self.param_groups:
group.setdefault('nesterov', False)
def step(self, closure=None):
self.beta = 4 * self.lr / ((0.002*math.sqrt(2) * self.lr)**2)
gradient = []
for group in self.param_groups:
weight_decay = group['weight_decay']
clip_grad_norm_(group['params'], self.K, norm_type=2)
for p in group['params']:
if p.grad is None:
continue
d_p = p.grad.data
if len(p.shape) == 1 and p.shape[0] == 1:
                    p.data.add_(d_p, alpha=-self.lr)
else:
if weight_decay != 0:
                        d_p.add_(p.data, alpha=weight_decay)
unit_noise = Variable(p.data.new(p.size()).normal_())
                    p.data.add_(d_p, alpha=-self.lr)
                    p.data.add_(unit_noise, alpha=(2 * self.lr / self.beta) ** 0.5)
if torch.norm(p.data).item() >= self.D/2:
p.data = p.data / torch.norm(p.data) * (self.D/2)
gradient = gradient + (d_p.cpu().numpy().flatten().tolist())
if (self.steps > 0 and self.steps % self.decay_steps==0):
self.lr = self.lr * self.decay_rate
self.steps = self.steps + 1
if self.lr < 0.0005:
self.lr = 0.0005
return (np.array(gradient)).flatten()
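# Minimal usage sketch for Langevin_SGD (toy model, not from this file):
# each step applies the SGD update plus Gaussian noise scaled by
# (2 * lr / beta) ** 0.5 and returns the flattened gradient it applied.
#
#   toy = nn.Linear(10, 2)
#   opt = Langevin_SGD(toy.parameters(), lr=0.05)
#   x, y = torch.randn(8, 10), torch.randint(0, 2, (8,))
#   opt.zero_grad()
#   nn.functional.cross_entropy(toy(x), y).backward()
#   flat_grad = opt.step()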
def train(args, model, device, train_loader, criterion, optimizer, epoch, batchsize, num_batches):
sum_loss, sum_correct = 0, 0
model.train()
gradient_array = np.zeros((num_batches, count_parameters(model)))
for i, (data, target) in enumerate(train_loader):
data, target = data.to(device), target.to(device)
output = model(data)
loss = criterion(output, target)
pred = output.max(1)[1]
sum_correct += pred.eq(target).sum().item()
sum_loss += len(data) * loss.item()
optimizer.zero_grad()
loss.backward()
gradient = optimizer.step()
gradient_array[i] = gradient
return 1 - (sum_correct / len(train_loader.dataset)), sum_loss / len(train_loader.dataset), np.array(gradient_array)
def validate(args, model, device, val_loader, criterion, optimizer, length=0):
sum_loss, sum_correct = 0, 0
model.eval()
total_grad = []
count = 0
for i, (data, target) in enumerate(val_loader):
count = count + 1
data, target = data.to(device), target.to(device)
output = model(data)
loss = criterion(output, target)
pred = output.max(1)[1]
sum_correct += pred.eq(target).sum().item()
sum_loss += len(data) * criterion(output, target).item()
optimizer.zero_grad()
loss.backward()
gradient = []
params = list(model.parameters())
for p in params:
if p.grad is None:
continue
d_p = p.grad.data
gradient = gradient + (d_p.cpu().numpy().flatten().tolist())
gradient = (np.array(gradient)).flatten()
        if len(total_grad) == 0:
total_grad = gradient
else:
total_grad = total_grad + gradient
if (length == 0):
length = len(val_loader.dataset)
return 1 - (sum_correct / length), sum_loss / length, total_grad / count
def count_parameters(model):
return sum(p.numel() for p in model.parameters() if p.requires_grad)
def main():
parser = argparse.ArgumentParser(description='Training a fully connected NN with one hidden layer')
parser.add_argument('--no-cuda', default=False, action='store_true',
help='disables CUDA training')
parser.add_argument('--datadir', default='datasets', type=str,
help='path to the directory that contains the datasets (default: datasets)')
parser.add_argument('--dataset', default='CIFAR10', type=str,
help='name of the dataset (options: MNIST | CIFAR10 | CIFAR100 | SVHN, default: CIFAR10)')
parser.add_argument('--model', default='vgg', type=str,
help='architecture (options: fc | vgg, default: vgg)')
parser.add_argument('--epochs', default=1000, type=int,
help='number of epochs to train (default: 1000)')
parser.add_argument('--stopcond', default=0.01, type=float,
                        help='stopping condition based on the cross-entropy loss (default: 0.01)')
parser.add_argument('--batchsize', default=64, type=int,
help='input batch size (default: 64)')
parser.add_argument('--learningrate', default=0.05, type=float,
                        help='learning rate (default: 0.05)')
parser.add_argument('--momentum', default=0.9, type=float,
help='momentum (default: 0.9)')
parser.add_argument('--label_corrupt_prob', default=0, type=float,
help='label_corrupt_prob (default: 0)')
parser.add_argument('--num_sample_path', default=1, type=float,
                        help='num_sample_path (default: 1)')
args = parser.parse_args()
    use_cuda = not args.no_cuda and torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")
print(device)
kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}
    nchannels, nclasses, img_dim = 3, 10, 32
if args.dataset == 'MNIST': nchannels = 1
if args.dataset == 'CIFAR100': nclasses = 100
num_sample_path = int(args.num_sample_path)
size_of_training_set = 5000
num_batches = size_of_training_set // args.batchsize
tr_err_list = np.empty((args.epochs, num_sample_path))
tr_loss_list = np.empty((args.epochs, num_sample_path))
val_err_list = np.empty((args.epochs, num_sample_path))
val_loss_list = np.empty((args.epochs, num_sample_path))
variance_list = np.empty((num_batches, args.epochs))
optimizer = None
    subset_indices = np.random.choice(50000, size_of_training_set, replace=False)
if args.dataset == 'MNIST':
normalize = transforms.Normalize(mean=[0.131], std=[0.289])
else:
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
transform_train = transforms.Compose([transforms.Resize(32), transforms.ToTensor(), normalize])
transform_test = transforms.Compose([transforms.Resize(32), transforms.ToTensor(), normalize])
if args.dataset == 'MNIST':
train_dataset = MNISTRandomLabels(root='./data', train=True, download=True,
transform=transform_train, num_classes=10,
corrupt_prob=args.label_corrupt_prob)
train_loader = torch.utils.data.DataLoader(
        train_dataset, batch_size=args.batchsize, sampler=SubsetRandomSampler(subset_indices), shuffle=False, **kwargs)
val_loader = torch.utils.data.DataLoader(
MNISTRandomLabels(root='./data', train=False,
transform=transform_test, num_classes=10,
corrupt_prob=args.label_corrupt_prob), batch_size=args.batchsize, shuffle=False, **kwargs)
else:
train_dataset = CIFAR10RandomLabels(root='./data', train=True, download=True,
transform=transform_train, num_classes=10,
corrupt_prob=args.label_corrupt_prob)
train_loader = torch.utils.data.DataLoader(
        train_dataset, batch_size=args.batchsize, sampler=SubsetRandomSampler(subset_indices), shuffle=False, **kwargs)
val_loader = torch.utils.data.DataLoader(
CIFAR10RandomLabels(root='./data', train=False,
transform=transform_test, num_classes=10,
corrupt_prob=args.label_corrupt_prob), batch_size=args.batchsize, shuffle=False, **kwargs)
for i in range(num_sample_path):
model = getattr(importlib.import_module('models.{}'.format(args.model)), 'Network')(nchannels, nclasses)
model = model.to(device)
criterion = nn.CrossEntropyLoss().to(device)
optimizer = Langevin_SGD(model.parameters(), args.learningrate)
for epoch in range(0, args.epochs):
tr_err, tr_loss, gradient = train(args, model, device, train_loader, criterion, optimizer, epoch, args.batchsize, num_batches)
tr_err, tr_loss, _ = validate(args, model, device, train_loader, criterion, optimizer, length=size_of_training_set)
val_err, val_loss, _ = validate(args, model, device, val_loader, criterion, optimizer)
tr_err_list[epoch, i] = tr_err
tr_loss_list[epoch, i] = tr_loss
val_err_list[epoch, i] = val_err
val_loss_list[epoch, i] = val_loss
for t in range(gradient.shape[0]):
filename = "gradient/" + str(t) + "_" + str(epoch) + "_" + str(i)
np.save(filename, gradient[t])
print("epoch " + str(epoch+1) + " completed")
print("Sample path " + str(i+1) + " completed")
average_tr_err = np.mean(tr_err_list, axis=1)
average_tr_loss = np.mean(tr_loss_list, axis=1)
average_val_err = np.mean(val_err_list, axis=1)
average_val_loss = np.mean(val_loss_list, axis=1)
for i in range(num_batches):
for j in range(args.epochs):
sum_var = 0
bar = 0
for k in range(num_sample_path):
filename = "gradient/" + str(i) + "_" + str(j) + "_" + str(k) + ".npy"
bar = bar + np.load(filename)
bar = bar / num_sample_path
for k in range(num_sample_path):
filename = "gradient/" + str(i) + "_" + str(j) + "_" + str(k) + ".npy"
foo = np.load(filename)
sum_var = sum_var + np.linalg.norm(foo-bar)**2
sum_var = sum_var/num_sample_path
variance_list[i,j] = sum_var
print('Print the average result from multiple sample path:')
left_hand = []
right_hand = []
surrogate_loss = []
for epoch in range(0, args.epochs):
tr_err = average_tr_err[epoch]
tr_loss = average_tr_loss[epoch]
val_err = average_val_err[epoch]
val_loss = average_val_loss[epoch]
bound_var = 0
for m in range(num_batches):
sum_var = 0
for t in range(m, num_batches * (epoch+1), num_batches):
eta = args.learningrate * (optimizer.decay_rate ** (t // optimizer.decay_steps))
beta = 4 * eta / ((0.002*math.sqrt(2) * eta)**2)
product_var = eta * beta * variance_list[m, t // num_batches]
sum_var = sum_var + product_var
bound_var = bound_var + math.sqrt(sum_var)
bound_var = bound_var * math.sqrt(2 * args.batchsize) * 0.5 / (2.0 * size_of_training_set)
left_hand.append(abs(average_tr_err[epoch] - average_val_err[epoch]))
surrogate_loss.append(abs(average_tr_loss[epoch] - average_val_loss[epoch]))
right_hand.append(bound_var)
print(f'Epoch: {epoch + 1}/{args.epochs}\t Average Training loss: {tr_loss:.8f}', f'Average Training error: {tr_err:.8f}\t Average Validation error: {val_err:.8f}', f'Average Validation loss: {val_loss:.8f}\t Average Bound: {bound_var:.8f}\t')
acc_hand = average_tr_err
np.save("left_hand", left_hand)
np.save("right_hand", right_hand)
np.save("acc", acc_hand)
if __name__ == '__main__':
main()
|
from datetime import datetime
from olympia.addons.models import Addon
from olympia.api.serializers import BaseESSerializer
class BasicSerializer(BaseESSerializer):
class Meta:
model = Addon
fields = ()
def test_handle_date_strips_microseconds():
serializer = BasicSerializer()
date = datetime.utcnow()
assert date.microsecond
assert serializer.handle_date(date.isoformat()) == date.replace(microsecond=0)
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='ProfileRequest',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('username', models.CharField(help_text='Username if this user is created.', max_length=100, null=True)),
('first_name', models.CharField(help_text='First name if user is created.', max_length=100)),
('last_name', models.CharField(help_text='Last name if user is created.', max_length=100)),
('email', models.CharField(help_text='E-mail address if user is created.', max_length=255)),
                ('request_date', models.DateTimeField(help_text='Date and time this request was made.', auto_now_add=True)),
                ('affiliation', models.CharField(default='R', help_text="User's affiliation with the house.", max_length=1, choices=[('R', 'Current Resident'), ('B', 'Current Boarder'), ('A', 'Alumna/Alumnus')])),
('password', models.CharField(help_text="User's password. Stored as hash", max_length=255, blank=True)),
('provider', models.CharField(max_length=32, blank=True)),
('uid', models.CharField(max_length=255, blank=True)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='UserProfile',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('current_room', models.CharField(help_text="User's current room number", max_length=100, null=True, blank=True)),
('former_rooms', models.CharField(help_text="List of user's former room numbers", max_length=100, null=True, blank=True)),
('former_houses', models.CharField(help_text="List of user's former BSC houses", max_length=100, null=True, blank=True)),
('phone_number', models.CharField(help_text="User's phone number", max_length=20, null=True, blank=True)),
('email_visible', models.BooleanField(default=False, help_text='Whether the email is visible in the directory')),
('phone_visible', models.BooleanField(default=False, help_text='Whether the phone number is visible in the directory')),
                ('status', models.CharField(default='R', help_text='Member status (resident, boarder, alumnus)', max_length=1, choices=[('R', 'Current Resident'), ('B', 'Current Boarder'), ('A', 'Alumna/Alumnus')])),
('user', models.OneToOneField(to=settings.AUTH_USER_MODEL)),
],
options={
},
bases=(models.Model,),
),
]
|
"""This is the docstring"""
import datetime
from flask import jsonify, request, g
from ..utils.auth import requires_auth
from ..utils.tools import fileUpload, enrich_posts
from ..utils.db_handler import insert_post_to_db, find_related_posts, like_post
from .. import app
@app.route('/post', methods=['GET'])
@requires_auth
def get_posts():
"""Hei"""
email = g.current_user["email"]
posts = find_related_posts(email)
if posts:
enriched_posts = enrich_posts(posts)
return jsonify(enriched_posts), 200
return jsonify(error='Could not fetch any posts'), 404
@app.route('/post', methods=['POST'])
@requires_auth
def post_posts():
"""sefsef"""
try:
_id = str(g.current_user["_id"])
email = str(g.current_user["email"])
#print("The current user is: " + email)
#print(request.files)
now = datetime.datetime.now()
post = {
'user_id' : _id,
'user_email':email,
'created' : now,
'text' : "",
'images' : [],
'comments' : [],
'liked_by': []
}
        if request.form.get('content_text'):
post['text'] = request.form['content_text']
        if request.files.get('file_0'):
for file in request.files:
filepath, filename = fileUpload(request.files[file])
post['images'].append({
'filepath' : filepath,
'filename' : filename
})
post_id = insert_post_to_db(post)
if post_id:
#If the post is successful make sure to insert a post reference
#into users posts
posts = find_related_posts(email)
enriched_posts = enrich_posts(posts)
return jsonify(enriched_posts), 200
return jsonify(error='Something went horribly wrong.'), 400
except ValueError as err:
return jsonify(error="Request did not receive the expected value: " + err), 500
@app.route('/likepost', methods=['POST'])
@requires_auth
def like_posts():
"""sefsef"""
email = g.current_user["email"]
incoming = request.get_json()
post = like_post(incoming['post_id'], email)
if post:
posts = find_related_posts(email)
enriched_posts = enrich_posts(posts)
if posts:
return jsonify(enriched_posts), 200
return jsonify(error='Could not fetch any posts'), 404
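# Example request against /likepost (a sketch; assumes requires_auth
# accepts a bearer token, which is not shown in this module):
#
#   curl -X POST http://localhost:5000/likepost \
#        -H "Authorization: Bearer <token>" \
#        -H "Content-Type: application/json" \
#        -d '{"post_id": "<post-id>"}'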
|
from abc import ABC, abstractmethod
import numpy as np
import copy
from rl.tools.utils.mvavg import ExpMvAvg, PolMvAvg
from rl.tools.utils.misc_utils import deepcopy_from_list
class OnlineNormalizer(ABC):
"""
A normalizer that adapts to streaming observations. Given input x, it computes
x_cooked = clip((x-bias)/scale, thre)
It should support copy.deepcopy.
"""
def __init__(self, shape, unscale=False, unbias=False, clip_thre=None):
self._shape = shape
self._unscale = unscale
self._unbias = unbias
        assert clip_thre is None or isinstance(clip_thre, float), 'Invalid clip_thre.'
self._clip_thre = clip_thre
self._initialized = False
def normalize(self, x):
"""
Given input x, it computes
x_cooked = clip((x-bias)/scale, thre)
If unscale/unbias is True, it removes the scaling/bias after clipping.
"""
if not self._initialized:
return x
# do something
if self._clip_thre is None:
if not self._unbias:
x = x - self.bias
if not self._unscale:
x = x / self.scale
else:
# need to first scale it before clipping
x = (x - self.bias) / self.scale
x = np.clip(x, -self._clip_thre, self._clip_thre)
# check if we need to scale it back
if self._unscale:
x = x * self.scale
if self._unbias:
x = x + self.bias
else:
if self._unbias:
x = x + self.bias / self.scale
return x
def reset(self):
self._initialized = False
self._reset()
@abstractmethod
def _reset(self):
""" Reset the normalizer to its initial state. """
@property
@abstractmethod
def bias(self):
pass
@property
@abstractmethod
def scale(self):
pass
def update(self, *args, **kwargs):
self._update(*args, **kwargs)
self._initialized = True
@abstractmethod
def _update(self, *args, **kwargs):
""" Update data for defining bias and scale """
def assign(self, other):
assert type(self) == type(other)
deepcopy_from_list(self, other, self.__dict__.keys())
class NormalizerId(OnlineNormalizer):
# Just an identity map
def __init__(self, shape, unscale=False, unbias=False, clip_thre=None, **kwargs):
super().__init__(shape, unscale=True, unbias=True, clip_thre=None)
def _reset(self):
pass
    @property
    def bias(self):
        return np.zeros(self._shape)
    @property
    def scale(self):
        return np.ones(self._shape)
    def _update(self, *args, **kwargs):
pass
class NormalizerStd(OnlineNormalizer):
def __init__(self, shape, unscale=False, unbias=False, clip_thre=None,
rate=0, momentum=None, eps=1e-6):
"""
An online normalizer based on whitening.
        shape: None or a tuple specifying each dimension
momentum: None for moving average
                  [0,1) for exponential average
1 for using instant update
rate: decides the weight of new observation as itr**rate
"""
super().__init__(shape, unscale=unscale, unbias=unbias, clip_thre=clip_thre)
if momentum is None:
self._mvavg_init = lambda: PolMvAvg(np.zeros(self._shape), power=rate)
else:
assert momentum <= 1.0 and momentum >= 0.0
self._mvavg_init = lambda: ExpMvAvg(np.zeros(self._shape), rate=momentum)
self.reset()
self._eps = eps
def _reset(self):
self._mean = self._mvavg_init()
self._mean_of_sq = self._mvavg_init()
@property
def bias(self):
return self._mean.val
@property
def scale(self):
return np.maximum(self.std, self._eps)
@property
def std(self):
variance = self._mean_of_sq.val - np.square(self._mean.val)
return np.sqrt(variance)
def _update(self, x):
if np.shape(x) == ():
x = np.array(x)[np.newaxis]
# observed stats
new_mean = np.mean(x, axis=0)
new_mean_of_sq = np.mean(np.square(x), axis=0)
self._mean.update(new_mean)
self._mean_of_sq.update(new_mean_of_sq)
class NormalizerMax(OnlineNormalizer):
def __init__(self, shape, unscale=False, unbias=False, clip_thre=None,
rate=0, momentum=None, eps=1e-6):
# Args:
# momentum: None for moving average
        # [0,1) for exponential average
# 1 for using instant update
# rate: decide the weight of new observation as itr**rate
super().__init__(shape, unscale=unscale, unbias=unbias, clip_thre=clip_thre)
self._norstd = NormalizerStd(shape, unscale=unscale, unbias=unbias, clip_thre=clip_thre,
rate=rate, momentum=momentum, eps=eps)
self.reset()
self._eps = eps
def _reset(self):
self._norstd.reset()
self._upper_bound = None
self._lower_bound = None
@property
def bias(self):
return 0.5 * self._upper_bound + 0.5 * self._lower_bound
@property
def scale(self):
return np.maximum(self._upper_bound - self.bias, self._eps)
def _update(self, x):
# update stats
self._norstd.update(x)
# update clipping
scale_candidate = self._norstd.std
upper_bound_candidate = self._norstd.bias + self._norstd.scale
lower_bound_candidate = self._norstd.bias - self._norstd.scale
if not self._initialized:
self._upper_bound = upper_bound_candidate
self._lower_bound = lower_bound_candidate
else:
self._upper_bound = np.maximum(self._upper_bound, upper_bound_candidate)
self._lower_bound = np.minimum(self._lower_bound, lower_bound_candidate)
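# Minimal usage sketch (illustrative, not part of the original module):
# feed batches through update(), then normalize() computes
# clip((x - bias) / scale, thre) as described in the class docstring.
#
#   nor = NormalizerStd(shape=(3,), clip_thre=5.0)
#   nor.update(np.random.randn(100, 3))
#   x_cooked = nor.normalize(np.random.randn(3))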
|
##############################################################################
#
# Copyright (c) 2003 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Sample tests with a layer that can't be torn down
"""
import unittest
import doctest
class Layer:
def setUp(self):
pass
setUp = classmethod(setUp)
def tearDown(self):
raise NotImplementedError
tearDown = classmethod(tearDown)
class TestSomething(unittest.TestCase):
layer = Layer
def test_something(self):
import pdb; pdb.set_trace()
def test_something2(self):
import pdb; pdb.set_trace()
def test_something3(self):
import pdb; pdb.set_trace()
def test_something4(self):
import pdb; pdb.set_trace()
def test_something5(self):
f()
def f():
import pdb; pdb.set_trace()
def test_set_trace():
"""
>>> if 1:
... x = 1
... import pdb; pdb.set_trace()
"""
def test_set_trace2():
"""
>>> f()
"""
def test_suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(TestSomething))
d = doctest.DocTestSuite()
d.layer = Layer
suite.addTest(d)
return suite
if __name__ == '__main__':
unittest.main()
|
# ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2018 Alex Holkner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
'''Wrapper for include/libavutil/avutil.h
'''
from __future__ import unicode_literals
from ctypes import (c_int, c_uint16, c_int32, c_int64, c_uint32, c_uint64,
c_uint8, c_int8, c_uint, c_double, c_float, c_ubyte, c_size_t, c_char,
c_char_p, c_void_p, addressof, byref, cast, POINTER, CFUNCTYPE, Structure,
Union, create_string_buffer, memmove)
import pyglet
import pyglet.lib
avutil = pyglet.lib.load_library(
'avutil',
win32='avutil-56',
darwin='avutil.56'
)
AVMEDIA_TYPE_UNKNOWN = -1
AVMEDIA_TYPE_VIDEO = 0
AVMEDIA_TYPE_AUDIO = 1
AVMEDIA_TYPE_DATA = 2
AVMEDIA_TYPE_SUBTITLE = 3
AVMEDIA_TYPE_ATTACHMENT = 4
AVMEDIA_TYPE_NB = 5
AV_SAMPLE_FMT_U8 = 0
AV_SAMPLE_FMT_S16 = 1
AV_SAMPLE_FMT_S32 = 2
AV_SAMPLE_FMT_FLT = 3
AV_SAMPLE_FORMAT_DOUBLE = 4
AV_SAMPLE_FMT_U8P = 5
AV_SAMPLE_FMT_S16P = 6
AV_SAMPLE_FMT_S32P = 7
AV_SAMPLE_FMT_FLTP = 8
AV_SAMPLE_FMT_DBLP = 9
AV_SAMPLE_FMT_S64 = 10
AV_SAMPLE_FMT_S64P = 11
AV_NUM_DATA_POINTERS = 8
AV_PIX_FMT_RGB24 = 2
AV_PIX_FMT_ARGB = 25
AV_PIX_FMT_RGBA = 26
class AVBuffer(Structure):
_fields_ = [
('data', POINTER(c_uint8)),
('size', c_int),
#.. more
]
class AVBufferRef(Structure):
_fields_ = [
('buffer', POINTER(AVBuffer)),
('data', POINTER(c_uint8)),
('size', c_int)
]
class AVDictionaryEntry(Structure):
_fields_ = [
('key', c_char_p),
('value', c_char_p)
]
class AVDictionary(Structure):
_fields_ = [
('count', c_int),
('elems', POINTER(AVDictionaryEntry))
]
class AVClass(Structure): pass
class AVRational(Structure):
_fields_ = [
('num', c_int),
('den', c_int)
]
class AVFrameSideData(Structure): pass
class AVFrame(Structure):
_fields_ = [
('data', POINTER(c_uint8) * AV_NUM_DATA_POINTERS),
('linesize', c_int * AV_NUM_DATA_POINTERS),
('extended_data', POINTER(POINTER(c_uint8))),
('width', c_int),
('height', c_int),
('nb_samples', c_int),
('format', c_int),
('key_frame', c_int),
('pict_type', c_int),
('sample_aspect_ratio', AVRational),
('pts', c_int64),
('pkt_pts', c_int64), #Deprecated
('pkt_dts', c_int64),
('coded_picture_number', c_int),
('display_picture_number', c_int),
('quality', c_int),
('opaque', c_void_p),
('error', c_uint64 * AV_NUM_DATA_POINTERS), #Deprecated
('repeat_pict', c_int),
('interlaced_frame', c_int),
('top_field_first', c_int),
('palette_has_changed', c_int),
('reordered_opaque', c_int64),
('sample_rate', c_int),
('channel_layout', c_uint64),
('buf', POINTER(AVBufferRef) * AV_NUM_DATA_POINTERS),
('extended_buf', POINTER(POINTER(AVBufferRef))),
('nb_extended_buf', c_int),
('side_data', POINTER(POINTER(AVFrameSideData))),
('nb_side_data', c_int),
('flags', c_int),
('color_range', c_int),
('color_primaries', c_int),
('color_trc', c_int),
('colorspace', c_int),
('chroma_location', c_int),
('best_effort_timestamp', c_int64),
('pkt_pos', c_int64),
('pkt_duration', c_int64),
('metadata', POINTER(AVDictionary)),
('decode_error_flags', c_int),
('channels', c_int),
('pkt_size', c_int),
('qscale_table', POINTER(c_int8)), #Deprecated
('qstride', c_int), #Deprecated
('qscale_type', c_int), #Deprecated
('qp_table_buf', POINTER(AVBufferRef)), #Deprecated
('hw_frames_ctx', POINTER(AVBufferRef)),
('opaque_ref', POINTER(AVBufferRef)),
('crop_top', c_size_t), # video frames only
('crop_bottom', c_size_t), # video frames only
('crop_left', c_size_t), # video frames only
('crop_right', c_size_t), # video frames only
('private_ref', POINTER(AVBufferRef)),
]
AV_NOPTS_VALUE = -0x8000000000000000
AV_TIME_BASE = 1000000
AV_TIME_BASE_Q = AVRational(1, AV_TIME_BASE)
avutil.av_version_info.restype = c_char_p
avutil.av_dict_get.restype = POINTER(AVDictionaryEntry)
avutil.av_dict_get.argtypes = [POINTER(AVDictionary),
c_char_p, POINTER(AVDictionaryEntry),
c_int]
avutil.av_rescale_q.restype = c_int64
avutil.av_rescale_q.argtypes = [c_int64, AVRational, AVRational]
avutil.av_samples_get_buffer_size.restype = c_int
avutil.av_samples_get_buffer_size.argtypes = [POINTER(c_int),
c_int, c_int, c_int]
avutil.av_frame_alloc.restype = POINTER(AVFrame)
avutil.av_frame_free.argtypes = [POINTER(POINTER(AVFrame))]
avutil.av_get_default_channel_layout.restype = c_int64
avutil.av_get_default_channel_layout.argtypes = [c_int]
avutil.av_get_bytes_per_sample.restype = c_int
avutil.av_get_bytes_per_sample.argtypes = [c_int]
avutil.av_strerror.restype = c_int
avutil.av_strerror.argtypes = [c_int, c_char_p, c_size_t]
avutil.av_frame_get_best_effort_timestamp.restype = c_int64
avutil.av_frame_get_best_effort_timestamp.argtypes = [POINTER(AVFrame)]
avutil.av_image_fill_arrays.restype = c_int
avutil.av_image_fill_arrays.argtypes = [POINTER(c_uint8) * 4, c_int * 4,
POINTER(c_uint8), c_int, c_int, c_int, c_int]
avutil.av_dict_set.restype = c_int
avutil.av_dict_set.argtypes = [POINTER(POINTER(AVDictionary)),
c_char_p, c_char_p, c_int]
avutil.av_dict_free.argtypes = [POINTER(POINTER(AVDictionary))]
__all__ = [
'avutil',
'AVMEDIA_TYPE_UNKNOWN',
'AVMEDIA_TYPE_VIDEO',
'AVMEDIA_TYPE_AUDIO',
'AVMEDIA_TYPE_DATA',
'AVMEDIA_TYPE_SUBTITLE',
'AVMEDIA_TYPE_ATTACHMENT',
'AVMEDIA_TYPE_NB',
'AV_SAMPLE_FMT_U8',
'AV_SAMPLE_FMT_S16',
'AV_SAMPLE_FMT_S32',
'AV_SAMPLE_FMT_FLT',
'AV_SAMPLE_FORMAT_DOUBLE',
'AV_SAMPLE_FMT_U8P',
'AV_SAMPLE_FMT_S16P',
'AV_SAMPLE_FMT_S32P',
'AV_SAMPLE_FMT_FLTP',
'AV_SAMPLE_FMT_DBLP',
'AV_SAMPLE_FMT_S64',
'AV_SAMPLE_FMT_S64P',
'AV_NUM_DATA_POINTERS',
'AV_PIX_FMT_RGB24',
'AV_PIX_FMT_ARGB',
'AV_PIX_FMT_RGBA',
'AV_NOPTS_VALUE',
'AV_TIME_BASE',
'AV_TIME_BASE_Q',
'AVFrame',
'AVRational',
'AVDictionary',
]
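# Quick smoke test (a sketch; assumes the shared library loaded above
# resolves this symbol): av_version_info() is declared with restype
# c_char_p, so it returns raw bytes describing the libavutil build.
if __name__ == '__main__':
    print(avutil.av_version_info().decode('utf-8'))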
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Created on Friday Feb 20 2020
This code was implemented by
Louis Weyland, Floris Fok and Julien Fer
"""
# Import built-in libs
import math
# Import third-party libraries
import numpy as np
import matplotlib.pyplot as plt
import scipy.stats as st
class BinTreeOption:
def __init__(
self, N, T, S0, sigma, r, K,
market="EU", option_type="call", array_out=False
):
"""
OOP representation of a binomial option tree.
Input:
N = total time steps (integer)
T = maturity option in years (numeric)
        S0 = initial stock price (numeric)
        sigma = volatility of the underlying stock (numeric)
r = risk-free rate (numeric)
K = strike price option (numeric)
market = market type (EU or USA)
option_type = determines option type (call or put)
array_out = False gives only resulting values, True gives full trees
Output:
returns an object representation with an already created price
tree. It also contains methods to determine option price
development and the hedging strategy
"""
# Init
self.N = N
self.T = T
self.S0 = S0
self.sigma = sigma
self.r = r
self.K = K
self.market = market.upper()
self.option_type = option_type.lower()
self.array_out = array_out
# Checks if market type and option type are valid
assert self.market in ["EU", "USA"], "Market not found. Choose EU or USA"
assert self.option_type in ["call", "put"], "Non-existing option type."
# Setup parameters for movements binomial tree
self.dt = T / N
self.u = np.exp(sigma * np.sqrt(self.dt))
self.d = 1 / self.u
self.p = (np.exp(r * self.dt) - self.d) / (self.u - self.d)
self.discount = np.exp(-r * self.dt)
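        # (These are the Cox-Ross-Rubinstein parameters: u = exp(sigma * sqrt(dt)),
        # d = 1 / u, and p the risk-neutral probability of an up-move.)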
# Create price tree and initialize option tree
self.price_tree = np.zeros((N + 1, N + 1))
self.create_price_tree()
self.option = np.zeros((N + 1, N + 1))
# Create hedging tree and theoretical hedging tree
self.delta = np.zeros((N, N))
self.t_delta = np.zeros((N, N))
def create_price_tree(self):
"""
Determines stock price at every time step.
"""
for i in range(self.N + 1):
for j in range(i + 1):
self.price_tree[j, i] = self.S0 * \
(self.u ** (i - j)) * (self.d ** j)
def determine_price(self):
"""
Determines option price and hedging strategy at every time step
depending on the option type and market.
"""
# Sets option price at maturity and apply recursive scheme
# for European call option
if self.market == "EU" and self.option_type == "call":
self.option[:, self.N] = np.maximum(
np.zeros(self.N + 1), self.price_tree[:, self.N] - self.K
)
self.recursive_eu_call()
# Sets option price at maturity and apply recursive scheme
# for European put option
elif self.market == "EU" and self.option_type == "put":
self.option[:, self.N] = np.maximum(
np.zeros(self.N + 1), self.K - self.price_tree[:, self.N]
)
self.recursive_eu_put()
# Sets option price at maturity and apply recursive scheme
# for American call option
elif self.market == "USA" and self.option_type == "call":
self.option[:, self.N] = np.maximum(
np.zeros(self.N + 1), self.price_tree[:, self.N] - self.K
)
self.recursive_usa_call()
# Sets option price at maturity and apply recursive scheme
# for American put option
elif self.market == "USA" and self.option_type == "put":
self.option[:, self.N] = np.maximum(
np.zeros(self.N + 1), self.K - self.price_tree[:, self.N])
self.recursive_usa_put()
# Ensures full output is given if asked by user.
# Otherwise it only returns the variables of interest at the spot time.
if self.array_out and self.market == "EU":
return [self.option[0, 0], self.delta[0, 0], self.t_delta[0, 0],
self.price_tree, self.option, self.delta, self.t_delta]
elif self.array_out and self.market == "USA":
return [self.option[0, 0], self.delta[0, 0],
self.price_tree, self.option, self.delta]
elif not self.array_out and self.market == "EU":
return self.option[0, 0], self.delta[0, 0], self.t_delta[0, 0]
return self.option[0, 0], self.delta[0, 0]
def recursive_eu_call(self):
"""
        Recursive scheme for a European call option.
"""
# Time starts at maturity (only necessary for theoretical hedging)
t = self.T
# Start scheme
for i in np.arange(self.N - 1, -1, -1):
t -= self.dt
# Determines option price, hedging strategy and theoretical hedging
            # strategy for each node in the current layer
for j in np.arange(0, i + 1):
self.option[j, i] = (self.discount * (self.p *
self.option[j, i + 1] + (1 - self.p) *
self.option[j + 1, i + 1]))
self.delta[j, i] = ((self.option[j, i + 1] -
self.option[j + 1, i + 1]) /
(self.price_tree[j, i + 1] -
self.price_tree[j + 1, i + 1]))
d1 = (np.log(self.S0 / self.K) + (self.r + 0.5 * self.sigma ** 2) * (self.T - t)) / (self.sigma * np.sqrt(self.T - t))
self.t_delta[j, i] = st.norm.cdf(d1, 0.0, 1.0)
def recursive_eu_put(self):
"""
        Recursive scheme for a European put option.
"""
# Time starts at maturity (only necessary for theoretical hedging)
t = self.T
# Start scheme
for i in np.arange(self.N - 1, -1, -1):
t -= self.dt
# Determines option price, hedging strategy and theoretical hedging
            # strategy for each node in the current layer
for j in np.arange(0, i + 1):
self.option[j, i] = (self.discount * (self.p *
self.option[j, i + 1] + (1 - self.p) *
self.option[j + 1, i + 1]))
self.delta[j, i] = ((self.option[j, i + 1] -
self.option[j + 1, i + 1]) /
(self.price_tree[j, i + 1] -
self.price_tree[j + 1, i + 1]))
d1 = (np.log(self.S0 / self.K) + (self.r + 0.5 * self.sigma ** 2) * (self.T - t)) / (self.sigma * np.sqrt(self.T - t))
self.t_delta[j, i] = -st.norm.cdf(-d1, 0.0, 1.0)
def recursive_usa_call(self):
"""
Recursive scheme for an American call option
"""
# Start scheme
for i in np.arange(self.N - 1, -1, -1):
# Determines option price and hedging strategy
# for each node in current layer
for j in np.arange(0, i + 1):
self.option[j, i] = max([0, self.price_tree[j, i] - self.K,
self.discount *
(self.p * self.option[j, i + 1] +
(1 - self.p) * self.option[j + 1, i + 1])])
self.delta[j, i] = ((self.option[j, i + 1] -
self.option[j + 1, i + 1]) /
(self.price_tree[j, i + 1] -
self.price_tree[j + 1, i + 1]))
def recursive_usa_put(self):
"""
Recursive scheme for an American put option
"""
# Start scheme
for i in np.arange(self.N - 1, -1, -1):
# Determines option price and hedging strategy
# for each node in current layer
for j in np.arange(0, i + 1):
self.option[j, i] = max([0, self.K - self.price_tree[j, i],
self.discount *
(self.p * self.option[j, i + 1] +
(1 - self.p) * self.option[j + 1, i + 1])])
self.delta[j, i] = ((self.option[j, i + 1] -
self.option[j + 1, i + 1]) /
(self.price_tree[j, i + 1] -
self.price_tree[j + 1, i + 1]))
class BlackScholes:
def __init__(self, T, S0, K, r, sigma, steps=1):
self.T = T
self.S0 = S0
self.K = K
self.r = r
self.sigma = sigma
self.steps = steps
self.dt = T / steps
self.price = S0
self.price_path = np.zeros(steps)
self.delta_list = None
self.x_hedge = None
def call_price(self, t=0):
"""
"""
d1 = (np.log(self.S0 / self.K) + (self.r + 0.5 * self.sigma ** 2)
* (self.T - t)) / (self.sigma * np.sqrt(self.T - t))
d2 = d1 - self.sigma * np.sqrt(self.T - t)
call = (self.S0 * st.norm.cdf(d1, 0.0, 1.0) - self.K *
np.exp(-self.r * self.T) * st.norm.cdf(d2, 0.0, 1.0))
return call
def put_price(self, t=0):
"""
"""
d1 = (np.log(self.S0 / self.K) + (self.r + 0.5 * self.sigma ** 2)
* (self.T - t)) / (self.sigma * np.sqrt(self.T - t))
d2 = d1 - self.sigma * np.sqrt(self.T - t)
put = ((self.K * np.exp(-self.r * self.T)
* st.norm.cdf(-d2, 0.0, 1.0)) - self.S0 *
st.norm.cdf(-d1, 0.0, 1.0))
return put
def asian_call_price(self, t=0) :
"""
"""
N = self.steps
sigma = self.sigma * np.sqrt(((N + 1) * (2 * N + 1)) / (6 * N ** 2))
b = ((N + 1) / (2 * N)) * (self.r - 0.5 * (sigma ** 2))
        d1 = ((np.log(self.S0 / self.K) + (b + 0.5 * sigma ** 2) * (self.T - t)) /
              (sigma * np.sqrt(self.T - t)))
d2 = d1 - sigma * np.sqrt(self.T - t)
call = (self.S0 * np.exp((b - self.r) * self.T) * st.norm.cdf(d1, 0.0, 1.0) - self.K *
np.exp(-self.r * self.T) * st.norm.cdf(d2, 0.0, 1.0))
return call
def asian_put_price(self, t=0) :
"""
"""
N = self.steps
sigma = self.sigma * np.sqrt(((N + 1) * (2 * N + 1)) / (6 * N ** 2))
b = ((N + 1) / (2 * N)) * (self.r - 0.5 * (sigma ** 2))
        d1 = ((np.log(self.S0 / self.K) + (b + 0.5 * sigma ** 2) * (self.T - t)) /
              (sigma * np.sqrt(self.T - t)))
d2 = d1 - (sigma * np.sqrt(self.T - t))
put = self.K * np.exp(-self.r * self.T) * st.norm.cdf(-d2, 0.0, 1.0) - (
self.S0 * np.exp((b - self.r) * self.T) * st.norm.cdf(-d1, 0.0, 1.0))
return put
def create_price_path(self):
"""
"""
for i in range(self.steps):
self.price_path[i] = self.price
dS = self.r * self.price * self.dt + self.sigma * \
self.price * np.random.normal(0, 1) * np.sqrt(self.dt)
self.price += dS
def create_hedge(self, steps=1, hedge_setting='Call'):
'''
Simulate hedging over the given price path and returns a profit
'''
# time steps
x_hedge = [j / steps for j in range(steps)]
# Check if price path is made
if self.price_path[-1] == 0:
self.create_price_path()
# corrected current price for hedge time intervals and all deltas for a given time
hedge_price = [j for n, j in enumerate(self.price_path) if int(n % (self.steps / steps)) == 0]
delta_list = [self.hedge(t, s, hedge_setting) for t, s in zip(x_hedge, hedge_price)]
# New time step and interest for given interval
dt = self.T / steps
interest = self.r * dt
# set iterables
delta_t = 0
current_stock_price = 0
# set loop variables
previous_delta = delta_t
        bank = self.call_price() if hedge_setting.lower() == 'call' else self.put_price()
# loop over the time step and hedge for every time step
for delta_t, current_stock_price in zip(delta_list, hedge_price):
cost = (delta_t - previous_delta) * current_stock_price
bank = bank * math.exp(interest) - cost
previous_delta = delta_t
        # Calculate the profit when t = T (payoff side matches the hedge setting)
        payoff = (max([current_stock_price - self.K, 0]) if hedge_setting.lower() == 'call'
                  else max([self.K - current_stock_price, 0]))
        profit = bank + (current_stock_price * delta_t) - payoff
# Save values for later evaluations
self.delta_list = delta_list
self.x_hedge = x_hedge
self.hedge_price = hedge_price
# return profit made during the hedging
return profit
def plot_price_path(self, hedge_plot=True):
'''
        Enables plotting of both the price path and the delta over time
'''
fig, ax1 = plt.subplots()
# Get price time steps
x_price = [i / self.steps for i in range(self.steps)]
# Plot the price over time on the first axis
color = 'tab:red'
ax1.set_xlabel('years')
ax1.set_ylabel('Price', color=color)
ax1.plot(x_price, self.price_path, color=color,
label="Discritized Black Scholes")
ax1.tick_params(axis='y', labelcolor=color)
if hedge_plot:
# Instantiate a second axes that shares the same x-axis
ax2 = ax1.twinx()
color = 'tab:blue'
# we already handled the x-label with ax1
ax2.set_ylabel('Delta', color=color)
# Plot the delta
ax2.plot(self.x_hedge, self.delta_list, color=color, label='Hedge delta')
ax2.tick_params(axis='y', labelcolor=color)
# Finalize the plot and show
plt.title("Stock price and Delta development over time")
fig.tight_layout()
plt.show()
def hedge(self, t, S, hedge_setting='call'):
'''
Calculate the delta at a given time
'''
# Take d1 from black-scholes
d1 = (np.log(S / self.K) + (self.r + 0.5 * self.sigma ** 2)
* (self.T - t)) / (self.sigma * np.sqrt(self.T - t))
        # Calculate the derivative (delta) for call and put
if hedge_setting.lower() == 'call':
return st.norm.cdf(d1, 0.0, 1.0)
elif hedge_setting.lower() == 'put':
            return st.norm.cdf(d1, 0.0, 1.0) - 1  # put delta = N(d1) - 1
else:
print("Setting not found")
return None
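# Added sanity-check sketch (not from the original script): verifies that
# the closed-form prices above satisfy put-call parity.
def put_call_parity_gap(bs, t=0):
    """Return C - P - (S0 - K * exp(-r * T)); should be ~0 by parity
    (`bs` is a BlackScholes instance)."""
    forward = bs.S0 - bs.K * math.exp(-bs.r * bs.T)
    return bs.call_price(t) - bs.put_price(t) - forward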
if __name__ == "__main__":
for i in range(1):
B = BlackScholes(1, 100, 99, 0.06, 0.20, 50)
B.create_price_path()
# p = B.create_hedge(200, 'put')
p = 0
# B.plot_price_path()
p2 = B.create_hedge(50, 'call')
B.plot_price_path()
plt.show()
print(B.put_price(), B.call_price())
        print([round(p, 1), round(p2, 1)])
tree_test = BinTreeOption(50, 1, 100, 0.2, 0.06, 99,
market="EU", option_type="call", array_out=True)
price, delta, t_delta, price_tree, option, delta_tree, t_delta_tree = tree_test.determine_price()
print("Price\n", price)
print("===============================")
print("Delta\n", delta)
print("===============================")
print("Theoretical Delta\n", t_delta)
print("===============================")
print("Price Tree\n", price_tree)
print("===============================")
print("Option Tree\n", option)
print("===============================")
print("Delta Tree\n", delta_tree)
print("===============================")
print("Theoretical Delta Tree\n", t_delta_tree)
print("===============================")
# tree1 = BinTreeOption(5, 5 / 12, 50, 0.4, 0.1, 50,
# market="USA", option_type="put", array_out=False)
# tree2 = BinTreeOption(5, 5 / 12, 50, 0.4, 0.1, 50,
# market="EU", option_type="put", array_out=False)
# tree3 = BinTreeOption(50, 1, 100, 0.2, 0.06, 99,
# market="USA", option_type="call", array_out=False)
# tree4 = BinTreeOption(50, 1, 100, 0.2, 0.06, 99,
# market="EU", option_type="call", array_out=False)
# tree5 = BinTreeOption(50, 1, 100, 0.2, 0.06, 99,
# market="USA", option_type="put", array_out=False)
# tree6 = BinTreeOption(50, 1, 100, 0.2, 0.06, 99,
# market="EU", option_type="put", array_out=False)
# trees = [tree1, tree2, tree3, tree4, tree5, tree6]
# for i, tree in enumerate(trees):
# price, delta = tree.determine_price()
# print(f"Price of Tree {i + 1} is", price)
# print(f"Delta of Tree {i + 1} is", delta)
# print("===============================================")
# bs_eu = BlackScholes(1, 100, 99, 0.06, 0.2, steps=50)
# bs_eu.create_price_path()
# bs_eu.plot_price_path(hedge_setting="Call", hedge_plot=True)
|
###############################################################################
##
## Copyright (C) 2011-2014, NYU-Poly.
## Copyright (C) 2006-2011, University of Utah.
## All rights reserved.
## Contact: contact@vistrails.org
##
## This file is part of VisTrails.
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## - Redistributions of source code must retain the above copyright notice,
## this list of conditions and the following disclaimer.
## - Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## - Neither the name of the University of Utah nor the names of its
## contributors may be used to endorse or promote products derived from
## this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
###############################################################################
import errno
import json
import os
import shutil
import subprocess
import sys
from vistrails.core.modules.vistrails_module import Module, ModuleError, IncompleteImplementation, new_module
import vistrails.core.modules.module_registry
from vistrails.core import debug
from vistrails.core.packagemanager import get_package_manager
import vistrails.core.system
from vistrails.core.system import packages_directory, vistrails_root_directory
import identifiers
cl_tools = {}
class CLTools(Module):
""" CLTools is the base Module.
    A subclass of CLTools is created for each command-line tool
    described by a .clt configuration file.
"""
def compute(self):
raise IncompleteImplementation # pragma: no cover
SUFFIX = '.clt'
DEFAULTFILESUFFIX = '.cld'
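# Illustrative .clt layout (a sketch inferred from the lookups in compute()
# below; the `wc` tool and the port names are hypothetical):
#
#   {
#     "command": "wc",
#     "args": [["input", "infile", "File", {"flag": "-l"}]],
#     "stdout": ["linecount", "String", {}],
#     "options": {"std_using_files": ""}
#   }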
def _eintr_retry_call(func, *args):
"""Fixes OSErrors and IOErrors
From: http://code.google.com/p/seascope/source/detail?spec=svn8dbe5e23d41db673727ce90fd338e9a43f8877e8&name=8dbe5e23d41d&r=8dbe5e23d41db673727ce90fd338e9a43f8877e8
IOError added
"""
while True:
try:
return func(*args)
except (OSError, IOError), e: # pragma: no cover
if e.errno == errno.EINTR:
continue
raise
def _add_tool(path):
# first create classes
tool_name = os.path.basename(path)
if not tool_name.endswith(SUFFIX): # pragma: no cover
return
(tool_name, _) = os.path.splitext(tool_name)
if tool_name in cl_tools: # pragma: no cover
debug.critical("Package CLTools already added: '%s'" % tool_name)
try:
conf = json.load(open(path))
except ValueError as exc: # pragma: no cover
debug.critical("Package CLTools could not parse '%s'" % path, exc)
return
def compute(self):
""" 1. read inputs
2. call with inputs
3. set outputs
"""
# add all arguments as an unordered list
args = [self.conf['command']]
file_std = 'options' in self.conf and 'std_using_files' in self.conf['options']
fail_with_cmd = 'options' in self.conf and 'fail_with_cmd' in self.conf['options']
setOutput = [] # (name, File) - set File contents as output for name
open_files = []
stdin = None
kwargs = {}
for type, name, klass, options in self.conf['args']:
type = type.lower()
klass = klass.lower()
if "constant" == type:
flag = 'flag' in options and options['flag']
if flag:
args.append(flag)
if name:
# if flag==name we assume user tried to name a constant
if not name == flag:
args.append('%s%s' % (options.get('prefix', ''), name))
elif "input" == type:
# handle multiple inputs
values = self.force_get_input_list(name)
if values and 'list' == klass:
values = values[0]
klass = options['type'].lower() \
if 'type' in options else 'string'
for value in values:
if 'flag' == klass:
if not value:
continue
if 'flag' in options and options['flag']:
value = options['flag']
else:
# use name as flag
value = name
elif klass in ('file', 'directory', 'path'):
value = value.name
# check for flag and append file name
if not 'flag' == klass and 'flag' in options:
args.append(options['flag'])
value = '%s%s' % (options.get('prefix', ''),
value)
args.append(value)
elif "output" == type:
# output must be a filename but we may convert the result to a string
# create new file
file = self.interpreter.filePool.create_file(
suffix=options.get('suffix', DEFAULTFILESUFFIX))
fname = file.name
if 'prefix' in options:
fname = options['prefix'] + fname
if 'flag' in options:
args.append(options['flag'])
args.append(fname)
if "file" == klass:
self.set_output(name, file)
elif "string" == klass:
setOutput.append((name, file))
else:
raise ValueError
elif "inputoutput" == type:
# handle single file that is both input and output
value = self.get_input(name)
# create copy of infile to operate on
outfile = self.interpreter.filePool.create_file(
suffix=options.get('suffix', DEFAULTFILESUFFIX))
try:
shutil.copyfile(value.name, outfile.name)
except IOError, e: # pragma: no cover
raise ModuleError(self,
"Error copying file '%s': %s" %
(value.name, debug.format_exception(e)))
value = '%s%s' % (options.get('prefix', ''), outfile.name)
# check for flag and append file name
if 'flag' in options:
args.append(options['flag'])
args.append(value)
self.set_output(name, outfile)
if "stdin" in self.conf:
name, type, options = self.conf["stdin"]
type = type.lower()
if self.has_input(name):
value = self.get_input(name)
if "file" == type:
if file_std:
f = open(value.name, 'rb')
else:
f = open(value.name, 'rb')
stdin = f.read()
f.close()
elif "string" == type:
if file_std:
file = self.interpreter.filePool.create_file()
f = open(file.name, 'wb')
f.write(value)
f.close()
f = open(file.name, 'rb')
else:
stdin = value
else: # pragma: no cover
raise ValueError
if file_std:
open_files.append(f)
kwargs['stdin'] = f.fileno()
else:
kwargs['stdin'] = subprocess.PIPE
if "stdout" in self.conf:
if file_std:
name, type, options = self.conf["stdout"]
type = type.lower()
file = self.interpreter.filePool.create_file(
suffix=DEFAULTFILESUFFIX)
if "file" == type:
self.set_output(name, file)
elif "string" == type:
setOutput.append((name, file))
else: # pragma: no cover
raise ValueError
f = open(file.name, 'wb')
open_files.append(f)
kwargs['stdout'] = f.fileno()
else:
kwargs['stdout'] = subprocess.PIPE
if "stderr" in self.conf:
if file_std:
name, type, options = self.conf["stderr"]
type = type.lower()
file = self.interpreter.filePool.create_file(
suffix=DEFAULTFILESUFFIX)
if "file" == type:
self.set_output(name, file)
elif "string" == type:
setOutput.append((name, file))
else: # pragma: no cover
raise ValueError
f = open(file.name, 'wb')
open_files.append(f)
kwargs['stderr'] = f.fileno()
else:
kwargs['stderr'] = subprocess.PIPE
if fail_with_cmd:
return_code = 0
else:
return_code = self.conf.get('return_code', None)
env = {}
# 0. add defaults
# 1. add from configuration
# 2. add from module env
# 3. add from env port
if configuration.check('env'):
try:
for var in configuration.env.split(";"):
key, value = var.split('=')
key = key.strip()
value = value.strip()
if key:
env[key] = value
            except Exception as e: # pragma: no cover
raise ModuleError(self,
"Error parsing configuration env: %s" % (
debug.format_exception(e)))
if 'options' in self.conf and 'env' in self.conf['options']:
try:
for var in self.conf['options']['env'].split(";"):
key, value = var.split('=')
key = key.strip()
value = value.strip()
if key:
env[key] = value
            except Exception as e: # pragma: no cover
raise ModuleError(self,
"Error parsing module env: %s" % (
debug.format_exception(e)))
if 'options' in self.conf and 'env_port' in self.conf['options']:
for e in self.force_get_input_list('env'):
try:
for var in e.split(';'):
if not var:
continue
key, value = var.split('=')
key = key.strip()
value = value.strip()
if key:
env[key] = value
                except Exception as e: # pragma: no cover
raise ModuleError(self,
"Error parsing env port: %s" % (
debug.format_exception(e)))
if env:
kwargs['env'] = dict(os.environ)
kwargs['env'].update(env)
# write to execution provenance
            env = ';'.join(['%s=%s' % (k, v) for k, v in env.items()])
self.annotate({'execution_env': env})
if 'dir' in self.conf:
kwargs['cwd'] = self.conf['dir']
process = subprocess.Popen(args, **kwargs)
if file_std:
process.wait()
else:
#if stdin:
# print "stdin:", len(stdin), stdin[:30]
stdout, stderr = _eintr_retry_call(process.communicate, stdin)
#stdout, stderr = process.communicate(stdin)
#if stdout:
# print "stdout:", len(stdout), stdout[:30]
#if stderr:
# print "stderr:", len(stderr), stderr[:30]
if return_code is not None:
if process.returncode != return_code:
raise ModuleError(self, "Command returned %d (!= %d)" % (
process.returncode, return_code))
self.set_output('return_code', process.returncode)
for f in open_files:
f.close()
for name, file in setOutput:
f = open(file.name, 'rb')
self.set_output(name, f.read())
f.close()
if not file_std:
if "stdout" in self.conf:
name, type, options = self.conf["stdout"]
type = type.lower()
if "file" == type:
file = self.interpreter.filePool.create_file(
suffix=DEFAULTFILESUFFIX)
f = open(file.name, 'wb')
f.write(stdout)
f.close()
self.set_output(name, file)
elif "string" == type:
self.set_output(name, stdout)
else: # pragma: no cover
raise ValueError
if "stderr" in self.conf:
name, type, options = self.conf["stderr"]
type = type.lower()
if "file" == type:
file = self.interpreter.filePool.create_file(
suffix=DEFAULTFILESUFFIX)
f = open(file.name, 'wb')
f.write(stderr)
f.close()
self.set_output(name, file)
elif "string" == type:
self.set_output(name, stderr)
else: # pragma: no cover
raise ValueError
# create docstring
d = """This module is a wrapper for the command line tool '%s'""" % \
conf['command']
# create module
M = new_module(CLTools, tool_name,{"compute": compute,
"conf": conf,
"tool_name": tool_name,
"__doc__": d})
reg = vistrails.core.modules.module_registry.get_module_registry()
reg.add_module(M, package=identifiers.identifier,
package_version=identifiers.version)
def to_vt_type(s):
# add recognized types here - default is String
return '(basic:%s)' % \
{'file':'File', 'path':'Path', 'directory': 'Directory',
'flag':'Boolean', 'list':'List',
'float':'Float','integer':'Integer'
}.get(s.lower(), 'String')
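    # For example, to_vt_type('file') returns '(basic:File)', while an
    # unrecognized type such as 'text' falls back to '(basic:String)'.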
# add module ports
if 'stdin' in conf:
name, type, options = conf['stdin']
optional = 'required' not in options
reg.add_input_port(M, name, to_vt_type(type), optional=optional)
if 'stdout' in conf:
name, type, options = conf['stdout']
optional = 'required' not in options
reg.add_output_port(M, name, to_vt_type(type), optional=optional)
if 'stderr' in conf:
name, type, options = conf['stderr']
optional = 'required' not in options
reg.add_output_port(M, name, to_vt_type(type), optional=optional)
if 'options' in conf and 'env_port' in conf['options']:
reg.add_input_port(M, 'env', to_vt_type('string'))
for type, name, klass, options in conf['args']:
optional = 'required' not in options
if 'input' == type.lower():
reg.add_input_port(M, name, to_vt_type(klass), optional=optional)
elif 'output' == type.lower():
reg.add_output_port(M, name, to_vt_type(klass), optional=optional)
elif 'inputoutput' == type.lower():
reg.add_input_port(M, name, to_vt_type('file'), optional=optional)
reg.add_output_port(M, name, to_vt_type('file'), optional=optional)
reg.add_output_port(M, 'return_code', to_vt_type('integer'))
cl_tools[tool_name] = M
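# For illustration only, a hypothetical minimal .clt file that _add_tool
# could consume (the keys match the code above; the tool and values are
# made up):
# {
#     "command": "wc",
#     "args": [["input", "f_in", "File", {"flag": "-l"}]],
#     "stdout": ["line_count", "string", {}]
# }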
def add_tool(path):
try:
_add_tool(path)
except Exception as exc: # pragma: no cover
import traceback
debug.critical("Package CLTools failed to create module "
"from '%s': %s" % (path, exc),
traceback.format_exc())
def initialize(*args, **keywords):
reload_scripts(initial=True)
def remove_all_scripts():
reg = vistrails.core.modules.module_registry.get_module_registry()
    for tool_name in list(cl_tools.keys()):  # copy keys: we delete while iterating
del cl_tools[tool_name]
reg.delete_module(identifiers.identifier, tool_name)
def reload_scripts(initial=False, name=None):
reg = vistrails.core.modules.module_registry.get_module_registry()
if not initial:
if name is None:
remove_all_scripts()
else:
del cl_tools[name]
reg.delete_module(identifiers.identifier, name)
if "CLTools" == identifiers.name:
# this is the original package
location = os.path.join(vistrails.core.system.current_dot_vistrails(),
"CLTools")
        # make sure the directory exists
if not os.path.isdir(location): # pragma: no cover # pragma: no branch
try:
debug.log("Creating CLTools directory...")
os.mkdir(location)
            except Exception as e:
debug.critical("Could not create CLTools directory. Make "
"sure '%s' does not exist and parent directory "
"is writable" % location,
e)
sys.exit(1)
else: # pragma: no cover
# this is a standalone package so modules are placed in this directory
location = os.path.dirname(__file__)
if initial:
reg.add_module(CLTools, abstract=True)
if name is None:
for path in os.listdir(location):
if path.endswith(SUFFIX): # pragma: no branch
add_tool(os.path.join(location, path))
else:
path = os.path.join(location, name + SUFFIX)
if os.path.exists(path):
add_tool(path)
if not initial:
from vistrails.core.interpreter.cached import CachedInterpreter
CachedInterpreter.clear_package(identifiers.identifier)
from vistrails.gui.vistrails_window import _app
_app.invalidate_pipelines()
wizards_list = []
def menu_items():
"""menu_items() -> tuple of (str,function)
It returns a list of pairs containing text for the menu and a
callback function that will be executed when that menu item is selected.
"""
try:
from wizard import QCLToolsWizardWindow
    except Exception as e: # pragma: no cover
if "CLTools" == identifiers.name:
debug.unexpected_exception(e)
raise
else:
return
lst = []
if "CLTools" == identifiers.name: # pragma: no branch
def open_wizard():
window = QCLToolsWizardWindow(reload_scripts=reload_scripts)
wizards_list.append(window)
window.show()
lst.append(("Open Wizard", open_wizard))
lst.append(("Reload All Scripts", reload_scripts))
return tuple(lst)
def finalize():
pass
def contextMenuName(name):
if "CLTools" == name:
return "Reload All Scripts"
else:
return "Reload Script"
def callContextMenu(name):
if "CLTools" == name:
reload_scripts()
else:
reload_scripts(name=name)
###############################################################################
import unittest
from vistrails.tests.utils import execute, intercept_results
class TestCLTools(unittest.TestCase):
@classmethod
def setUpClass(cls):
# first make sure CLTools is loaded
pm = get_package_manager()
if 'CLTools' not in pm._package_list: # pragma: no cover # pragma: no branch
pm.late_enable_package('CLTools')
remove_all_scripts()
cls.testdir = os.path.join(packages_directory(), 'CLTools', 'test_files')
cls._tools = {}
for name in os.listdir(cls.testdir):
if not name.endswith(SUFFIX):
continue
_add_tool(os.path.join(cls.testdir, name))
toolname = os.path.splitext(name)[0]
cls._tools[toolname] = cl_tools[toolname]
cls._old_dir = os.getcwd()
os.chdir(vistrails_root_directory())
@classmethod
def tearDownClass(cls):
os.chdir(cls._old_dir)
reload_scripts()
def do_the_test(self, toolname):
with intercept_results(
self._tools[toolname],
'return_code', 'f_out', 'stdout') as (
return_code, f_out, stdout):
self.assertFalse(execute([
(toolname, 'org.vistrails.vistrails.cltools', [
('f_in', [('File', self.testdir + '/test_1.cltest')]),
('chars', [('List', '["a", "b", "c"]')]),
('false', [('Boolean', 'False')]),
('true', [('Boolean', 'True')]),
('nb', [('Integer', '42')]),
('stdin', [('String', 'some line\nignored')]),
]),
]))
self.assertEqual(return_code, [0])
self.assertEqual(f_out, ['ok\nmessage received'])
self.assertEqual(stdout, ['program output here'])
def test_with_pipes(self):
"""Without std_using_files: use pipes instead of files.
"""
self.do_the_test('intern_cltools_1')
def test_with_files(self):
"""With std_using_files: use files instead of pipes.
"""
self.do_the_test('intern_cltools_2')
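# A hypothetical way to run just these tests, assuming a VisTrails
# checkout on PYTHONPATH (the module path below is illustrative):
#   python -m unittest vistrails.packages.CLTools.init.TestCLTools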
|
"""
Django settings for app project.
Generated by 'django-admin startproject' using Django 3.1.3.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '0=9ha2%ek5&qbfew_ndv6-^_x9fa=2g5yjzmfte+as^d$ao#1d'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'api',
'frontend'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'app.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'app.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
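# A minimal sketch (not part of the generated settings): in production you
# would typically pull the secrets above from the environment instead, e.g.
#   import os
#   SECRET_KEY = os.environ.get('DJANGO_SECRET_KEY', SECRET_KEY)
#   DEBUG = os.environ.get('DJANGO_DEBUG', '') == '1'
#   ALLOWED_HOSTS = os.environ.get('DJANGO_ALLOWED_HOSTS', '').split(',')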
|
"""mysite URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path,include
urlpatterns = [
path('admin/', admin.site.urls),
path('', include('blog.urls')),
]
|
from django.test import tag
from ..models import OrgUnit, Form, Instance, OrgUnitType, Account, Project, SourceVersion, DataSource
from math import floor
from rest_framework.test import APIClient
import json
from ..test import APITestCase
import typing
class BasicAPITestCase(APITestCase):
def setUp(self):
source = DataSource.objects.create(name="Source")
old_version = SourceVersion.objects.create(number=1, data_source=source)
default_version = SourceVersion.objects.create(number=2, data_source=source)
account = Account(name="Les Inconnus", default_version=default_version)
account.save()
self.project = Project(name="Le spectacle", app_id="org.inconnus.spectacle", account=account)
self.project.save()
unit_type = OrgUnitType(name="Hospital", short_name="Hosp")
unit_type.save()
self.project.unit_types.add(unit_type)
unit_type_2 = OrgUnitType(name="CDS", short_name="CDS")
unit_type_2.save()
self.project.unit_types.add(unit_type_2)
unit_type.sub_unit_types.add(unit_type_2)
OrgUnit.objects.create(version=old_version, name="Odd org unit", org_unit_type=unit_type)
self.form_1 = Form.objects.create(name="Hydroponics study")
self.form_2 = Form.objects.create(name="Another hydroponics study")
self.project.forms.add(self.form_1)
self.project.forms.add(self.form_2)
def test_org_unit_insertion(self):
"""Creating Org Units through the API"""
c = APIClient()
hospital_unit_type = OrgUnitType.objects.get(name="Hospital")
uuid = "f6ec1672-ab58-4fb2-a4a0-4af80573e2ae"
name = "Hopital Velpo"
# with latitude and longitude
unit_body = [
{
"id": uuid,
"latitude": 50.503,
"created_at": 1565194077692,
"updated_at": 1565194077693,
"orgUnitTypeId": hospital_unit_type.id,
"parentId": None,
"longitude": 4.469,
"altitude": 110,
"accuracy": 0,
"time": 0,
"name": name,
}
]
response = c.post("/api/orgunits/?app_id=org.inconnus.spectacle", data=unit_body, format="json")
self.assertEqual(response.status_code, 200)
velpo_model = OrgUnit.objects.get(uuid=uuid)
self.assertEqual(velpo_model.name, name)
# Location should be filled
self.assertEqual(4.469, velpo_model.location.x)
self.assertEqual(50.503, velpo_model.location.y)
self.assertEqual(110, velpo_model.location.z)
# make sure APIImport record has been created
self.assertAPIImport("orgUnit", request_body=unit_body, has_problems=False)
response = c.get("/api/orgunits/?app_id=org.inconnus.spectacle", accept="application/json")
json_response = json.loads(response.content)
units = json_response["orgUnits"]
self.assertEqual(len(units), 0)
velpo_model.validation_status = OrgUnit.VALIDATION_VALID
velpo_model.save()
response = c.get("/api/orgunits/?app_id=org.inconnus.spectacle", accept="application/json")
content_1 = response.content
json_response = json.loads(response.content)
units = json_response["orgUnits"]
self.assertEqual(1, len(units)) # two org units but only one for default version
velpo_json = units[0]
self.assertEqual(velpo_json["name"], name)
self.assertEqual(floor(velpo_json["created_at"]), floor(1565194077692 / 1000))
self.assertTrue(floor(velpo_json["updated_at"]) > floor(1565194077693 / 1000))
self.assertEqual(velpo_json["org_unit_type_id"], hospital_unit_type.id)
self.assertEqual(velpo_json["parent_id"], None)
self.assertEqual(velpo_json["latitude"], 50.503)
self.assertEqual(velpo_json["longitude"], 4.469)
self.assertEqual(velpo_json["altitude"], 110)
self.assertEqual(velpo_json["id"], velpo_model.id)
response = c.get(
"/api/orgunits/?app_id=org.inconnus.spectacle", accept="application/json"
) # this should be the same result as without the app_id
content_2 = response.content
self.assertEqual(content_1, content_2)
response = c.get(
"/api/orgunits/?app_id=com.pascallegitimus.iaso", accept="application/json"
) # this should have 0 result
json_response = json.loads(response.content)
self.assertEqual(len(json_response["orgUnits"]), 0)
# inserting a child org_unit
uuid2 = "61e1dbfe-a1fc-4075-bfa2-5f3201c918f1"
name2 = "Hopital Sous Fifre"
# without latitude / longitude (our code handles lat=0, lng=0 as "no location provided")
unit_body_2 = {
"id": uuid2,
"latitude": 0,
"created_at": 1565194077699,
"updated_at": 1565194077800,
"orgUnitTypeId": hospital_unit_type.id,
"parentId": uuid,
"longitude": 0,
"altitude": 0,
"accuracy": 0,
"time": 0,
"name": name2,
}
response = c.post("/api/orgunits/?app_id=org.inconnus.spectacle", data=[unit_body_2], format="json")
self.assertEqual(response.status_code, 200)
fifre_model = OrgUnit.objects.get(uuid=uuid2)
self.assertEqual(fifre_model.name, name2)
# No location field should be filled
self.assertIsNone(fifre_model.location)
def test_org_unit_insertion_new_field_names(self):
"""Creating Org Units through the API but using org_unit_type_id and parent_id instead of orgUnitTypeId and parentId"""
c = APIClient()
hospital_unit_type = OrgUnitType.objects.get(name="Hospital")
uuid = "w5dg2671-aa59-4fb2-a4a0-4af80573e2de"
name = "Hopital Saint-André"
unit_body = [
{
"id": uuid,
"latitude": 0,
"created_at": 1565194077692,
"updated_at": 1565194077693,
"org_unit_type_id": hospital_unit_type.id,
"parent_id": None,
"longitude": 0,
"accuracy": 0,
"time": 0,
"name": name,
}
]
response = c.post("/api/orgunits/?app_id=org.inconnus.spectacle", data=unit_body, format="json")
self.assertEqual(response.status_code, 200)
velpo_model = OrgUnit.objects.get(uuid=uuid)
self.assertEqual(velpo_model.name, name)
response = c.get("/api/orgunits/?app_id=org.inconnus.spectacle", accept="application/json")
json_response = json.loads(response.content)
units = json_response["orgUnits"]
self.assertEqual(len(units), 0)
velpo_model.validation_status = OrgUnit.VALIDATION_VALID
velpo_model.save()
response = c.get("/api/orgunits/?app_id=org.inconnus.spectacle", accept="application/json")
content_1 = response.content
json_response = json.loads(response.content)
units = json_response["orgUnits"]
velpo_json = units[0]
self.assertEqual(velpo_json["name"], name)
self.assertEqual(floor(velpo_json["created_at"]), floor(1565194077692 / 1000))
self.assertTrue(floor(velpo_json["updated_at"]) > floor(1565194077693 / 1000))
self.assertEqual(velpo_json["org_unit_type_id"], hospital_unit_type.id)
self.assertEqual(velpo_json["parent_id"], None)
self.assertIsNone(velpo_json["latitude"])
self.assertIsNone(velpo_json["longitude"])
self.assertIsNone(velpo_json["altitude"])
self.assertEqual(velpo_json["id"], velpo_model.id)
response = c.get(
"/api/orgunits/?app_id=org.inconnus.spectacle", accept="application/json"
) # this should be the same result as without the app_id
content_2 = response.content
self.assertEqual(content_1, content_2)
response = c.get(
"/api/orgunits/?app_id=com.pascallegitimus.iaso", accept="application/json"
) # this should have 0 result
json_response = json.loads(response.content)
self.assertEqual(len(json_response["orgUnits"]), 0)
# inserting a child org_unit
uuid2 = "61e1dbfe-a0fc-4075-bfa2-5f3201c918f3"
name2 = "Hopital Sous Fifre"
unit_body_2 = [
{
"id": uuid2,
"latitude": 0,
"created_at": 1565194077699,
"updated_at": 1565194077800,
"orgUnitTypeId": hospital_unit_type.id,
"parentId": uuid,
"longitude": 0,
"accuracy": 0,
"altitude": 0,
"time": 0,
"name": name2,
}
]
response = c.post("/api/orgunits/?app_id=org.inconnus.spectacle", data=unit_body_2, format="json")
self.assertEqual(response.status_code, 200)
fifre_model = OrgUnit.objects.get(uuid=uuid2)
self.assertEqual(fifre_model.name, name2)
# No app id - An APIImport record with has_problem set to True should be created
response = c.post("/api/orgunits/", data=unit_body_2, format="json")
self.assertEqual(response.status_code, 200)
self.assertAPIImport(
"orgUnit",
request_body=unit_body_2,
has_problems=True,
exception_contains_string="Could not find project for user",
)
# Wrong app id - An APIImport record with has_problem set to True should be created
response = c.post("/api/orgunits/?app_id=1234", data=unit_body_2, format="json")
self.assertEqual(response.status_code, 200)
self.assertAPIImport(
"orgUnit",
request_body=unit_body_2,
has_problems=True,
exception_contains_string="Could not find project for user",
)
def test_instance_insertion(self):
"""Creating Instance Units through the API"""
c = APIClient()
cds_unit_type = OrgUnitType.objects.get(name="CDS")
uuid = "f6ec1671-aa59-4fb2-a4a0-4af80573e2ae"
name = "Hopital Velpo"
unit_body = {
"id": uuid,
"latitude": 0,
"created_at": 1565194077692,
"updated_at": 1565194077693,
"orgUnitTypeId": cds_unit_type.id,
"parentId": None,
"longitude": 0,
"accuracy": 0,
"time": 0,
"name": name,
}
response = c.post("/api/orgunits/?app_id=org.inconnus.spectacle", data=[unit_body], format="json")
self.assertJSONResponse(response, 200)
velpo_model = OrgUnit.objects.get(uuid=uuid)
uuid = "4b7c3954-f69a-4b99-83b1-db73957b32b8"
name = "Questionnaire CDS"
form = Form(name="CDS FORM")
form.save()
instance_body = [
{
"id": uuid,
"latitude": 4.4,
"created_at": 1565258153704,
"updated_at": 1565258153704,
"orgUnitId": velpo_model.id,
"formId": form.id,
"longitude": 4.4,
"accuracy": 10,
"altitude": 100,
"file": "\/storage\/emulated\/0\/odk\/instances\/RDC Collecte Data DPS_2_2019-08-08_11-54-46\/RDC Collecte Data DPS_2_2019-08-08_11-54-46.xml",
"name": name,
}
]
response = c.post("/api/instances/?app_id=org.inconnus.spectacle", data=instance_body, format="json")
self.assertEqual(response.status_code, 200)
instance = Instance.objects.get(uuid=uuid)
self.assertEqual(instance.name, name)
self.assertEqual(instance.org_unit_id, velpo_model.id)
self.assertEqual(instance.form_id, form.id)
self.assertEqual(instance.location.x, 4.4)
self.assertEqual(instance.location.y, 4.4)
self.assertEqual(instance.location.z, 100)
self.assertAPIImport("instance", request_body=instance_body, has_problems=False)
# No app id - An APIImport record with has_problem set to True should be created
response = c.post("/api/instances/", data=instance_body, format="json")
self.assertEqual(response.status_code, 200)
self.assertAPIImport(
"instance",
request_body=instance_body,
has_problems=True,
exception_contains_string="Could not find project for user",
)
# Wrong app id - An APIImport record with has_problem set to True should be created
response = c.post("/api/instances/?app_id=9876", data=instance_body, format="json")
self.assertEqual(response.status_code, 200)
self.assertAPIImport(
"instance",
request_body=instance_body,
has_problems=True,
exception_contains_string="Could not find project for user",
)
def test_fetch_org_unit_type(self):
"""Fetch Org Unit Types through the API"""
c = APIClient()
response = c.get(
"/api/orgunittypes/?app_id=com.pascallegitimus.iaso", accept="application/json"
) # this should have 0 result
json_response = json.loads(response.content)
self.assertEqual(response.status_code, 200)
self.assertEqual(len(json_response["orgUnitTypes"]), 0)
response = c.get(
"/api/orgunittypes/?app_id=org.inconnus.spectacle", accept="application/json"
) # this should have 2 results
json_response = json.loads(response.content)
org_unit_types = json_response["orgUnitTypes"]
self.assertEqual(len(org_unit_types), 2)
found = False
for org_unit_type_data in org_unit_types:
self.assertValidOrgUnitTypeData(org_unit_type_data)
if org_unit_type_data["name"] == "Hospital":
self.assertLess(org_unit_type_data["created_at"], org_unit_type_data["updated_at"])
self.assertEqual(len(org_unit_type_data["sub_unit_types"]), 1)
for sub_org_unit_type_data in org_unit_type_data["sub_unit_types"]:
self.assertValidOrgUnitTypeData(sub_org_unit_type_data)
found = True
self.assertTrue(found)
def test_forms_list_with_app_id(self):
"""GET /forms/ mobile app happy path (no auth but with app id): 2 results"""
response = self.client.get(f"/api/forms/?app_id={self.project.app_id}")
self.assertJSONResponse(response, 200)
response_data = response.json()
self.assertValidFormListData(response_data, 2)
for form_data in response_data["forms"]:
self.assertValidFormData(form_data)
# noinspection DuplicatedCode
def assertValidFormListData(self, list_data: typing.Mapping, expected_length: int, paginated: bool = False):
self.assertValidListData(
list_data=list_data, expected_length=expected_length, results_key="forms", paginated=paginated
)
for form_data in list_data["forms"]:
self.assertValidFormData(form_data)
# noinspection DuplicatedCode
def assertValidFormData(self, form_data: typing.Mapping):
self.assertHasField(form_data, "id", int)
self.assertHasField(form_data, "name", str)
self.assertHasField(form_data, "periods_before_allowed", int)
self.assertHasField(form_data, "periods_after_allowed", int)
self.assertHasField(form_data, "created_at", float)
self.assertHasField(form_data, "updated_at", float)
# noinspection DuplicatedCode
def assertValidFullFormData(self, form_data: typing.Mapping):
self.assertValidFormData(form_data)
self.assertHasField(form_data, "device_field", str)
self.assertHasField(form_data, "location_field", str)
self.assertHasField(form_data, "form_id", str)
self.assertHasField(form_data, "period_type", str)
self.assertHasField(form_data, "single_per_period", bool)
self.assertHasField(form_data, "org_unit_types", list)
self.assertHasField(form_data, "projects", list)
self.assertHasField(form_data, "instances_count", int)
self.assertHasField(form_data, "instance_updated_at", float)
for org_unit_type_data in form_data["org_unit_types"]:
self.assertIsInstance(org_unit_type_data, dict)
self.assertHasField(org_unit_type_data, "id", int)
for project_data in form_data["projects"]:
self.assertIsInstance(project_data, dict)
self.assertHasField(project_data, "id", int)
self.assertHasField(form_data, "instance_updated_at", float)
self.assertHasField(form_data, "instances_count", int)
# noinspection DuplicatedCode
def assertValidOrgUnitTypeData(self, org_unit_type_data):
self.assertHasField(org_unit_type_data, "id", int)
self.assertHasField(org_unit_type_data, "name", str)
self.assertHasField(org_unit_type_data, "short_name", str)
self.assertHasField(org_unit_type_data, "depth", int, optional=True)
self.assertHasField(org_unit_type_data, "sub_unit_types", list, optional=True)
self.assertHasField(org_unit_type_data, "created_at", float)
if "sub_unit_types" in org_unit_type_data:
for sub_org_unit_type_data in org_unit_type_data["sub_unit_types"]:
self.assertValidOrgUnitTypeData(sub_org_unit_type_data)
|
# coding: utf-8
import re
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class ListExtendsParamsRequest:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'business_type_id': 'str',
'incident_sub_type_id': 'str',
'product_category_id': 'str',
'x_site': 'int',
'x_language': 'str',
'x_time_zone': 'str'
}
attribute_map = {
'business_type_id': 'business_type_id',
'incident_sub_type_id': 'incident_sub_type_id',
'product_category_id': 'product_category_id',
'x_site': 'X-Site',
'x_language': 'X-Language',
'x_time_zone': 'X-Time-Zone'
}
def __init__(self, business_type_id=None, incident_sub_type_id=None, product_category_id=None, x_site=None, x_language=None, x_time_zone=None):
"""ListExtendsParamsRequest - a model defined in huaweicloud sdk"""
self._business_type_id = None
self._incident_sub_type_id = None
self._product_category_id = None
self._x_site = None
self._x_language = None
self._x_time_zone = None
self.discriminator = None
self.business_type_id = business_type_id
if incident_sub_type_id is not None:
self.incident_sub_type_id = incident_sub_type_id
if product_category_id is not None:
self.product_category_id = product_category_id
if x_site is not None:
self.x_site = x_site
if x_language is not None:
self.x_language = x_language
if x_time_zone is not None:
self.x_time_zone = x_time_zone
@property
def business_type_id(self):
"""Gets the business_type_id of this ListExtendsParamsRequest.
        Business type ID.
:return: The business_type_id of this ListExtendsParamsRequest.
:rtype: str
"""
return self._business_type_id
@business_type_id.setter
def business_type_id(self, business_type_id):
"""Sets the business_type_id of this ListExtendsParamsRequest.
        Business type ID.
:param business_type_id: The business_type_id of this ListExtendsParamsRequest.
:type: str
"""
self._business_type_id = business_type_id
@property
def incident_sub_type_id(self):
"""Gets the incident_sub_type_id of this ListExtendsParamsRequest.
        Incident (ticket) sub-type ID.
:return: The incident_sub_type_id of this ListExtendsParamsRequest.
:rtype: str
"""
return self._incident_sub_type_id
@incident_sub_type_id.setter
def incident_sub_type_id(self, incident_sub_type_id):
"""Sets the incident_sub_type_id of this ListExtendsParamsRequest.
        Incident (ticket) sub-type ID.
:param incident_sub_type_id: The incident_sub_type_id of this ListExtendsParamsRequest.
:type: str
"""
self._incident_sub_type_id = incident_sub_type_id
@property
def product_category_id(self):
"""Gets the product_category_id of this ListExtendsParamsRequest.
        Product category ID.
:return: The product_category_id of this ListExtendsParamsRequest.
:rtype: str
"""
return self._product_category_id
@product_category_id.setter
def product_category_id(self, product_category_id):
"""Sets the product_category_id of this ListExtendsParamsRequest.
        Product category ID.
:param product_category_id: The product_category_id of this ListExtendsParamsRequest.
:type: str
"""
self._product_category_id = product_category_id
@property
def x_site(self):
"""Gets the x_site of this ListExtendsParamsRequest.
        Site: 0 (China site) or 1 (international site); defaults to 0 when omitted.
:return: The x_site of this ListExtendsParamsRequest.
:rtype: int
"""
return self._x_site
@x_site.setter
def x_site(self, x_site):
"""Sets the x_site of this ListExtendsParamsRequest.
        Site: 0 (China site) or 1 (international site); defaults to 0 when omitted.
:param x_site: The x_site of this ListExtendsParamsRequest.
:type: int
"""
self._x_site = x_site
@property
def x_language(self):
"""Gets the x_language of this ListExtendsParamsRequest.
        Locale, given as a common language tag such as zh-cn; defaults to zh-cn. Localized information such as ticket type names is returned according to this locale.
:return: The x_language of this ListExtendsParamsRequest.
:rtype: str
"""
return self._x_language
@x_language.setter
def x_language(self, x_language):
"""Sets the x_language of this ListExtendsParamsRequest.
        Locale, given as a common language tag such as zh-cn; defaults to zh-cn. Localized information such as ticket type names is returned according to this locale.
:param x_language: The x_language of this ListExtendsParamsRequest.
:type: str
"""
self._x_language = x_language
@property
def x_time_zone(self):
"""Gets the x_time_zone of this ListExtendsParamsRequest.
        Time zone, given as a common time-zone string such as GMT+8; defaults to GMT+8. Time-related data is processed according to this time zone.
:return: The x_time_zone of this ListExtendsParamsRequest.
:rtype: str
"""
return self._x_time_zone
@x_time_zone.setter
def x_time_zone(self, x_time_zone):
"""Sets the x_time_zone of this ListExtendsParamsRequest.
        Time zone, given as a common time-zone string such as GMT+8; defaults to GMT+8. Time-related data is processed according to this time zone.
:param x_time_zone: The x_time_zone of this ListExtendsParamsRequest.
:type: str
"""
self._x_time_zone = x_time_zone
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ListExtendsParamsRequest):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
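# Illustrative usage (not part of the generated SDK code); only
# business_type_id is required, the X-* header fields are optional:
#   request = ListExtendsParamsRequest(business_type_id="12345",
#                                      x_language="en-us")
#   print(request.to_dict())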
|
from django.core.mail import EmailMultiAlternatives
from django.template.loader import render_to_string
def send_welcome_email(name, receiver):
# Creating message subject and sender
subject = 'Welcome to Joseph shitandi Instagram clone'
sender = 'jsphshtnd@gmail.com'
    # passing in the context variables
    text_content = render_to_string('email/newsemail.txt', {"name": name})
    html_content = render_to_string('email/newsemail.html', {"name": name})
    msg = EmailMultiAlternatives(subject, text_content, sender, [receiver])
    msg.attach_alternative(html_content, 'text/html')
msg.send()
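# Example call (hypothetical recipient):
#   send_welcome_email('Jane', 'jane@example.com')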
|
import torch
import torch.nn as nn
import torch.nn.functional as F  # used below for F.interpolate
from utils.network_utils import *
from networks.architectures.base_modules import *
class UNetDecoder(nn.Module):
def __init__(self, opt, nf):
super(UNetDecoder, self).__init__()
ic, oc, norm_type, act_type, mode = \
opt.ic, opt.oc, opt.norm_type, opt.act_type, opt.dec_mode
self.u1 = UpSample('TC', nf)
self.u2 = UpSample('TC', nf//2)
self.u3 = UpSample('TC', nf//4)
self.u4 = UpSample('TC', nf//8)
self.blk1 = UNetBlk_Concat(nf, nf//2, nf//2, norm_type, act_type, False, mode)
self.blk2 = UNetBlk_Concat(nf//2, nf//4, nf//4, norm_type, act_type, False, mode)
self.blk3 = UNetBlk_Concat(nf//4, nf//8, nf//8, norm_type, act_type, False, mode)
self.blk4 = UNetBlk_Concat(nf//8, nf//16, nf//16, norm_type, act_type, False, mode)
self.blk5 = nn.Conv2d(nf//16, oc, 1, 1, 0, True)
def forward(self, e):
out = self.blk1(self.u1(e[4]), e[3])
out = self.blk2(self.u2(out), e[2])
out = self.blk3(self.u3(out), e[1])
out = self.blk4(self.u4(out), e[0])
out = self.blk5(out)
return out
class PSPNetDecoder(nn.Module):
def __init__(self, opt, nf):
super(PSPNetDecoder, self).__init__()
ic, oc, norm_type, act_type, mode = \
opt.ic, opt.oc, opt.norm_type, opt.act_type, opt.dec_mode
self.branch1 = nn.Sequential(
nn.AdaptiveAvgPool2d(1),
nn.Conv2d(nf, nf, 1, 1, 0)
)
self.branch2 = nn.Sequential(
nn.AdaptiveAvgPool2d(2),
nn.Conv2d(nf, nf, 1, 1, 0)
)
self.branch3 = nn.Sequential(
nn.AdaptiveAvgPool2d(3),
nn.Conv2d(nf, nf, 1, 1, 0)
)
self.branch4 = nn.Sequential(
nn.AdaptiveAvgPool2d(6),
nn.Conv2d(nf, nf, 1, 1, 0)
)
self.branch5 = nn.Conv2d(nf, nf, 1, 1, 0)
self.aux = nn.Sequential(
nn.Conv2d(nf//2, 512, 3, 1, 1),
nn.BatchNorm2d(512),
nn.ReLU(),
nn.Conv2d(512, oc, 1, 1, 0)
)
self.final = nn.Sequential(
nn.Conv2d(nf*5, 512, 3, 1, 1),
nn.BatchNorm2d(512),
nn.ReLU(),
nn.Conv2d(512, oc, 1, 1, 0)
)
def forward(self, e):
aux, main = e[3], e[4]
size = (main.shape[-2], main.shape[-1])
b1 = F.interpolate(self.branch1(main), size=size)
b2 = F.interpolate(self.branch2(main), size=size)
b3 = F.interpolate(self.branch3(main), size=size)
b4 = F.interpolate(self.branch4(main), size=size)
        out = torch.cat([b1, b2, b3, b4, main], dim=1)
out = self.final(out)
aux = self.aux(aux)
return aux, out
class PAM(nn.Module):
def __init__(self, nf):
super(PAM, self).__init__()
self.query_conv = nn.Conv2d(nf, nf//8, 1, 1, 0)
self.key_conv = nn.Conv2d(nf, nf//8, 1, 1, 0)
self.value_conv = nn.Conv2d(nf, nf, 1, 1, 0)
self.alpha = nn.Parameter(torch.zeros(1))
def forward(self, x):
bs, nf, h, w = x.shape
query = self.query_conv(x).view(bs, nf//8, h*w)
key = self.key_conv(x).view(bs, nf//8, h*w).permute(0, 2, 1)
value = self.value_conv(x).view(bs, nf, h*w)
        out = torch.softmax(torch.bmm(key, query), dim=-1).permute(0, 2, 1)
out = torch.bmm(value, out).reshape(bs, nf, h, w)
out = x + self.alpha * out
return out
class CAM(nn.Module):
def __init__(self, nf):
super(CAM, self).__init__()
self.query_conv = nn.Conv2d(nf, nf//8, 1, 1, 0)
self.key_conv = nn.Conv2d(nf, nf//8, 1, 1, 0)
self.value_conv = nn.Conv2d(nf, nf, 1, 1, 0)
self.alpha = nn.Parameter(torch.zeros(1))
def forward(self, x):
bs, nf, h, w = x.shape
query = x.view(bs, nf, h*w)
key = x.view(bs, nf, h*w).permute(0, 2, 1)
value = x.view(bs, nf, h*w)
        out = torch.softmax(torch.bmm(query, key), dim=-1)
out = torch.bmm(out, value).reshape(bs, nf, h, w)
out = x + self.alpha * out
return out
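# A quick sanity sketch (illustrative only): both attention modules preserve
# the input shape, so they can wrap any (N, C, H, W) feature map.
#   x = torch.randn(2, 64, 16, 16)
#   assert PAM(64)(x).shape == x.shape
#   assert CAM(64)(x).shape == x.shape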
class DANetDecoder(nn.Module):
def __init__(self, opt, nf):
super(DANetDecoder, self).__init__()
ic, oc, norm_type, act_type, mode = \
opt.ic, opt.oc, opt.norm_type, opt.act_type, opt.dec_mode
self.PAM_branch = nn.Sequential(
nn.Conv2d(nf, nf//4, 3, 1, 1),
nn.BatchNorm2d(nf//4), nn.ReLU(),
PAM(nf//4),
nn.Conv2d(nf//4, nf//4, 3, 1, 1),
nn.BatchNorm2d(nf//4), nn.ReLU()
)
self.CAM_branch = nn.Sequential(
nn.Conv2d(nf, nf//4, 3, 1, 1),
nn.BatchNorm2d(nf//4), nn.ReLU(),
CAM(nf//4),
nn.Conv2d(nf//4, nf//4, 3, 1, 1),
nn.BatchNorm2d(nf//4), nn.ReLU()
)
self.PAM_aux = nn.Conv2d(nf//4, oc, 1, 1, 0)
self.CAM_aux = nn.Conv2d(nf//4, oc, 1, 1, 0)
        self.up_p_aux = UpSample('BI', m=8)
        self.up_c_aux = UpSample('BI', m=8)
if(mode == 'fullres'):
self.final = nn.Conv2d(nf//4, nf//4, 3, 1, 1)
self.upsample = nn.Sequential(
UpSample('BI'), UNetBlk(nf//4, nf//4, norm_type, act_type, False),
UpSample('BI'), UNetBlk(nf//4, nf//8, norm_type, act_type, False),
UpSample('BI'), UNetBlk(nf//8, nf//16, norm_type, act_type, False),
nn.Conv2d(nf//16, oc, 1, 1, 0)
)
else:
self.final = nn.Conv2d(nf//4, oc, 1, 1, 0)
self.upsample = UpSample('BI', m=8)
def forward(self, x):
p = self.PAM_branch(x[-1])
p_aux = self.PAM_aux(p)
c = self.CAM_branch(x[-1])
c_aux = self.CAM_aux(c)
out = self.final(p + c)
p_aux, c_aux, out = self.up_p_aux(p_aux), self.up_c_aux(c_aux), self.upsample(out)
return p_aux, c_aux, out
|
# coding: utf-8
"""
Automox Console API
API for use with the Automox Console # noqa: E501
OpenAPI spec version: 2021-11-16
Contact: support@automox.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class OneOfEventData(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
}
attribute_map = {
}
def __init__(self): # noqa: E501
"""OneOfEventData - a model defined in Swagger""" # noqa: E501
self.discriminator = None
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(OneOfEventData, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, OneOfEventData):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
# Note: The information criteria add 1 to the number of parameters
# whenever the model has an AR or MA term since, in principle,
# the variance could be treated as a free parameter and restricted.
# This code does not allow this, but it adds consistency with other
# packages such as gretl and X12-ARIMA.
from __future__ import absolute_import
from statsmodels.compat.python import string_types, range
# for 2to3 with extensions
from datetime import datetime
import numpy as np
from scipy import optimize
from scipy.stats import t, norm
from scipy.signal import lfilter
from numpy import dot, log, zeros, pi
from numpy.linalg import inv
from statsmodels.tools.decorators import (cache_readonly,
resettable_cache)
import statsmodels.tsa.base.tsa_model as tsbase
import statsmodels.base.wrapper as wrap
from statsmodels.regression.linear_model import yule_walker, GLS
from statsmodels.tsa.tsatools import (lagmat, add_trend,
_ar_transparams, _ar_invtransparams,
_ma_transparams, _ma_invtransparams,
unintegrate, unintegrate_levels)
from statsmodels.tsa.vector_ar import util
from statsmodels.tsa.ar_model import AR
from statsmodels.tsa.arima_process import arma2ma
from statsmodels.tools.numdiff import approx_hess_cs, approx_fprime_cs
from statsmodels.tsa.base.datetools import _index_date  # needed by _validate below
from statsmodels.tsa.kalmanf import KalmanFilter
_armax_notes = """
Notes
-----
If exogenous variables are given, then the model that is fit is
.. math::
\\phi(L)(y_t - X_t\\beta) = \\theta(L)\\epsilon_t
where :math:`\\phi` and :math:`\\theta` are polynomials in the lag
operator, :math:`L`. This is the regression model with ARMA errors,
or ARMAX model. This specification is used, whether or not the model
is fit using conditional sum of squares or maximum likelihood, using
the `method` argument in
:meth:`statsmodels.tsa.arima_model.%(Model)s.fit`. Therefore, for
now, `css` and `mle` refer to estimation methods only. This may
change for the case of the `css` model in future versions.
"""
_arma_params = """\
endog : array-like
The endogenous variable.
order : iterable
The (p,q) order of the model for the number of AR and MA
parameters to use.
exog : array-like, optional
An optional array of exogenous variables. This should *not* include a
constant or trend. You can specify this in the `fit` method."""
_arma_model = "Autoregressive Moving Average ARMA(p,q) Model"
_arima_model = "Autoregressive Integrated Moving Average ARIMA(p,d,q) Model"
_arima_params = """\
endog : array-like
The endogenous variable.
order : iterable
The (p,d,q) order of the model for the number of AR parameters,
differences, and MA parameters to use.
exog : array-like, optional
An optional array of exogenous variables. This should *not* include a
constant or trend. You can specify this in the `fit` method."""
_predict_notes = """
Notes
-----
Use the results predict method instead.
"""
_results_notes = """
Notes
-----
It is recommended to use dates with the time-series models, as the
below will probably make clear. However, if ARIMA is used without
dates and/or `start` and `end` are given as indices, then these
indices are in terms of the *original*, undifferenced series. Ie.,
given some undifferenced observations::
1970Q1, 1
1970Q2, 1.5
1970Q3, 1.25
1970Q4, 2.25
1971Q1, 1.2
1971Q2, 4.1
1970Q1 is observation 0 in the original series. However, if we fit an
ARIMA(p,1,q) model then we lose this first observation through
differencing. Therefore, the first observation we can forecast (if
using exact MLE) is index 1. In the differenced series this is index
0, but we refer to it as 1 from the original series.
"""
_predict = """
%(Model)s model in-sample and out-of-sample prediction
Parameters
----------
%(params)s
start : int, str, or datetime
Zero-indexed observation number at which to start forecasting, ie.,
the first forecast is start. Can also be a date string to
parse or a datetime type.
end : int, str, or datetime
Zero-indexed observation number at which to end forecasting, ie.,
the last forecast is end. Can also be a date string to
parse or a datetime type. However, if the dates index does not
have a fixed frequency, end must be an integer index if you
want out of sample prediction.
exog : array-like, optional
If the model is an ARMAX and out-of-sample forecasting is
requested, exog must be given. Note that you'll need to pass
`k_ar` additional lags for any exogenous variables. E.g., if you
fit an ARMAX(2, q) model and want to predict 5 steps, you need 7
observations to do this.
dynamic : bool, optional
The `dynamic` keyword affects in-sample prediction. If dynamic
is False, then the in-sample lagged values are used for
prediction. If `dynamic` is True, then in-sample forecasts are
used in place of lagged dependent variables. The first forecasted
value is `start`.
%(extra_params)s
Returns
-------
%(returns)s
%(extra_section)s
"""
_predict_returns = """predict : array
The predicted values.
"""
_arma_predict = _predict % {"Model" : "ARMA",
"params" : """
params : array-like
The fitted parameters of the model.""",
"extra_params" : "",
"returns" : _predict_returns,
"extra_section" : _predict_notes}
_arma_results_predict = _predict % {"Model" : "ARMA", "params" : "",
"extra_params" : "",
"returns" : _predict_returns,
"extra_section" : _results_notes}
_arima_predict = _predict % {"Model" : "ARIMA",
"params" : """params : array-like
The fitted parameters of the model.""",
"extra_params" : """typ : str {'linear', 'levels'}
- 'linear' : Linear prediction in terms of the differenced
endogenous variables.
- 'levels' : Predict the levels of the original endogenous
variables.\n""", "returns" : _predict_returns,
"extra_section" : _predict_notes}
_arima_results_predict = _predict % {"Model" : "ARIMA",
"params" : "",
"extra_params" :
"""typ : str {'linear', 'levels'}
- 'linear' : Linear prediction in terms of the differenced
endogenous variables.
- 'levels' : Predict the levels of the original endogenous
variables.\n""",
"returns" : _predict_returns,
"extra_section" : _results_notes}
_arima_plot_predict_example = """ Examples
--------
>>> import statsmodels.api as sm
>>> import matplotlib.pyplot as plt
>>> import pandas as pd
>>>
>>> dta = sm.datasets.sunspots.load_pandas().data[['SUNACTIVITY']]
>>> dta.index = pd.DatetimeIndex(start='1700', end='2009', freq='A')
>>> res = sm.tsa.ARMA(dta, (3, 0)).fit()
>>> fig, ax = plt.subplots()
>>> ax = dta.ix['1950':].plot(ax=ax)
>>> fig = res.plot_predict('1990', '2012', dynamic=True, ax=ax,
... plot_insample=False)
>>> plt.show()
.. plot:: plots/arma_predict_plot.py
"""
_plot_predict = ("""
Plot forecasts
""" + '\n'.join(_predict.split('\n')[2:])) % {
"params" : "",
"extra_params" : """alpha : float, optional
The confidence intervals for the forecasts are (1 - alpha)%
plot_insample : bool, optional
Whether to plot the in-sample series. Default is True.
ax : matplotlib.Axes, optional
Existing axes to plot with.""",
"returns" : """fig : matplotlib.Figure
The plotted Figure instance""",
"extra_section" : ('\n' + _arima_plot_predict_example +
'\n' + _results_notes)
}
_arima_plot_predict = ("""
Plot forecasts
""" + '\n'.join(_predict.split('\n')[2:])) % {
"params" : "",
"extra_params" : """alpha : float, optional
The confidence intervals for the forecasts are (1 - alpha)%
plot_insample : bool, optional
Whether to plot the in-sample series. Default is True.
ax : matplotlib.Axes, optional
Existing axes to plot with.""",
"returns" : """fig : matplotlib.Figure
The plotted Figure instance""",
"extra_section" : ('\n' + _arima_plot_predict_example +
'\n' +
'\n'.join(_results_notes.split('\n')[:3]) +
("""
This is hard-coded to only allow plotting of the forecasts in levels.
""") +
'\n'.join(_results_notes.split('\n')[3:]))
}
def cumsum_n(x, n):
if n:
n -= 1
x = np.cumsum(x)
return cumsum_n(x, n)
else:
return x
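# e.g. cumsum_n(np.ones(3), 2) applies np.cumsum twice:
# [1, 1, 1] -> [1, 2, 3] -> [1, 3, 6]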
def _check_arima_start(start, k_ar, k_diff, method, dynamic):
if start < 0:
raise ValueError("The start index %d of the original series "
"has been differenced away" % start)
elif (dynamic or 'mle' not in method) and start < k_ar:
raise ValueError("Start must be >= k_ar for conditional MLE "
"or dynamic forecast. Got %d" % start)
def _get_predict_out_of_sample(endog, p, q, k_trend, k_exog, start, errors,
trendparam, exparams, arparams, maparams, steps,
method, exog=None):
"""
Returns endog, resid, mu of appropriate length for out of sample
prediction.
"""
if q:
resid = np.zeros(q)
if start and 'mle' in method or (start == p and not start == 0):
resid[:q] = errors[start-q:start]
elif start:
resid[:q] = errors[start-q-p:start-p]
else:
resid[:q] = errors[-q:]
else:
resid = None
y = endog
if k_trend == 1:
# use expectation not constant
if k_exog > 0:
#TODO: technically should only hold for MLE not
# conditional model. See #274.
# ensure 2-d for conformability
if np.ndim(exog) == 1 and k_exog == 1:
# have a 1d series of observations -> 2d
exog = exog[:, None]
elif np.ndim(exog) == 1:
# should have a 1d row of exog -> 2d
if len(exog) != k_exog:
raise ValueError("1d exog given and len(exog) != k_exog")
exog = exog[None, :]
X = lagmat(np.dot(exog, exparams), p, original='in', trim='both')
mu = trendparam * (1 - arparams.sum())
# arparams were reversed in unpack for ease later
mu = mu + (np.r_[1, -arparams[::-1]] * X).sum(1)[:, None]
else:
mu = trendparam * (1 - arparams.sum())
mu = np.array([mu]*steps)
elif k_exog > 0:
X = np.dot(exog, exparams)
#NOTE: you shouldn't have to give in-sample exog!
X = lagmat(X, p, original='in', trim='both')
mu = (np.r_[1, -arparams[::-1]] * X).sum(1)[:, None]
else:
mu = np.zeros(steps)
endog = np.zeros(p + steps - 1)
if p and start:
endog[:p] = y[start-p:start]
elif p:
endog[:p] = y[-p:]
return endog, resid, mu
def _arma_predict_out_of_sample(params, steps, errors, p, q, k_trend, k_exog,
endog, exog=None, start=0, method='mle'):
(trendparam, exparams,
arparams, maparams) = _unpack_params(params, (p, q), k_trend,
k_exog, reverse=True)
# print 'params:',params
# print 'arparams:',arparams,'maparams:',maparams
endog, resid, mu = _get_predict_out_of_sample(endog, p, q, k_trend, k_exog,
start, errors, trendparam,
exparams, arparams,
maparams, steps, method,
exog)
# print 'mu[-1]:',mu[-1], 'mu[0]:',mu[0]
forecast = np.zeros(steps)
if steps == 1:
if q:
return mu[0] + np.dot(arparams, endog[:p]) + np.dot(maparams,
resid[:q]), mu[0]
else:
return mu[0] + np.dot(arparams, endog[:p]), mu[0]
if q:
i = 0 # if q == 1
else:
i = -1
for i in range(min(q, steps - 1)):
fcast = (mu[i] + np.dot(arparams, endog[i:i + p]) +
np.dot(maparams[:q - i], resid[i:i + q]))
forecast[i] = fcast
endog[i+p] = fcast
for i in range(i + 1, steps - 1):
fcast = mu[i] + np.dot(arparams, endog[i:i+p])
forecast[i] = fcast
endog[i+p] = fcast
#need to do one more without updating endog
forecast[-1] = mu[-1] + np.dot(arparams, endog[steps - 1:])
    return forecast, mu[-1]  # Modified by me; the original statement was 'return forecast'
def _arma_predict_in_sample(start, end, endog, resid, k_ar, method):
"""
Pre- and in-sample fitting for ARMA.
"""
if 'mle' in method:
fittedvalues = endog - resid # get them all then trim
else:
fittedvalues = endog[k_ar:] - resid
fv_start = start
if 'mle' not in method:
fv_start -= k_ar # start is in terms of endog index
fv_end = min(len(fittedvalues), end + 1)
return fittedvalues[fv_start:fv_end]
def _validate(start, k_ar, k_diff, dates, method):
if isinstance(start, (string_types, datetime)):
start = _index_date(start, dates)
start -= k_diff
if 'mle' not in method and start < k_ar - k_diff:
raise ValueError("Start must be >= k_ar for conditional "
"MLE or dynamic forecast. Got %s" % start)
return start
def _unpack_params(params, order, k_trend, k_exog, reverse=False):
p, q = order
k = k_trend + k_exog
maparams = params[k+p:]
arparams = params[k:k+p]
trend = params[:k_trend]
exparams = params[k_trend:k]
if reverse:
return trend, exparams, arparams[::-1], maparams[::-1]
return trend, exparams, arparams, maparams
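# Illustration: params are laid out as [trend | exog | ar | ma]. For
# order=(2, 1) with k_trend=1 and k_exog=0:
#   trend = params[:1], arparams = params[1:3], maparams = params[3:4]
# With reverse=True the AR/MA blocks are flipped for the dot products
# used in prediction.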
def _unpack_order(order):
k_ar, k_ma, k = order
k_lags = max(k_ar, k_ma+1)
return k_ar, k_ma, order, k_lags
def _make_arma_names(data, k_trend, order, exog_names):
k_ar, k_ma = order
exog_names = exog_names or []
ar_lag_names = util.make_lag_names([data.ynames], k_ar, 0)
ar_lag_names = [''.join(('ar.', i)) for i in ar_lag_names]
ma_lag_names = util.make_lag_names([data.ynames], k_ma, 0)
ma_lag_names = [''.join(('ma.', i)) for i in ma_lag_names]
trend_name = util.make_lag_names('', 0, k_trend)
exog_names = trend_name + exog_names + ar_lag_names + ma_lag_names
return exog_names
def _make_arma_exog(endog, exog, trend):
k_trend = 1 # overwritten if no constant
if exog is None and trend == 'c': # constant only
exog = np.ones((len(endog), 1))
elif exog is not None and trend == 'c': # constant plus exogenous
exog = add_trend(exog, trend='c', prepend=True)
elif exog is not None and trend == 'nc':
# make sure it's not holding constant from last run
if exog.var() == 0:
exog = None
k_trend = 0
if trend == 'nc':
k_trend = 0
return k_trend, exog
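# For example: trend='c' with exog=None yields k_trend=1 and a column of
# ones; trend='nc' yields k_trend=0 and leaves exog without a constant.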
def _check_estimable(nobs, n_params):
if nobs <= n_params:
raise ValueError("Insufficient degrees of freedom to estimate")
class ARMA(tsbase.TimeSeriesModel):
__doc__ = tsbase._tsa_doc % {"model" : _arma_model,
"params" : _arma_params, "extra_params" : "",
"extra_sections" : _armax_notes %
{"Model" : "ARMA"}}
def __init__(self, endog, order, exog=None, dates=None, freq=None,
missing='none'):
super(ARMA, self).__init__(endog, exog, dates, freq, missing=missing)
exog = self.data.exog # get it after it's gone through processing
_check_estimable(len(self.endog), sum(order))
self.k_ar = k_ar = order[0]
self.k_ma = k_ma = order[1]
self.k_lags = max(k_ar, k_ma+1)
self.constant = 0 #Added by me
if exog is not None:
if exog.ndim == 1:
exog = exog[:, None]
k_exog = exog.shape[1] # number of exog. variables excl. const
else:
k_exog = 0
self.k_exog = k_exog
def _fit_start_params_hr(self, order):
"""
Get starting parameters for fit.
Parameters
----------
order : iterable
(p,q,k) - AR lags, MA lags, and number of exogenous variables
including the constant.
Returns
-------
start_params : array
A first guess at the starting parameters.
Notes
-----
If necessary, fits an AR process with the laglength selected according
to best BIC. Obtain the residuals. Then fit an ARMA(p,q) model via
OLS using these residuals for a first approximation. Uses a separate
OLS regression to find the coefficients of exogenous variables.
References
----------
Hannan, E.J. and Rissanen, J. 1982. "Recursive estimation of mixed
autoregressive-moving average order." `Biometrika`. 69.1.
"""
p, q, k = order
start_params = zeros((p+q+k))
endog = self.endog.copy() # copy because overwritten
exog = self.exog
if k != 0:
ols_params = GLS(endog, exog).fit().params
start_params[:k] = ols_params
endog -= np.dot(exog, ols_params).squeeze()
if q != 0:
if p != 0:
# make sure we don't run into small data problems in AR fit
nobs = len(endog)
maxlag = int(round(12*(nobs/100.)**(1/4.)))
if maxlag >= nobs:
maxlag = nobs - 1
armod = AR(endog).fit(ic='bic', trend='nc', maxlag=maxlag)
arcoefs_tmp = armod.params
p_tmp = armod.k_ar
# it's possible in small samples that optimal lag-order
# doesn't leave enough obs. No consistent way to fix.
if p_tmp + q >= len(endog):
raise ValueError("Proper starting parameters cannot"
" be found for this order with this "
"number of observations. Use the "
"start_params argument.")
resid = endog[p_tmp:] - np.dot(lagmat(endog, p_tmp,
trim='both'),
arcoefs_tmp)
if p < p_tmp + q:
endog_start = p_tmp + q - p
resid_start = 0
else:
endog_start = 0
resid_start = p - p_tmp - q
lag_endog = lagmat(endog, p, 'both')[endog_start:]
lag_resid = lagmat(resid, q, 'both')[resid_start:]
# stack ar lags and resids
X = np.column_stack((lag_endog, lag_resid))
coefs = GLS(endog[max(p_tmp + q, p):], X).fit().params
start_params[k:k+p+q] = coefs
else:
start_params[k+p:k+p+q] = yule_walker(endog, order=q)[0]
if q == 0 and p != 0:
arcoefs = yule_walker(endog, order=p)[0]
start_params[k:k+p] = arcoefs
# check AR coefficients
if p and not np.all(np.abs(np.roots(np.r_[1, -start_params[k:k + p]]
)) < 1):
raise ValueError("The computed initial AR coefficients are not "
"stationary\nYou should induce stationarity, "
"choose a different model order, or you can\n"
"pass your own start_params.")
        # check MA coefficients
        elif q and not np.all(np.abs(np.roots(np.r_[1, start_params[k + p:]]
                                              )) < 1):
            # modified by me: instead of raising "The computed initial MA
            # coefficients are not invertible", fall back to zero starting
            # values so that estimation can still proceed
            return np.zeros(len(start_params))
        return start_params
def _fit_start_params(self, order, method):
if method != 'css-mle': # use Hannan-Rissanen to get start params
start_params = self._fit_start_params_hr(order)
else: # use CSS to get start params
func = lambda params: -self.loglike_css(params)
#start_params = [.1]*(k_ar+k_ma+k_exog) # different one for k?
start_params = self._fit_start_params_hr(order)
if self.transparams:
start_params = self._invtransparams(start_params)
bounds = [(None,)*2]*sum(order)
mlefit = optimize.fmin_l_bfgs_b(func, start_params,
approx_grad=True, m=12,
pgtol=1e-7, factr=1e3,
bounds=bounds, iprint=-1)
start_params = self._transparams(mlefit[0])
return start_params
def score(self, params):
"""
Compute the score function at params.
Notes
-----
This is a numerical approximation.
"""
return approx_fprime_cs(params, self.loglike, args=(False,))
def hessian(self, params):
"""
Compute the Hessian at params,
Notes
-----
This is a numerical approximation.
"""
return approx_hess_cs(params, self.loglike, args=(False,))
def _transparams(self, params):
"""
        Transforms params to induce stationarity/invertibility.
        References
        ----------
        Jones(1980)
"""
k_ar, k_ma = self.k_ar, self.k_ma
k = self.k_exog + self.k_trend
newparams = np.zeros_like(params)
# just copy exogenous parameters
if k != 0:
newparams[:k] = params[:k]
# AR Coeffs
if k_ar != 0:
newparams[k:k+k_ar] = _ar_transparams(params[k:k+k_ar].copy())
# MA Coeffs
if k_ma != 0:
newparams[k+k_ar:] = _ma_transparams(params[k+k_ar:].copy())
return newparams
def _invtransparams(self, start_params):
"""
Inverse of the Jones reparameterization
"""
k_ar, k_ma = self.k_ar, self.k_ma
k = self.k_exog + self.k_trend
newparams = start_params.copy()
arcoefs = newparams[k:k+k_ar]
macoefs = newparams[k+k_ar:]
# AR coeffs
if k_ar != 0:
newparams[k:k+k_ar] = _ar_invtransparams(arcoefs)
# MA coeffs
if k_ma != 0:
newparams[k+k_ar:k+k_ar+k_ma] = _ma_invtransparams(macoefs)
return newparams
def _get_predict_start(self, start, dynamic):
# do some defaults
method = getattr(self, 'method', 'mle')
k_ar = getattr(self, 'k_ar', 0)
k_diff = getattr(self, 'k_diff', 0)
if start is None:
if 'mle' in method and not dynamic:
start = 0
else:
start = k_ar
self._set_predict_start_date(start) # else it's done in super
elif isinstance(start, int):
start = super(ARMA, self)._get_predict_start(start)
else: # should be on a date
#elif 'mle' not in method or dynamic: # should be on a date
start = _validate(start, k_ar, k_diff, self.data.dates,
method)
start = super(ARMA, self)._get_predict_start(start)
_check_arima_start(start, k_ar, k_diff, method, dynamic)
return start
def _get_predict_end(self, end, dynamic=False):
# pass through so predict works for ARIMA and ARMA
return super(ARMA, self)._get_predict_end(end)
def geterrors(self, params):
"""
Get the errors of the ARMA process.
Parameters
----------
params : array-like
The fitted ARMA parameters
order : array-like
3 item iterable, with the number of AR, MA, and exogenous
parameters, including the trend
"""
#start = self._get_predict_start(start) # will be an index of a date
#end, out_of_sample = self._get_predict_end(end)
params = np.asarray(params)
k_ar, k_ma = self.k_ar, self.k_ma
k = self.k_exog + self.k_trend
method = getattr(self, 'method', 'mle')
if 'mle' in method: # use KalmanFilter to get errors
(y, k, nobs, k_ar, k_ma, k_lags, newparams, Z_mat, m, R_mat,
T_mat, paramsdtype) = KalmanFilter._init_kalman_state(params,
self)
errors = KalmanFilter.geterrors(y, k, k_ar, k_ma, k_lags, nobs,
Z_mat, m, R_mat, T_mat,
paramsdtype)
if isinstance(errors, tuple):
errors = errors[0] # non-cython version returns a tuple
else: # use scipy.signal.lfilter
y = self.endog.copy()
k = self.k_exog + self.k_trend
if k > 0:
y -= dot(self.exog, params[:k])
k_ar = self.k_ar
k_ma = self.k_ma
(trendparams, exparams,
arparams, maparams) = _unpack_params(params, (k_ar, k_ma),
self.k_trend, self.k_exog,
reverse=False)
b, a = np.r_[1, -arparams], np.r_[1, maparams]
zi = zeros((max(k_ar, k_ma)))
for i in range(k_ar):
zi[i] = sum(-b[:i+1][::-1]*y[:i+1])
e = lfilter(b, a, y, zi=zi)
errors = e[0][k_ar:]
return errors.squeeze()
def predict(self, params, start=None, end=None, exog=None, dynamic=False):
method = getattr(self, 'method', 'mle') # don't assume fit
#params = np.asarray(params)
# will return an index of a date
start = self._get_predict_start(start, dynamic)
end, out_of_sample = self._get_predict_end(end, dynamic)
if out_of_sample and (exog is None and self.k_exog > 0):
raise ValueError("You must provide exog for ARMAX")
endog = self.endog
resid = self.geterrors(params)
k_ar = self.k_ar
if out_of_sample != 0 and self.k_exog > 0:
if self.k_exog == 1 and exog.ndim == 1:
exog = exog[:, None]
# we need the last k_ar exog for the lag-polynomial
if self.k_exog > 0 and k_ar > 0:
# need the last k_ar exog for the lag-polynomial
exog = np.vstack((self.exog[-k_ar:, self.k_trend:], exog))
if dynamic:
#TODO: now that predict does dynamic in-sample it should
# also return error estimates and confidence intervals
# but how? len(endog) is not tot_obs
out_of_sample += end - start + 1
pr, ct = _arma_predict_out_of_sample(params, out_of_sample, resid,
k_ar, self.k_ma, self.k_trend,
self.k_exog, endog, exog,
start, method)
self.constant = ct
return pr
predictedvalues = _arma_predict_in_sample(start, end, endog, resid,
k_ar, method)
if out_of_sample:
forecastvalues, ct = _arma_predict_out_of_sample(params, out_of_sample,
resid, k_ar,
self.k_ma,
self.k_trend,
self.k_exog, endog,
exog, method=method)
self.constant = ct
predictedvalues = np.r_[predictedvalues, forecastvalues]
return predictedvalues
predict.__doc__ = _arma_predict
def loglike(self, params, set_sigma2=True):
"""
Compute the log-likelihood for ARMA(p,q) model
Notes
-----
Likelihood used depends on the method set in fit
"""
method = self.method
if method in ['mle', 'css-mle']:
return self.loglike_kalman(params, set_sigma2)
elif method == 'css':
return self.loglike_css(params, set_sigma2)
else:
raise ValueError("Method %s not understood" % method)
def loglike_kalman(self, params, set_sigma2=True):
"""
Compute exact loglikelihood for ARMA(p,q) model by the Kalman Filter.
"""
return KalmanFilter.loglike(params, self, set_sigma2)
def loglike_css(self, params, set_sigma2=True):
"""
Conditional Sum of Squares likelihood function.
"""
k_ar = self.k_ar
k_ma = self.k_ma
k = self.k_exog + self.k_trend
y = self.endog.copy().astype(params.dtype)
nobs = self.nobs
# how to handle if empty?
if self.transparams:
newparams = self._transparams(params)
else:
newparams = params
if k > 0:
y -= dot(self.exog, newparams[:k])
# the order of p determines how many zeros errors to set for lfilter
b, a = np.r_[1, -newparams[k:k + k_ar]], np.r_[1, newparams[k + k_ar:]]
zi = np.zeros((max(k_ar, k_ma)), dtype=params.dtype)
for i in range(k_ar):
zi[i] = sum(-b[:i + 1][::-1] * y[:i + 1])
errors = lfilter(b, a, y, zi=zi)[0][k_ar:]
ssr = np.dot(errors, errors)
sigma2 = ssr/nobs
if set_sigma2:
self.sigma2 = sigma2
llf = -nobs/2.*(log(2*pi) + log(sigma2)) - ssr/(2*sigma2)
return llf
def fit(self, start_params=None, trend='c', method="css-mle",
transparams=True, solver='lbfgs', maxiter=50, full_output=1,
disp=5, callback=None, **kwargs):
"""
Fits ARMA(p,q) model using exact maximum likelihood via Kalman filter.
Parameters
----------
start_params : array-like, optional
Starting parameters for ARMA(p,q). If None, the default is given
by ARMA._fit_start_params. See there for more information.
transparams : bool, optional
            Whether or not to transform the parameters to ensure stationarity.
Uses the transformation suggested in Jones (1980). If False,
no checking for stationarity or invertibility is done.
method : str {'css-mle','mle','css'}
This is the loglikelihood to maximize. If "css-mle", the
conditional sum of squares likelihood is maximized and its values
are used as starting values for the computation of the exact
likelihood via the Kalman filter. If "mle", the exact likelihood
is maximized via the Kalman Filter. If "css" the conditional sum
of squares likelihood is maximized. All three methods use
`start_params` as starting parameters. See above for more
information.
trend : str {'c','nc'}
Whether to include a constant or not. 'c' includes constant,
'nc' no constant.
solver : str or None, optional
Solver to be used. The default is 'lbfgs' (limited memory
Broyden-Fletcher-Goldfarb-Shanno). Other choices are 'bfgs',
'newton' (Newton-Raphson), 'nm' (Nelder-Mead), 'cg' -
(conjugate gradient), 'ncg' (non-conjugate gradient), and
'powell'. By default, the limited memory BFGS uses m=12 to
approximate the Hessian, projected gradient tolerance of 1e-8 and
factr = 1e2. You can change these by using kwargs.
maxiter : int, optional
The maximum number of function evaluations. Default is 50.
tol : float
The convergence tolerance. Default is 1e-08.
full_output : bool, optional
If True, all output from solver will be available in
the Results object's mle_retvals attribute. Output is dependent
on the solver. See Notes for more information.
disp : bool, optional
If True, convergence information is printed. For the default
l_bfgs_b solver, disp controls the frequency of the output during
the iterations. disp < 0 means no output in this case.
callback : function, optional
Called after each iteration as callback(xk) where xk is the current
parameter vector.
kwargs
See Notes for keyword arguments that can be passed to fit.
Returns
-------
statsmodels.tsa.arima_model.ARMAResults class
See also
--------
statsmodels.base.model.LikelihoodModel.fit : for more information
on using the solvers.
ARMAResults : results class returned by fit
Notes
------
If fit by 'mle', it is assumed for the Kalman Filter that the initial
        unknown state is zero, and that the initial variance is
P = dot(inv(identity(m**2)-kron(T,T)),dot(R,R.T).ravel('F')).reshape(r,
r, order = 'F')
"""
k_ar = self.k_ar
k_ma = self.k_ma
# enforce invertibility
self.transparams = transparams
endog, exog = self.endog, self.exog
k_exog = self.k_exog
self.nobs = len(endog) # this is overwritten if method is 'css'
# (re)set trend and handle exogenous variables
# always pass original exog
k_trend, exog = _make_arma_exog(endog, self.exog, trend)
# Check has something to estimate
if k_ar == 0 and k_ma == 0 and k_trend == 0 and k_exog == 0:
raise ValueError("Estimation requires the inclusion of least one "
"AR term, MA term, a constant or an exogenous "
"variable.")
# check again now that we know the trend
_check_estimable(len(endog), k_ar + k_ma + k_exog + k_trend)
self.k_trend = k_trend
self.exog = exog # overwrites original exog from __init__
# (re)set names for this model
self.exog_names = _make_arma_names(self.data, k_trend, (k_ar, k_ma),
self.exog_names)
k = k_trend + k_exog
# choose objective function
if k_ma == 0 and k_ar == 0:
method = "css" # Always CSS when no AR or MA terms
self.method = method = method.lower()
# adjust nobs for css
if method == 'css':
self.nobs = len(self.endog) - k_ar
if start_params is not None:
start_params = np.asarray(start_params)
else: # estimate starting parameters
start_params = self._fit_start_params((k_ar, k_ma, k), method)
if transparams: # transform initial parameters to ensure invertibility
start_params = self._invtransparams(start_params)
if solver == 'lbfgs':
kwargs.setdefault('pgtol', 1e-8)
kwargs.setdefault('factr', 1e2)
kwargs.setdefault('m', 12)
kwargs.setdefault('approx_grad', True)
mlefit = super(ARMA, self).fit(start_params, method=solver,
maxiter=maxiter,
full_output=full_output, disp=disp,
callback=callback, **kwargs)
params = mlefit.params
if transparams: # transform parameters back
params = self._transparams(params)
self.transparams = False # so methods don't expect transf.
normalized_cov_params = None # TODO: fix this
armafit = ARMAResults(self, params, normalized_cov_params)
armafit.mle_retvals = mlefit.mle_retvals
armafit.mle_settings = mlefit.mle_settings
armafit.mlefit = mlefit
return ARMAResultsWrapper(armafit)
#NOTE: the length of endog changes when we give a difference to fit
#so model methods are not the same on unfit models as fit ones
#starting to think that order of model should be put in instantiation...
class ARIMA(ARMA):
__doc__ = tsbase._tsa_doc % {"model" : _arima_model,
"params" : _arima_params, "extra_params" : "",
"extra_sections" : _armax_notes %
{"Model" : "ARIMA"}}
def __new__(cls, endog, order, exog=None, dates=None, freq=None,
missing='none'):
p, d, q = order
if d == 0: # then we just use an ARMA model
return ARMA(endog, (p, q), exog, dates, freq, missing)
else:
mod = super(ARIMA, cls).__new__(cls)
mod.__init__(endog, order, exog, dates, freq, missing)
return mod
def __init__(self, endog, order, exog=None, dates=None, freq=None,
missing='none'):
p, d, q = order
if d > 2:
#NOTE: to make more general, need to address the d == 2 stuff
# in the predict method
raise ValueError("d > 2 is not supported")
super(ARIMA, self).__init__(endog, (p, q), exog, dates, freq, missing)
self.k_diff = d
self._first_unintegrate = unintegrate_levels(self.endog[:d], d)
self.endog = np.diff(self.endog, n=d)
#NOTE: will check in ARMA but check again since differenced now
_check_estimable(len(self.endog), p+q)
if exog is not None:
self.exog = self.exog[d:]
if d == 1:
self.data.ynames = 'D.' + self.endog_names
else:
self.data.ynames = 'D{0:d}.'.format(d) + self.endog_names
# what about exog, should we difference it automatically before
# super call?
def _get_predict_start(self, start, dynamic):
"""
"""
#TODO: remove all these getattr and move order specification to
# class constructor
k_diff = getattr(self, 'k_diff', 0)
method = getattr(self, 'method', 'mle')
k_ar = getattr(self, 'k_ar', 0)
if start is None:
if 'mle' in method and not dynamic:
start = 0
else:
start = k_ar
elif isinstance(start, int):
start -= k_diff
try: # catch when given an integer outside of dates index
start = super(ARIMA, self)._get_predict_start(start,
dynamic)
except IndexError:
raise ValueError("start must be in series. "
"got %d" % (start + k_diff))
else: # received a date
start = _validate(start, k_ar, k_diff, self.data.dates,
method)
start = super(ARIMA, self)._get_predict_start(start, dynamic)
# reset date for k_diff adjustment
self._set_predict_start_date(start + k_diff)
return start
def _get_predict_end(self, end, dynamic=False):
"""
Returns last index to be forecast of the differenced array.
Handling of inclusiveness should be done in the predict function.
"""
end, out_of_sample = super(ARIMA, self)._get_predict_end(end, dynamic)
if 'mle' not in self.method and not dynamic:
end -= self.k_ar
return end - self.k_diff, out_of_sample
def fit(self, start_params=None, trend='c', method="css-mle",
transparams=True, solver='lbfgs', maxiter=50, full_output=1,
disp=5, callback=None, **kwargs):
"""
Fits ARIMA(p,d,q) model by exact maximum likelihood via Kalman filter.
Parameters
----------
start_params : array-like, optional
Starting parameters for ARMA(p,q). If None, the default is given
by ARMA._fit_start_params. See there for more information.
transparams : bool, optional
            Whether or not to transform the parameters to ensure stationarity.
Uses the transformation suggested in Jones (1980). If False,
no checking for stationarity or invertibility is done.
method : str {'css-mle','mle','css'}
This is the loglikelihood to maximize. If "css-mle", the
conditional sum of squares likelihood is maximized and its values
are used as starting values for the computation of the exact
likelihood via the Kalman filter. If "mle", the exact likelihood
is maximized via the Kalman Filter. If "css" the conditional sum
of squares likelihood is maximized. All three methods use
`start_params` as starting parameters. See above for more
information.
trend : str {'c','nc'}
Whether to include a constant or not. 'c' includes constant,
'nc' no constant.
solver : str or None, optional
Solver to be used. The default is 'lbfgs' (limited memory
Broyden-Fletcher-Goldfarb-Shanno). Other choices are 'bfgs',
'newton' (Newton-Raphson), 'nm' (Nelder-Mead), 'cg' -
(conjugate gradient), 'ncg' (non-conjugate gradient), and
'powell'. By default, the limited memory BFGS uses m=12 to
approximate the Hessian, projected gradient tolerance of 1e-8 and
factr = 1e2. You can change these by using kwargs.
maxiter : int, optional
The maximum number of function evaluations. Default is 50.
tol : float
The convergence tolerance. Default is 1e-08.
full_output : bool, optional
If True, all output from solver will be available in
the Results object's mle_retvals attribute. Output is dependent
on the solver. See Notes for more information.
disp : bool, optional
If True, convergence information is printed. For the default
l_bfgs_b solver, disp controls the frequency of the output during
the iterations. disp < 0 means no output in this case.
callback : function, optional
Called after each iteration as callback(xk) where xk is the current
parameter vector.
kwargs
See Notes for keyword arguments that can be passed to fit.
Returns
-------
        `statsmodels.tsa.arima_model.ARIMAResults` class
See also
--------
statsmodels.base.model.LikelihoodModel.fit : for more information
on using the solvers.
ARIMAResults : results class returned by fit
Notes
------
If fit by 'mle', it is assumed for the Kalman Filter that the initial
        unknown state is zero, and that the initial variance is
P = dot(inv(identity(m**2)-kron(T,T)),dot(R,R.T).ravel('F')).reshape(r,
r, order = 'F')
"""
arima_fit = super(ARIMA, self).fit(start_params, trend,
method, transparams, solver,
maxiter, full_output, disp,
callback, **kwargs)
normalized_cov_params = None # TODO: fix this?
arima_fit = ARIMAResults(self, arima_fit._results.params,
normalized_cov_params)
arima_fit.k_diff = self.k_diff
return ARIMAResultsWrapper(arima_fit)
def predict(self, params, start=None, end=None, exog=None, typ='linear',
dynamic=False):
# go ahead and convert to an index for easier checking
if isinstance(start, (string_types, datetime)):
start = _index_date(start, self.data.dates)
if typ == 'linear':
if not dynamic or (start != self.k_ar + self.k_diff and
start is not None):
return super(ARIMA, self).predict(params, start, end, exog,
dynamic)
else:
# need to assume pre-sample residuals are zero
# do this by a hack
q = self.k_ma
self.k_ma = 0
predictedvalues = super(ARIMA, self).predict(params, start,
end, exog,
dynamic)
self.k_ma = q
return predictedvalues
elif typ == 'levels':
endog = self.data.endog
if not dynamic:
predict = super(ARIMA, self).predict(params, start, end,
dynamic)
start = self._get_predict_start(start, dynamic)
end, out_of_sample = self._get_predict_end(end)
d = self.k_diff
if 'mle' in self.method:
start += d - 1 # for case where d == 2
end += d - 1
# add each predicted diff to lagged endog
if out_of_sample:
fv = predict[:-out_of_sample] + endog[start:end+1]
if d == 2: #TODO: make a general solution to this
fv += np.diff(endog[start - 1:end + 1])
levels = unintegrate_levels(endog[-d:], d)
fv = np.r_[fv,
unintegrate(predict[-out_of_sample:],
levels)[d:]]
else:
fv = predict + endog[start:end + 1]
if d == 2:
fv += np.diff(endog[start - 1:end + 1])
else:
k_ar = self.k_ar
if out_of_sample:
fv = (predict[:-out_of_sample] +
endog[max(start, self.k_ar-1):end+k_ar+1])
if d == 2:
fv += np.diff(endog[start - 1:end + 1])
levels = unintegrate_levels(endog[-d:], d)
fv = np.r_[fv,
unintegrate(predict[-out_of_sample:],
levels)[d:]]
else:
fv = predict + endog[max(start, k_ar):end+k_ar+1]
if d == 2:
fv += np.diff(endog[start - 1:end + 1])
else:
#IFF we need to use pre-sample values assume pre-sample
# residuals are zero, do this by a hack
if start == self.k_ar + self.k_diff or start is None:
# do the first k_diff+1 separately
p = self.k_ar
q = self.k_ma
k_exog = self.k_exog
k_trend = self.k_trend
k_diff = self.k_diff
(trendparam, exparams,
arparams, maparams) = _unpack_params(params, (p, q),
k_trend,
k_exog,
reverse=True)
# this is the hack
self.k_ma = 0
predict = super(ARIMA, self).predict(params, start, end,
exog, dynamic)
if not start:
start = self._get_predict_start(start, dynamic)
start += k_diff
self.k_ma = q
return endog[start-1] + np.cumsum(predict)
else:
predict = super(ARIMA, self).predict(params, start, end,
exog, dynamic)
return endog[start-1] + np.cumsum(predict)
return fv
else: # pragma : no cover
raise ValueError("typ %s not understood" % typ)
predict.__doc__ = _arima_predict
class ARMAResults(tsbase.TimeSeriesModelResults):
"""
Class to hold results from fitting an ARMA model.
Parameters
----------
model : ARMA instance
The fitted model instance
params : array
Fitted parameters
normalized_cov_params : array, optional
The normalized variance covariance matrix
scale : float, optional
Optional argument to scale the variance covariance matrix.
Returns
--------
**Attributes**
aic : float
Akaike Information Criterion
:math:`-2*llf+2* df_model`
        where `df_model` includes all AR parameters, MA parameters, the
        exogenous and constant/trend terms, and the variance.
arparams : array
The parameters associated with the AR coefficients in the model.
arroots : array
The roots of the AR coefficients are the solution to
        (1 - arparams[0]*z - arparams[1]*z**2 - ... - arparams[k_ar-1]*z**k_ar) = 0
Stability requires that the roots in modulus lie outside the unit
circle.
bic : float
Bayes Information Criterion
-2*llf + log(nobs)*df_model
Where if the model is fit using conditional sum of squares, the
number of observations `nobs` does not include the `p` pre-sample
observations.
bse : array
The standard errors of the parameters. These are computed using the
numerical Hessian.
df_model : array
The model degrees of freedom = `k_exog` + `k_trend` + `k_ar` + `k_ma`
df_resid : array
The residual degrees of freedom = `nobs` - `df_model`
fittedvalues : array
The predicted values of the model.
hqic : float
Hannan-Quinn Information Criterion
-2*llf + 2*(`df_model`)*log(log(nobs))
Like `bic` if the model is fit using conditional sum of squares then
the `k_ar` pre-sample observations are not counted in `nobs`.
k_ar : int
The number of AR coefficients in the model.
k_exog : int
The number of exogenous variables included in the model. Does not
include the constant.
k_ma : int
The number of MA coefficients.
k_trend : int
This is 0 for no constant or 1 if a constant is included.
llf : float
The value of the log-likelihood function evaluated at `params`.
maparams : array
The value of the moving average coefficients.
maroots : array
The roots of the MA coefficients are the solution to
        (1 + maparams[0]*z + maparams[1]*z**2 + ... + maparams[k_ma-1]*z**k_ma) = 0
        Invertibility requires that the roots in modulus lie outside the unit
        circle.
model : ARMA instance
A reference to the model that was fit.
nobs : float
The number of observations used to fit the model. If the model is fit
using exact maximum likelihood this is equal to the total number of
observations, `n_totobs`. If the model is fit using conditional
maximum likelihood this is equal to `n_totobs` - `k_ar`.
n_totobs : float
The total number of observations for `endog`. This includes all
observations, even pre-sample values if the model is fit using `css`.
params : array
The parameters of the model. The order of variables is the trend
        coefficients and the `k_exog` exogenous coefficients, then the
`k_ar` AR coefficients, and finally the `k_ma` MA coefficients.
pvalues : array
The p-values associated with the t-values of the coefficients. Note
that the coefficients are assumed to have a Student's T distribution.
resid : array
The model residuals. If the model is fit using 'mle' then the
residuals are created via the Kalman Filter. If the model is fit
using 'css' then the residuals are obtained via `scipy.signal.lfilter`
adjusted such that the first `k_ma` residuals are zero. These zero
residuals are not returned.
scale : float
This is currently set to 1.0 and not used by the model or its results.
sigma2 : float
The variance of the residuals. If the model is fit by 'css',
sigma2 = ssr/nobs, where ssr is the sum of squared residuals. If
the model is fit by 'mle', then sigma2 = 1/nobs * sum(v**2 / F)
where v is the one-step forecast error and F is the forecast error
variance. See `nobs` for the difference in definitions depending on the
fit.
"""
_cache = {}
#TODO: use this for docstring when we fix nobs issue
def __init__(self, model, params, normalized_cov_params=None, scale=1.):
super(ARMAResults, self).__init__(model, params, normalized_cov_params,
scale)
self.sigma2 = model.sigma2
nobs = model.nobs
self.nobs = nobs
k_exog = model.k_exog
self.k_exog = k_exog
k_trend = model.k_trend
self.k_trend = k_trend
k_ar = model.k_ar
self.k_ar = k_ar
self.n_totobs = len(model.endog)
k_ma = model.k_ma
self.k_ma = k_ma
df_model = k_exog + k_trend + k_ar + k_ma
self._ic_df_model = df_model + 1
self.df_model = df_model
self.df_resid = self.nobs - df_model
self._cache = resettable_cache()
        self.constant = 0  # Added by me
@cache_readonly
def arroots(self):
return np.roots(np.r_[1, -self.arparams])**-1
@cache_readonly
def maroots(self):
return np.roots(np.r_[1, self.maparams])**-1
@cache_readonly
def arfreq(self):
r"""
Returns the frequency of the AR roots.
This is the solution, x, to z = abs(z)*exp(2j*np.pi*x) where z are the
roots.
"""
z = self.arroots
if not z.size:
return
return np.arctan2(z.imag, z.real) / (2*pi)
@cache_readonly
def mafreq(self):
r"""
Returns the frequency of the MA roots.
This is the solution, x, to z = abs(z)*exp(2j*np.pi*x) where z are the
roots.
"""
z = self.maroots
if not z.size:
return
return np.arctan2(z.imag, z.real) / (2*pi)
@cache_readonly
def arparams(self):
k = self.k_exog + self.k_trend
return self.params[k:k+self.k_ar]
@cache_readonly
def maparams(self):
k = self.k_exog + self.k_trend
k_ar = self.k_ar
return self.params[k+k_ar:]
@cache_readonly
def llf(self):
return self.model.loglike(self.params)
@cache_readonly
def bse(self):
params = self.params
hess = self.model.hessian(params)
if len(params) == 1: # can't take an inverse, ensure 1d
return np.sqrt(-1./hess[0])
return np.sqrt(np.diag(-inv(hess)))
def cov_params(self): # add scale argument?
params = self.params
hess = self.model.hessian(params)
return -inv(hess)
@cache_readonly
def aic(self):
return -2 * self.llf + 2 * self._ic_df_model
@cache_readonly
def bic(self):
nobs = self.nobs
return -2 * self.llf + np.log(nobs) * self._ic_df_model
@cache_readonly
def hqic(self):
nobs = self.nobs
return -2 * self.llf + 2 * np.log(np.log(nobs)) * self._ic_df_model
@cache_readonly
def fittedvalues(self):
model = self.model
endog = model.endog.copy()
k_ar = self.k_ar
exog = model.exog # this is a copy
if exog is not None:
if model.method == "css" and k_ar > 0:
exog = exog[k_ar:]
if model.method == "css" and k_ar > 0:
endog = endog[k_ar:]
fv = endog - self.resid
# add deterministic part back in
#k = self.k_exog + self.k_trend
#TODO: this needs to be commented out for MLE with constant
#if k != 0:
# fv += dot(exog, self.params[:k])
return fv
@cache_readonly
def resid(self):
return self.model.geterrors(self.params)
@cache_readonly
def pvalues(self):
#TODO: same for conditional and unconditional?
df_resid = self.df_resid
return t.sf(np.abs(self.tvalues), df_resid) * 2
def predict(self, start=None, end=None, exog=None, dynamic=False):
return self.model.predict(self.params, start, end, exog, dynamic)
predict.__doc__ = _arma_results_predict
def _forecast_error(self, steps):
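        # The h-step-ahead forecast error variance of an ARMA process is
        # sigma2 * sum_{j<h} psi_j**2, where psi_j are the MA(infinity)
        # weights of the process; arma2ma computes those weights.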
sigma2 = self.sigma2
ma_rep = arma2ma(np.r_[1, -self.arparams],
np.r_[1, self.maparams], nobs=steps)
fcasterr = np.sqrt(sigma2 * np.cumsum(ma_rep**2))
return fcasterr
def _forecast_conf_int(self, forecast, fcasterr, alpha):
const = norm.ppf(1 - alpha / 2.)
conf_int = np.c_[forecast - const * fcasterr,
forecast + const * fcasterr]
return conf_int
def forecast(self, steps=1, exog=None, alpha=.05):
"""
Out-of-sample forecasts
Parameters
----------
steps : int
The number of out of sample forecasts from the end of the
sample.
exog : array
If the model is an ARMAX, you must provide out of sample
values for the exogenous variables. This should not include
the constant.
alpha : float
The confidence intervals for the forecasts are (1 - alpha) %
Returns
-------
forecast : array
Array of out of sample forecasts
stderr : array
Array of the standard error of the forecasts.
conf_int : array
2d array of the confidence interval for the forecast
"""
if exog is not None:
#TODO: make a convenience function for this. we're using the
# pattern elsewhere in the codebase
exog = np.asarray(exog)
if self.k_exog == 1 and exog.ndim == 1:
exog = exog[:, None]
elif exog.ndim == 1:
if len(exog) != self.k_exog:
raise ValueError("1d exog given and len(exog) != k_exog")
exog = exog[None, :]
if exog.shape[0] != steps:
raise ValueError("new exog needed for each step")
# prepend in-sample exog observations
exog = np.vstack((self.model.exog[-self.k_ar:, self.k_trend:],
exog))
forecast, ct = _arma_predict_out_of_sample(self.params,
steps, self.resid, self.k_ar,
self.k_ma, self.k_trend,
self.k_exog, self.model.endog,
exog, method=self.model.method)
self.constant = ct
# compute the standard errors
fcasterr = self._forecast_error(steps)
conf_int = self._forecast_conf_int(forecast, fcasterr, alpha)
return forecast, fcasterr, conf_int
def summary(self, alpha=.05):
"""Summarize the Model
Parameters
----------
alpha : float, optional
Significance level for the confidence intervals.
Returns
-------
smry : Summary instance
This holds the summary table and text, which can be printed or
converted to various output formats.
See Also
--------
statsmodels.iolib.summary.Summary
"""
from statsmodels.iolib.summary import Summary
model = self.model
title = model.__class__.__name__ + ' Model Results'
method = model.method
# get sample TODO: make better sample machinery for estimation
k_diff = getattr(self, 'k_diff', 0)
if 'mle' in method:
start = k_diff
else:
start = k_diff + self.k_ar
if self.data.dates is not None:
dates = self.data.dates
sample = [dates[start].strftime('%m-%d-%Y')]
sample += ['- ' + dates[-1].strftime('%m-%d-%Y')]
else:
            # keep list form so sample[0]/sample[1] below index whole strings
            sample = [str(start), ' - ' + str(len(self.data.orig_endog))]
k_ar, k_ma = self.k_ar, self.k_ma
if not k_diff:
order = str((k_ar, k_ma))
else:
order = str((k_ar, k_diff, k_ma))
top_left = [('Dep. Variable:', None),
('Model:', [model.__class__.__name__ + order]),
('Method:', [method]),
('Date:', None),
('Time:', None),
('Sample:', [sample[0]]),
('', [sample[1]])
]
top_right = [
('No. Observations:', [str(len(self.model.endog))]),
('Log Likelihood', ["%#5.3f" % self.llf]),
('S.D. of innovations', ["%#5.3f" % self.sigma2**.5]),
('AIC', ["%#5.3f" % self.aic]),
('BIC', ["%#5.3f" % self.bic]),
('HQIC', ["%#5.3f" % self.hqic])]
smry = Summary()
smry.add_table_2cols(self, gleft=top_left, gright=top_right,
title=title)
smry.add_table_params(self, alpha=alpha, use_t=False)
# Make the roots table
from statsmodels.iolib.table import SimpleTable
if k_ma and k_ar:
arstubs = ["AR.%d" % i for i in range(1, k_ar + 1)]
mastubs = ["MA.%d" % i for i in range(1, k_ma + 1)]
stubs = arstubs + mastubs
roots = np.r_[self.arroots, self.maroots]
freq = np.r_[self.arfreq, self.mafreq]
elif k_ma:
mastubs = ["MA.%d" % i for i in range(1, k_ma + 1)]
stubs = mastubs
roots = self.maroots
freq = self.mafreq
elif k_ar:
arstubs = ["AR.%d" % i for i in range(1, k_ar + 1)]
stubs = arstubs
roots = self.arroots
freq = self.arfreq
else: # 0,0 model
stubs = []
if len(stubs): # not 0, 0
modulus = np.abs(roots)
data = np.column_stack((roots.real, roots.imag, modulus, freq))
roots_table = SimpleTable(data,
headers=[' Real',
' Imaginary',
' Modulus',
' Frequency'],
title="Roots",
stubs=stubs,
data_fmts=["%17.4f", "%+17.4fj",
"%17.4f", "%17.4f"])
smry.tables.append(roots_table)
return smry
def summary2(self, title=None, alpha=.05, float_format="%.4f"):
"""Experimental summary function for ARIMA Results
Parameters
        ----------
title : string, optional
Title for the top table. If not None, then this replaces the
default title
alpha : float
significance level for the confidence intervals
float_format: string
print format for floats in parameters summary
Returns
-------
smry : Summary instance
This holds the summary table and text, which can be printed or
converted to various output formats.
See Also
--------
statsmodels.iolib.summary2.Summary : class to hold summary
results
"""
from pandas import DataFrame
# get sample TODO: make better sample machinery for estimation
k_diff = getattr(self, 'k_diff', 0)
if 'mle' in self.model.method:
start = k_diff
else:
start = k_diff + self.k_ar
if self.data.dates is not None:
dates = self.data.dates
sample = [dates[start].strftime('%m-%d-%Y')]
sample += [dates[-1].strftime('%m-%d-%Y')]
else:
            # keep list form so sample[0]/sample[-1] below index whole strings
            sample = [str(start), ' - ' + str(len(self.data.orig_endog))]
k_ar, k_ma = self.k_ar, self.k_ma
# Roots table
if k_ma and k_ar:
arstubs = ["AR.%d" % i for i in range(1, k_ar + 1)]
mastubs = ["MA.%d" % i for i in range(1, k_ma + 1)]
stubs = arstubs + mastubs
roots = np.r_[self.arroots, self.maroots]
freq = np.r_[self.arfreq, self.mafreq]
elif k_ma:
mastubs = ["MA.%d" % i for i in range(1, k_ma + 1)]
stubs = mastubs
roots = self.maroots
freq = self.mafreq
elif k_ar:
arstubs = ["AR.%d" % i for i in range(1, k_ar + 1)]
stubs = arstubs
roots = self.arroots
freq = self.arfreq
else: # 0, 0 order
stubs = []
if len(stubs):
modulus = np.abs(roots)
data = np.column_stack((roots.real, roots.imag, modulus, freq))
data = DataFrame(data)
data.columns = ['Real', 'Imaginary', 'Modulus', 'Frequency']
data.index = stubs
# Summary
from statsmodels.iolib import summary2
smry = summary2.Summary()
# Model info
model_info = summary2.summary_model(self)
model_info['Method:'] = self.model.method
model_info['Sample:'] = sample[0]
model_info[' '] = sample[-1]
model_info['S.D. of innovations:'] = "%#5.3f" % self.sigma2**.5
model_info['HQIC:'] = "%#5.3f" % self.hqic
model_info['No. Observations:'] = str(len(self.model.endog))
# Parameters
params = summary2.summary_params(self)
smry.add_dict(model_info)
smry.add_df(params, float_format=float_format)
if len(stubs):
smry.add_df(data, float_format="%17.4f")
smry.add_title(results=self, title=title)
return smry
def plot_predict(self, start=None, end=None, exog=None, dynamic=False,
alpha=.05, plot_insample=True, ax=None):
from statsmodels.graphics.utils import _import_mpl, create_mpl_ax
_ = _import_mpl()
fig, ax = create_mpl_ax(ax)
# use predict so you set dates
forecast = self.predict(start, end, exog, dynamic)
# doing this twice. just add a plot keyword to predict?
start = self.model._get_predict_start(start, dynamic=False)
end, out_of_sample = self.model._get_predict_end(end, dynamic=False)
if out_of_sample:
steps = out_of_sample
fc_error = self._forecast_error(steps)
conf_int = self._forecast_conf_int(forecast[-steps:], fc_error,
alpha)
if hasattr(self.data, "predict_dates"):
            # pandas.TimeSeries was removed; Series is the drop-in replacement
            from pandas import Series
            forecast = Series(forecast, index=self.data.predict_dates)
ax = forecast.plot(ax=ax, label='forecast')
else:
ax.plot(forecast)
x = ax.get_lines()[-1].get_xdata()
if out_of_sample:
label = "{0:.0%} confidence interval".format(1 - alpha)
ax.fill_between(x[-out_of_sample:], conf_int[:, 0], conf_int[:, 1],
color='gray', alpha=.5, label=label)
if plot_insample:
ax.plot(x[:end + 1 - start], self.model.endog[start:end+1],
label=self.model.endog_names)
ax.legend(loc='best')
return fig
plot_predict.__doc__ = _plot_predict
class ARMAResultsWrapper(wrap.ResultsWrapper):
_attrs = {}
_wrap_attrs = wrap.union_dicts(tsbase.TimeSeriesResultsWrapper._wrap_attrs,
_attrs)
_methods = {}
_wrap_methods = wrap.union_dicts(tsbase.TimeSeriesResultsWrapper._wrap_methods,
_methods)
wrap.populate_wrapper(ARMAResultsWrapper, ARMAResults)
class ARIMAResults(ARMAResults):
def predict(self, start=None, end=None, exog=None, typ='linear',
dynamic=False):
return self.model.predict(self.params, start, end, exog, typ, dynamic)
predict.__doc__ = _arima_results_predict
def _forecast_error(self, steps):
sigma2 = self.sigma2
ma_rep = arma2ma(np.r_[1, -self.arparams],
np.r_[1, self.maparams], nobs=steps)
fcerr = np.sqrt(np.cumsum(cumsum_n(ma_rep, self.k_diff)**2)*sigma2)
return fcerr
def _forecast_conf_int(self, forecast, fcerr, alpha):
const = norm.ppf(1 - alpha/2.)
conf_int = np.c_[forecast - const*fcerr, forecast + const*fcerr]
return conf_int
def forecast(self, steps=1, exog=None, alpha=.05):
"""
Out-of-sample forecasts
Parameters
----------
steps : int
The number of out of sample forecasts from the end of the
sample.
exog : array
If the model is an ARIMAX, you must provide out of sample
values for the exogenous variables. This should not include
the constant.
alpha : float
The confidence intervals for the forecasts are (1 - alpha) %
Returns
-------
forecast : array
Array of out of sample forecasts
stderr : array
Array of the standard error of the forecasts.
conf_int : array
2d array of the confidence interval for the forecast
Notes
-----
Prediction is done in the levels of the original endogenous variable.
        If you would like predictions of the differenced series, use `predict`.
"""
if exog is not None:
if self.k_exog == 1 and exog.ndim == 1:
exog = exog[:, None]
if exog.shape[0] != steps:
raise ValueError("new exog needed for each step")
# prepend in-sample exog observations
exog = np.vstack((self.model.exog[-self.k_ar:, self.k_trend:],
exog))
forecast, ct = _arma_predict_out_of_sample(self.params, steps, self.resid,
self.k_ar, self.k_ma,
self.k_trend, self.k_exog,
self.model.endog,
exog, method=self.model.method)
#self.constant = ct
d = self.k_diff
endog = self.model.data.endog[-d:]
forecast = unintegrate(forecast, unintegrate_levels(endog, d))[d:]
# get forecast errors
fcerr = self._forecast_error(steps)
conf_int = self._forecast_conf_int(forecast, fcerr, alpha)
return forecast, fcerr, conf_int
def plot_predict(self, start=None, end=None, exog=None, dynamic=False,
alpha=.05, plot_insample=True, ax=None):
from statsmodels.graphics.utils import _import_mpl, create_mpl_ax
_ = _import_mpl()
fig, ax = create_mpl_ax(ax)
# use predict so you set dates
forecast = self.predict(start, end, exog, 'levels', dynamic)
# doing this twice. just add a plot keyword to predict?
start = self.model._get_predict_start(start, dynamic=dynamic)
end, out_of_sample = self.model._get_predict_end(end, dynamic=dynamic)
if out_of_sample:
steps = out_of_sample
fc_error = self._forecast_error(steps)
conf_int = self._forecast_conf_int(forecast[-steps:], fc_error,
alpha)
if hasattr(self.data, "predict_dates"):
            # pandas.TimeSeries was removed; Series is the drop-in replacement
            from pandas import Series
            forecast = Series(forecast, index=self.data.predict_dates)
ax = forecast.plot(ax=ax, label='forecast')
else:
ax.plot(forecast)
x = ax.get_lines()[-1].get_xdata()
if out_of_sample:
label = "{0:.0%} confidence interval".format(1 - alpha)
ax.fill_between(x[-out_of_sample:], conf_int[:, 0], conf_int[:, 1],
color='gray', alpha=.5, label=label)
if plot_insample:
import re
k_diff = self.k_diff
            label = re.sub(r"D\d*\.", "", self.model.endog_names)
levels = unintegrate(self.model.endog,
self.model._first_unintegrate)
ax.plot(x[:end + 1 - start],
levels[start + k_diff:end + k_diff + 1], label=label)
ax.legend(loc='best')
return fig
plot_predict.__doc__ = _arima_plot_predict
class ARIMAResultsWrapper(ARMAResultsWrapper):
pass
wrap.populate_wrapper(ARIMAResultsWrapper, ARIMAResults)
if __name__ == "__main__":
import statsmodels.api as sm
# simulate arma process
from statsmodels.tsa.arima_process import arma_generate_sample
y = arma_generate_sample([1., -.75], [1., .25], nsample=1000)
arma = ARMA(y)
res = arma.fit(trend='nc', order=(1, 1))
np.random.seed(12345)
y_arma22 = arma_generate_sample([1., -.85, .35], [1, .25, -.9],
nsample=1000)
arma22 = ARMA(y_arma22)
res22 = arma22.fit(trend='nc', order=(2, 2))
# test CSS
arma22_css = ARMA(y_arma22)
res22css = arma22_css.fit(trend='nc', order=(2, 2), method='css')
data = sm.datasets.sunspots.load()
ar = ARMA(data.endog)
resar = ar.fit(trend='nc', order=(9, 0))
y_arma31 = arma_generate_sample([1, -.75, -.35, .25], [.1],
nsample=1000)
arma31css = ARMA(y_arma31)
res31css = arma31css.fit(order=(3, 1), method="css", trend="nc",
transparams=True)
y_arma13 = arma_generate_sample([1., -.75], [1, .25, -.5, .8],
nsample=1000)
arma13css = ARMA(y_arma13)
res13css = arma13css.fit(order=(1, 3), method='css', trend='nc')
# check css for p < q and q < p
y_arma41 = arma_generate_sample([1., -.75, .35, .25, -.3], [1, -.35],
nsample=1000)
arma41css = ARMA(y_arma41)
res41css = arma41css.fit(order=(4, 1), trend='nc', method='css')
y_arma14 = arma_generate_sample([1, -.25], [1., -.75, .35, .25, -.3],
nsample=1000)
arma14css = ARMA(y_arma14)
res14css = arma14css.fit(order=(4, 1), trend='nc', method='css')
# ARIMA Model
from statsmodels.datasets import webuse
dta = webuse('wpi1')
wpi = dta['wpi']
mod = ARIMA(wpi, (1, 1, 1)).fit()
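    # A minimal forecasting sketch using the fitted ARIMA results above:
    # `forecast` returns point forecasts in the levels of the original
    # series, their standard errors, and the (1 - alpha) confidence bounds.
    fc, stderr, conf_int = mod.forecast(steps=5)
    print(fc)
    print(conf_int)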
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkemr.endpoint import endpoint_data
class ListFlowEntitySnapshotRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Emr', '2016-04-08', 'ListFlowEntitySnapshot')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ResourceOwnerId(self):
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self,ResourceOwnerId):
self.add_query_param('ResourceOwnerId',ResourceOwnerId)
def get_PageCount(self):
return self.get_query_params().get('PageCount')
def set_PageCount(self,PageCount):
self.add_query_param('PageCount',PageCount)
def get_OrderMode(self):
return self.get_query_params().get('OrderMode')
def set_OrderMode(self,OrderMode):
self.add_query_param('OrderMode',OrderMode)
def get_EntityId(self):
return self.get_query_params().get('EntityId')
def set_EntityId(self,EntityId):
self.add_query_param('EntityId',EntityId)
def get_PageNumber(self):
return self.get_query_params().get('PageNumber')
def set_PageNumber(self,PageNumber):
self.add_query_param('PageNumber',PageNumber)
def get_Limit(self):
return self.get_query_params().get('Limit')
def set_Limit(self,Limit):
self.add_query_param('Limit',Limit)
def get_PageSize(self):
return self.get_query_params().get('PageSize')
def set_PageSize(self,PageSize):
self.add_query_param('PageSize',PageSize)
def get_CommitterId(self):
return self.get_query_params().get('CommitterId')
def set_CommitterId(self,CommitterId):
self.add_query_param('CommitterId',CommitterId)
def get_CurrentSize(self):
return self.get_query_params().get('CurrentSize')
def set_CurrentSize(self,CurrentSize):
self.add_query_param('CurrentSize',CurrentSize)
def get_OrderField(self):
return self.get_query_params().get('OrderField')
def set_OrderField(self,OrderField):
self.add_query_param('OrderField',OrderField)
def get_EntityGroupId(self):
return self.get_query_params().get('EntityGroupId')
def set_EntityGroupId(self,EntityGroupId):
self.add_query_param('EntityGroupId',EntityGroupId)
def get_Revision(self):
return self.get_query_params().get('Revision')
def set_Revision(self,Revision):
self.add_query_param('Revision',Revision)
def get_EntityType(self):
return self.get_query_params().get('EntityType')
def set_EntityType(self,EntityType):
self.add_query_param('EntityType',EntityType)
|
#!/usr/bin/env python3
#
# Copyright 2018 Brian T. Park
#
# MIT License.
"""
Read the raw TZ Database files at the location specified by `--input_dir` and
generate the zonedb files in various formats as determined by the '--action'
flag:
* --action tzdb
JSON file representation of the internal zonedb named 'tzdb.json'.
* --action zonedb
The zone_infos.*, zone_policies.*, and sometimes the zone_registry.* and
zone_strings.*, files in various languages.
* --action zonelist
Write just the raw list of zone names named 'zones.txt'.
The --output_dir flag determines the directory where various files should
be created. If empty, it means the same as $PWD.
If '--action zonedb' is selected, there are 2 language options available
using the --language flag:
* --language arduino
* --language python
The raw TZ Database files are parsed by extractor.py and processed by
transformer.py. The Transformer class accepts a number of options:
* --scope {basic | extended}
* --start_year {start}
* --until_year {until}
* --granularity {seconds}
* --until_at_granularity {seconds}
* --offset_granularity {seconds}
* --strict
which determine which Rules or Zones are retained during the 'transformation'
process.
If --language arduino is selected, the following flags are used:
* --db_namespace {db_namespace}
Use the given identifier as the C++ namespace of the generated classes.
* --generate_zone_strings
Generate the 'zone_strings.*' files as well.
Examples:
See tzcompiler.sh
"""
import argparse
import logging
import sys
from typing_extensions import Protocol
from tzdb.extractor import Extractor
from tzdb.transformer import Transformer
from tzdb.tzdbcollector import TzDbCollector, TzDb
from zonedb.argenerator import ArduinoGenerator
from zonedb.pygenerator import PythonGenerator
from zonedb.ingenerator import InlineGenerator
from zonedb.zonelistgenerator import ZoneListGenerator
from zonedb.bufestimator import BufSizeEstimator
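# Structural typing: any object with a matching `generate_files` method
# satisfies the Generator protocol below, which lets mypy type-check the
# `generator` variable in generate_zonedb() without a common base class.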
class Generator(Protocol):
def generate_files(self, name: str) -> None:
...
def generate_zonedb(
invocation: str,
db_namespace: str,
language: str,
output_dir: str,
generate_zone_strings: bool,
tzdb: TzDb,
) -> None:
logging.info('======== Generating zonedb files')
# Generate internal versions of zone_infos and zone_policies
# so that ZoneSpecifier can be created.
logging.info('==== Generating inlined zone_infos and zone_policies')
inline_generator = InlineGenerator(tzdb['zones_map'], tzdb['rules_map'])
(zone_infos, zone_policies) = inline_generator.generate_maps()
logging.info(
'zone_infos=%d; zone_policies=%d',
len(zone_infos), len(zone_policies))
generator: Generator
# Create the Python or Arduino files as requested
if language == 'python':
logging.info('==== Creating Python zonedb files')
generator = PythonGenerator(
invocation=invocation,
tzdb=tzdb,
)
generator.generate_files(output_dir)
elif language == 'arduino':
logging.info('==== Creating Arduino zonedb files')
# Determine zonedb C++ namespace
# TODO: Maybe move this into ArduinoGenerator?
if not db_namespace:
if tzdb['scope'] == 'basic':
db_namespace = 'zonedb'
elif tzdb['scope'] == 'extended':
db_namespace = 'zonedbx'
else:
raise Exception(
f"db_namespace cannot be determined for "
f"scope '{tzdb['scope']}'"
)
# Generate the buf_size estimates for each zone, between start_year and
# until_year.
logging.info('==== Estimating transition buffer sizes')
logging.info(
'Checking years in [%d, %d)',
tzdb['start_year'], tzdb['until_year'])
estimator = BufSizeEstimator(
zone_infos, zone_policies, tzdb['start_year'], tzdb['until_year'])
(buf_sizes, max_size) = estimator.estimate()
logging.info(
'Num zones=%d; Max buffer size=%d',
len(buf_sizes), max_size,
)
generator = ArduinoGenerator(
invocation=invocation,
db_namespace=db_namespace,
generate_zone_strings=generate_zone_strings,
tzdb=tzdb,
buf_sizes=buf_sizes,
)
generator.generate_files(output_dir)
else:
raise Exception("Unrecognized language '%s'" % language)
def main() -> None:
"""
Main driver for TZ Database compiler which parses the IANA TZ Database files
located at the --input_dir and generates zoneinfo files and validation
datasets for unit tests at --output_dir.
Usage:
tzcompiler.py [flags...]
"""
# Configure command line flags.
parser = argparse.ArgumentParser(description='Generate Zone Info.')
# Extractor flags.
parser.add_argument(
'--input_dir', help='Location of the input directory', required=True)
# Transformer flags.
parser.add_argument(
'--scope',
# basic: 241 of the simpler time zones for BasicZoneSpecifier
# extended: all 348 time zones for ExtendedZoneSpecifier
choices=['basic', 'extended'],
help='Size of the generated database (basic|extended)',
required=True)
parser.add_argument(
'--start_year',
help='Start year of Zone Eras (default: 2000)',
type=int,
default=2000)
parser.add_argument(
'--until_year',
help='Until year of Zone Eras (default: 2038)',
type=int,
default=2038)
parser.add_argument(
'--granularity',
help=(
'Truncate UNTIL, AT, SAVE and RULES fields to '
+ 'this many seconds (default: 60)'
),
type=int)
parser.add_argument(
'--until_at_granularity',
help=(
'Truncate UNTIL and AT fields to this many seconds '
+ '(default: --granularity)'
),
type=int)
parser.add_argument(
'--offset_granularity',
help=(
            'Truncate SAVE, RULES (offset) fields to this many seconds '
+ '(default: --granularity)'
),
type=int)
parser.add_argument(
'--strict',
help='Remove zones and rules not aligned at granularity time boundary',
action='store_true',
default=False)
# Data pipeline selectors. Comma-separated list.
# tzdb: generate 'tzdb.json'
    # zonedb: generate zonedb ('zone_infos.*', 'zone_policies.*') files
    # zonelist: generate 'zones.txt' containing relevant zone names
parser.add_argument(
'--action',
help='Type of target(s) to generate',
required=True)
# Language selector (for --action zonedb)
parser.add_argument(
'--language',
choices=['arduino', 'python'],
help='Target language (arduino|python)',
)
# For '--language arduino', the following flags are used.
#
# C++ namespace names for '--language arduino'. If not specified, it will
# automatically be set to 'zonedb' or 'zonedbx' depending on the 'scope'.
parser.add_argument(
'--db_namespace',
help='C++ namespace for the zonedb files (default: zonedb or zonedbx)')
# Generated zone_strings.{h,cpp} files.
parser.add_argument(
'--generate_zone_strings',
help='Generate Arduino zone_strings.{h,cpp} files',
action='store_true')
# The tz_version does not affect any data processing. Its value is
# copied into the various generated files and usually placed in the
# comments section to describe the source of the data that generated the
# various files.
parser.add_argument(
'--tz_version',
help='Version string of the TZ files',
required=True,
)
# Target location of the generated files.
parser.add_argument(
'--output_dir',
help='Location of the output directory',
default='',
)
# Parse the command line arguments
args = parser.parse_args()
# Manually parse the comma-separated --action.
actions = set(args.action.split(','))
allowed_actions = set(['tzdb', 'zonedb', 'zonelist'])
if not actions.issubset(allowed_actions):
print(f'Invalid --action: {actions - allowed_actions}')
sys.exit(1)
# Configure logging. This should normally be executed after the
# parser.parse_args() because it allows us set the logging.level using a
# flag.
logging.basicConfig(level=logging.INFO)
# How the script was invoked
invocation = ' '.join(sys.argv)
# Define scope-dependent granularity if not overridden by flag
if args.granularity:
until_at_granularity = args.granularity
offset_granularity = args.granularity
else:
if args.until_at_granularity:
until_at_granularity = args.until_at_granularity
else:
until_at_granularity = 60
if args.offset_granularity:
offset_granularity = args.offset_granularity
else:
if args.scope == 'basic':
offset_granularity = 900
else:
offset_granularity = 60
logging.info('Using UNTIL/AT granularity: %d', until_at_granularity)
logging.info(
'Using RULES/SAVE (offset) granularity: %d',
offset_granularity)
# Extract the TZ files
logging.info('======== Extracting TZ Data files')
extractor = Extractor(args.input_dir)
extractor.parse()
extractor.print_summary()
rules_map, zones_map, links_map = extractor.get_data()
# Transform the TZ zones and rules
logging.info('======== Transforming Zones and Rules')
logging.info('Extracting years [%d, %d)', args.start_year, args.until_year)
transformer = Transformer(
zones_map,
rules_map,
links_map,
args.scope,
args.start_year,
args.until_year,
until_at_granularity,
offset_granularity,
args.strict,
)
transformer.transform()
transformer.print_summary()
(
zones_map, rules_map, links_map, removed_zones, removed_policies,
removed_links, notable_zones, notable_policies, notable_links,
format_strings, zone_strings,
) = transformer.get_data()
# Collect TZ DB data into a single JSON-serializable object.
tzdb_generator = TzDbCollector(
tz_version=args.tz_version,
tz_files=Extractor.ZONE_FILES,
scope=args.scope,
start_year=args.start_year,
until_year=args.until_year,
until_at_granularity=until_at_granularity,
offset_granularity=offset_granularity,
strict=args.strict,
zones_map=zones_map,
links_map=links_map,
rules_map=rules_map,
removed_zones=removed_zones,
removed_links=removed_links,
removed_policies=removed_policies,
notable_zones=notable_zones,
notable_links=notable_links,
notable_policies=notable_policies,
format_strings=format_strings,
zone_strings=zone_strings,
)
tzdb = tzdb_generator.get_data()
for action in actions:
if action == 'zonedb':
generate_zonedb(
invocation=invocation,
db_namespace=args.db_namespace,
language=args.language,
output_dir=args.output_dir,
generate_zone_strings=args.generate_zone_strings,
tzdb=tzdb,
)
elif action == 'tzdb':
logging.info('======== Creating JSON zonedb files')
tzdb_generator.generate_files(args.output_dir)
elif action == 'zonelist':
logging.info('======== Creating zones.txt')
generator = ZoneListGenerator(
invocation=invocation,
tzdb=tzdb,
)
generator.generate_files(args.output_dir)
else:
logging.error(f"Unrecognized action '{action}'")
sys.exit(1)
logging.info('======== Finished processing TZ Data files.')
if __name__ == '__main__':
main()
|
# -*- coding: utf-8 -*-
from simmate.calculators.vasp.tasks.base import VaspTask
from simmate.calculators.vasp.inputs.potcar_mappings import (
PBE_ELEMENT_MAPPINGS_LOW_QUALITY,
)
class Quality03Relaxation(VaspTask):
# returns structure separately from vasprun object
return_final_structure = True
# This uses the PBE functional with POTCARs that have lower electron counts
# and convergence criteria when available.
functional = "PBE"
potcar_mappings = PBE_ELEMENT_MAPPINGS_LOW_QUALITY
# because this calculation is such a low quality we don't raise an error
# if the calculation fails to converge
confirm_convergence = False
# Make the unitcell relatively cubic before relaxing
pre_sanitize_structure = True
# These are all input settings for this task.
incar = dict(
# These settings are the same for all structures regardless of composition.
PREC="Normal",
EDIFF=1e-4,
ENCUT=425, # !!! Should this be based on the element type?
ISIF=3,
NSW=100,
IBRION=1,
POTIM=0.25,
LCHARG=False,
LWAVE=False,
KSPACING=0.5,
# The type of smearing we use depends on if we have a metal, semiconductor,
# or insulator. So we need to decide this using a keyword modifier.
multiple_keywords__smart_ismear={
"metal": dict(
ISMEAR=1,
SIGMA=0.06,
),
"non-metal": dict(
ISMEAR=0,
SIGMA=0.05,
),
},
)
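# A hypothetical usage sketch (the exact task-running API is assumed here,
# it is not shown in this file):
#   task = Quality03Relaxation()
#   result = task.run(structure=structure)  # `structure`: a pymatgen Structure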
|
import taichi as ti
from pytest import approx
import autograd.numpy as np
from autograd import grad
@ti.all_archs
def grad_test(tifunc, npfunc=None):
if npfunc is None:
npfunc = tifunc
x = ti.var(ti.f32)
y = ti.var(ti.f32)
@ti.layout
def place():
ti.root.dense(ti.i, 1).place(x, x.grad, y, y.grad)
@ti.kernel
def func():
for i in x:
y[i] = tifunc(x[i])
v = 0.2
y.grad[0] = 1
x[0] = v
func()
func.grad()
assert y[0] == approx(npfunc(v))
assert x.grad[0] == approx(grad(npfunc)(v))
def test_unary():
import time
t = time.time()
grad_test(lambda x: ti.sqrt(x), lambda x: np.sqrt(x))
grad_test(lambda x: ti.exp(x), lambda x: np.exp(x))
grad_test(lambda x: ti.log(x), lambda x: np.log(x))
ti.core.print_profile_info()
print("Total time {:.3f}s".format(time.time() - t))
test_unary()
|
import numpy as np
import pandas as pd
import pylab as plt
import components.visualization
from components.flowUtils import annotateProgress, cached
class PerformanceUserMatrixPlot:
def __init__(self, flow, orderUsers=None):
self.flow = flow
self.performanceMatrix = flow.getPerformanceMatrix(flow.getProblems())
self.orderUsers = orderUsers
def plotBase(self):
pdMatrix = self.performanceMatrix.transpose()
if self.orderUsers:
            pdMatrix = pdMatrix.reindex(columns=sorted(pdMatrix.columns, key=self.orderUsers))
def color(c):
if np.isnan(c):
return np.array([1.0, 0.0, 0.0])
return np.array([c, c, c])
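        # color() maps a score in [0, 1] to a grayscale pixel; NaN entries
        # (presumably missing attempts) are drawn in red.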
# pdMatrix = pdMatrix.head(100)
        npMatrix = pdMatrix.applymap(color).to_numpy()  # .as_matrix() was removed in pandas 1.0
npMatrix = np.concatenate(npMatrix.flatten())
npMatrix = np.reshape(npMatrix, (pdMatrix.shape[0], pdMatrix.shape[1], 3))
plt.imshow(npMatrix, interpolation='nearest')
plt.title(str(self.flow))
@annotateProgress
def plot(self):
self.plotBase()
plt.show()
@annotateProgress
def saveplot(self, path):
plt.clf()
self.plotBase()
plt.savefig(path)
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'pydm.ui'
#
# Created by: PyQt5 UI code generator 5.11.3
#
# WARNING! All changes made in this file will be lost!
from qtpy import QtCore, QtWidgets
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(677, 465)
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.verticalLayout = QtWidgets.QVBoxLayout(self.centralwidget)
self.verticalLayout.setSizeConstraint(QtWidgets.QLayout.SetMaximumSize)
self.verticalLayout.setSpacing(6)
self.verticalLayout.setObjectName("verticalLayout")
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtWidgets.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 677, 22))
self.menubar.setObjectName("menubar")
self.menuFile = QtWidgets.QMenu(self.menubar)
self.menuFile.setObjectName("menuFile")
self.menuView = QtWidgets.QMenu(self.menubar)
self.menuView.setObjectName("menuView")
self.menuHistory = QtWidgets.QMenu(self.menubar)
self.menuHistory.setObjectName("menuHistory")
self.menuTools = QtWidgets.QMenu(self.menubar)
self.menuTools.setObjectName("menuTools")
MainWindow.setMenuBar(self.menubar)
self.statusbar = QtWidgets.QStatusBar(MainWindow)
self.statusbar.setObjectName("statusbar")
MainWindow.setStatusBar(self.statusbar)
self.navbar = QtWidgets.QToolBar(MainWindow)
self.navbar.setMovable(False)
self.navbar.setFloatable(False)
self.navbar.setObjectName("navbar")
MainWindow.addToolBar(QtCore.Qt.TopToolBarArea, self.navbar)
self.actionEdit_in_Designer = QtWidgets.QAction(MainWindow)
self.actionEdit_in_Designer.setShortcutContext(QtCore.Qt.ApplicationShortcut)
self.actionEdit_in_Designer.setObjectName("actionEdit_in_Designer")
self.actionAbout_PyDM = QtWidgets.QAction(MainWindow)
self.actionAbout_PyDM.setEnabled(True)
self.actionAbout_PyDM.setShortcutContext(QtCore.Qt.ApplicationShortcut)
self.actionAbout_PyDM.setObjectName("actionAbout_PyDM")
self.actionReload_Display = QtWidgets.QAction(MainWindow)
self.actionReload_Display.setShortcutContext(QtCore.Qt.ApplicationShortcut)
self.actionReload_Display.setObjectName("actionReload_Display")
self.actionIncrease_Font_Size = QtWidgets.QAction(MainWindow)
self.actionIncrease_Font_Size.setShortcutContext(QtCore.Qt.ApplicationShortcut)
self.actionIncrease_Font_Size.setObjectName("actionIncrease_Font_Size")
self.actionDecrease_Font_Size = QtWidgets.QAction(MainWindow)
self.actionDecrease_Font_Size.setShortcutContext(QtCore.Qt.ApplicationShortcut)
self.actionDecrease_Font_Size.setObjectName("actionDecrease_Font_Size")
self.actionShow_File_Path_in_Title_Bar = QtWidgets.QAction(MainWindow)
self.actionShow_File_Path_in_Title_Bar.setCheckable(True)
self.actionShow_File_Path_in_Title_Bar.setShortcutContext(QtCore.Qt.ApplicationShortcut)
self.actionShow_File_Path_in_Title_Bar.setObjectName("actionShow_File_Path_in_Title_Bar")
self.actionBack = QtWidgets.QAction(MainWindow)
self.actionBack.setShortcutContext(QtCore.Qt.ApplicationShortcut)
self.actionBack.setObjectName("actionBack")
self.actionForward = QtWidgets.QAction(MainWindow)
self.actionForward.setShortcutContext(QtCore.Qt.ApplicationShortcut)
self.actionForward.setObjectName("actionForward")
self.actionHome = QtWidgets.QAction(MainWindow)
self.actionHome.setShortcutContext(QtCore.Qt.ApplicationShortcut)
self.actionHome.setObjectName("actionHome")
self.actionShow_Navigation_Bar = QtWidgets.QAction(MainWindow)
self.actionShow_Navigation_Bar.setCheckable(True)
self.actionShow_Navigation_Bar.setChecked(True)
self.actionShow_Navigation_Bar.setShortcutContext(QtCore.Qt.ApplicationShortcut)
self.actionShow_Navigation_Bar.setObjectName("actionShow_Navigation_Bar")
self.actionOpen_File = QtWidgets.QAction(MainWindow)
self.actionOpen_File.setShortcutContext(QtCore.Qt.ApplicationShortcut)
self.actionOpen_File.setObjectName("actionOpen_File")
self.actionShow_Menu_Bar = QtWidgets.QAction(MainWindow)
self.actionShow_Menu_Bar.setCheckable(True)
self.actionShow_Menu_Bar.setChecked(True)
self.actionShow_Menu_Bar.setShortcutContext(QtCore.Qt.ApplicationShortcut)
self.actionShow_Menu_Bar.setObjectName("actionShow_Menu_Bar")
self.actionShow_Status_Bar = QtWidgets.QAction(MainWindow)
self.actionShow_Status_Bar.setCheckable(True)
self.actionShow_Status_Bar.setChecked(True)
self.actionShow_Status_Bar.setShortcutContext(QtCore.Qt.ApplicationShortcut)
self.actionShow_Status_Bar.setObjectName("actionShow_Status_Bar")
self.actionShow_Connections = QtWidgets.QAction(MainWindow)
self.actionShow_Connections.setShortcutContext(QtCore.Qt.ApplicationShortcut)
self.actionShow_Connections.setObjectName("actionShow_Connections")
self.actionLoadTool = QtWidgets.QAction(MainWindow)
self.actionLoadTool.setObjectName("actionLoadTool")
self.actionEnter_Fullscreen = QtWidgets.QAction(MainWindow)
self.actionEnter_Fullscreen.setObjectName("actionEnter_Fullscreen")
self.actionDefault_Font_Size = QtWidgets.QAction(MainWindow)
self.actionDefault_Font_Size.setObjectName("actionDefault_Font_Size")
self.menuFile.addAction(self.actionOpen_File)
self.menuFile.addSeparator()
self.menuFile.addAction(self.actionEdit_in_Designer)
self.menuFile.addAction(self.actionReload_Display)
self.menuFile.addSeparator()
self.menuFile.addAction(self.actionAbout_PyDM)
self.menuView.addAction(self.actionEnter_Fullscreen)
self.menuView.addAction(self.actionIncrease_Font_Size)
self.menuView.addAction(self.actionDecrease_Font_Size)
self.menuView.addAction(self.actionDefault_Font_Size)
self.menuView.addSeparator()
self.menuView.addAction(self.actionShow_File_Path_in_Title_Bar)
self.menuView.addAction(self.actionShow_Navigation_Bar)
self.menuView.addAction(self.actionShow_Menu_Bar)
self.menuView.addAction(self.actionShow_Status_Bar)
self.menuView.addAction(self.actionShow_Connections)
self.menuHistory.addAction(self.actionBack)
self.menuHistory.addAction(self.actionForward)
self.menuHistory.addAction(self.actionHome)
self.menuTools.addAction(self.actionLoadTool)
self.menuTools.addSeparator()
self.menubar.addAction(self.menuFile.menuAction())
self.menubar.addAction(self.menuView.menuAction())
self.menubar.addAction(self.menuHistory.menuAction())
self.menubar.addAction(self.menuTools.menuAction())
self.navbar.addAction(self.actionBack)
self.navbar.addAction(self.actionForward)
self.navbar.addSeparator()
self.navbar.addAction(self.actionHome)
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "PyDM Main Window"))
self.menuFile.setTitle(_translate("MainWindow", "File"))
self.menuView.setTitle(_translate("MainWindow", "View"))
self.menuHistory.setTitle(_translate("MainWindow", "History"))
self.menuTools.setTitle(_translate("MainWindow", "Tools"))
self.navbar.setWindowTitle(_translate("MainWindow", "toolBar"))
self.actionEdit_in_Designer.setText(_translate("MainWindow", "Edit in Designer"))
self.actionAbout_PyDM.setText(_translate("MainWindow", "About PyDM"))
self.actionReload_Display.setText(_translate("MainWindow", "Reload Display"))
self.actionReload_Display.setShortcut(_translate("MainWindow", "Ctrl+R"))
self.actionIncrease_Font_Size.setText(_translate("MainWindow", "Increase Font Size"))
self.actionIncrease_Font_Size.setShortcut(_translate("MainWindow", "Ctrl+="))
self.actionDecrease_Font_Size.setText(_translate("MainWindow", "Decrease Font Size"))
self.actionDecrease_Font_Size.setShortcut(_translate("MainWindow", "Ctrl+-"))
self.actionShow_File_Path_in_Title_Bar.setText(_translate("MainWindow", "Show File Path in Title Bar"))
self.actionBack.setText(_translate("MainWindow", "Back"))
self.actionBack.setShortcut(_translate("MainWindow", "Ctrl+Left"))
self.actionForward.setText(_translate("MainWindow", "Forward"))
self.actionForward.setShortcut(_translate("MainWindow", "Ctrl+Right"))
self.actionHome.setText(_translate("MainWindow", "Home"))
self.actionHome.setShortcut(_translate("MainWindow", "Ctrl+H"))
self.actionShow_Navigation_Bar.setText(_translate("MainWindow", "Show Navigation Bar"))
self.actionOpen_File.setText(_translate("MainWindow", "Open File..."))
self.actionOpen_File.setShortcut(_translate("MainWindow", "Ctrl+O"))
self.actionShow_Menu_Bar.setText(_translate("MainWindow", "Show Menu Bar"))
self.actionShow_Menu_Bar.setShortcut(_translate("MainWindow", "Ctrl+M"))
self.actionShow_Status_Bar.setText(_translate("MainWindow", "Show Status Bar"))
self.actionShow_Connections.setText(_translate("MainWindow", "Show Connections..."))
self.actionLoadTool.setText(_translate("MainWindow", "Load..."))
self.actionEnter_Fullscreen.setText(_translate("MainWindow", "Enter Fullscreen"))
self.actionEnter_Fullscreen.setShortcut(_translate("MainWindow", "F11"))
self.actionDefault_Font_Size.setText(_translate("MainWindow", "Default Font Size"))
self.actionDefault_Font_Size.setShortcut(_translate("MainWindow", "Ctrl+0"))
|
from abc import ABC, abstractmethod
from collections import namedtuple
import ctypes
import json
import logging
import os
import requests
import socket
import subprocess
import sys
import urllib.parse
from utils import clip
try:
# omxplayer is only available on Raspberry Pi
from omxplayer.player import OMXPlayer # noqa
HAVE_OMXPLAYER = True
except ModuleNotFoundError:
HAVE_OMXPLAYER = False
try:
import vlc
HAVE_LIBVLC = True
except ModuleNotFoundError:
HAVE_LIBVLC = False
Rectangle = namedtuple('Rectangle', ['x', 'y', 'width', 'height'])
VideoSize = namedtuple('VideoSize', ['width', 'height'])
def get_video_size(filepath):
result = subprocess.run(['ffprobe',
'-v', 'error',
'-print_format', 'json',
'-select_streams', 'v:0',
'-show_entries', 'stream=width,height',
str(filepath)],
capture_output=True, text=True)
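    # ffprobe emits JSON along the lines of
    # {"streams": [{"width": 1920, "height": 1080}]} (values are examples).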
result = json.loads(result.stdout)
return VideoSize(width=result['streams'][0]['width'], height=result['streams'][0]['height'])
class PlayerInterface(ABC):
def __init__(self, exe, name, default_args, player_parameters):
self.exe = exe
self.name = name
        assert name in PlayerLookup
self.default_args = default_args
self.player_parameters = player_parameters
self.child = None
def is_finished(self):
if self.child:
finished = self.child.poll() is not None
if finished:
self.child = None
return finished
else:
return True
def _play(self, filepath, allocate_pty):
cmd = [self.exe] + self.default_args + [filepath.resolve()]
logging.debug("PlayerInterface._play: %s", cmd)
if allocate_pty:
master, slave = os.openpty()
self.child = subprocess.Popen(cmd,
stdin=master,
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL)
self.child_stdin = slave
else:
self.child = subprocess.Popen(cmd)
self.child_stdin = None
def play(self, filepath, widget=None):
# default implementation
return self._play(filepath, allocate_pty=False)
@abstractmethod
def play_pause(self):
raise NotImplementedError()
def stop(self):
logging.debug("PlayerInterface::stop (%s)", self.child)
if self.child:
self.child.kill()
self.child = None
def volume_change(self, change):
pass
def window_size_changed(self, new_size):
pass
class MPlayer(PlayerInterface):
def __init__(self, exe, default_args, player_parameters):
self.fifo_name = '/tmp/picave.mplayer-fifo'
if default_args is None:
default_args = ['-geometry', '0:0',
'-slave',
'-input', 'file=%s' % self.fifo_name]
super().__init__(exe, "mplayer", default_args, player_parameters)
if os.path.exists(self.fifo_name):
os.remove(self.fifo_name)
def __del__(self):
if os.path.exists(self.fifo_name):
os.remove(self.fifo_name)
def play(self, filepath, widget=None):
# TODO: Switch to using _play() ?
if not os.path.exists(self.fifo_name):
os.mkfifo(self.fifo_name)
cmd = [self.exe] + self.default_args + [filepath]
self.child = subprocess.Popen(cmd,
stdin=subprocess.PIPE,
stdout=subprocess.DEVNULL)
def play_pause(self):
logging.debug("MPlayer::play_pause")
self.send_command('pause')
def send_command(self, command):
with open(self.fifo_name, 'w') as handle:
handle.write(command + '\n')
    def volume_change(self, change):
        logging.debug("MPlayer::volume_change %d", change)
        self.send_command('volume %d' % (10 if change > 0 else -10))
class Mpg123(PlayerInterface):
def __init__(self, exe, default_args, player_parameters):
if default_args is None:
default_args = ['--quiet', '--control']
super().__init__(exe, "mpg123", default_args, player_parameters)
def play(self, filepath, widget=None):
return self._play(filepath, allocate_pty=True)
def play_pause(self):
logging.debug("Mpg123::play_pause")
# Credit to https://stackoverflow.com/questions/17416158/python-2-7-subprocess-control-interaction-with-mpg123
if self.child:
logging.debug("Mpg123::play_pause")
os.write(self.child_stdin, b's')
def volume_change(self, change):
if self.child:
logging.debug("Mpg123::volume_change %u", change)
change = b'+' if (change > 0) else b'-'
change = 3 * change
os.write(self.child_stdin, change)
class MPVPlayer(PlayerInterface):
@staticmethod
def encode_command(command):
command = {'command': command}
command = json.dumps(command) + '\n'
command = command.encode()
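        # e.g. encode_command(['set_property', 'volume', 50]) yields
        # b'{"command": ["set_property", "volume", 50]}\n'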
return command
def __init__(self, exe, default_args, player_parameters):
self.ipc_address = '/tmp/picave.mpv-socket'
if default_args is None:
default_args = ['--geometry=0:0', '--ontop', '--input-ipc-server=%s' % self.ipc_address]
super().__init__(exe, "mpv", default_args, player_parameters)
self.pause = MPVPlayer.encode_command(['set_property_string', 'pause', 'yes'])
self.resume = MPVPlayer.encode_command(['set_property_string', 'pause', 'no'])
self.current_volume = 100
self.sock = None
if os.path.exists(self.ipc_address):
os.remove(self.ipc_address)
def __del__(self):
if os.path.exists(self.ipc_address):
os.remove(self.ipc_address)
def play(self, filepath, widget=None):
self.playing = True
self.sock = None
return self._play(filepath, allocate_pty=False)
def play_pause(self):
logging.debug("MPVPlayer::play_pause")
self.send_command(self.pause if (self.playing) else self.resume)
self.playing = not self.playing
def send_command(self, command):
if not self.child:
return
if self.sock is None:
try:
self.sock = socket.socket(socket.AF_UNIX)
self.sock.connect(self.ipc_address)
except FileNotFoundError:
logging.debug("No socket found for MPV IPC")
self.sock = None
return
logging.debug("MPVPlayer::send_command %s", command)
self.sock.sendall(command)
def volume_change(self, change):
self.current_volume = clip(0, self.current_volume + 5 * change, 100)
logging.debug("MPlayer::volume_change %u -> %u", change, self.current_volume)
self.send_command(MPVPlayer.encode_command(['set_property', 'volume', self.current_volume]))
class OmxPlayer(PlayerInterface):
def __init__(self, exe, default_args, player_parameters):
if default_args is None:
default_args = []
super().__init__(exe, "omxplayer", default_args, player_parameters)
def playback_finished_handler(self, player, exit_status):
self.child = None
def play(self, filepath, widget=None):
args = list(self.default_args)
if widget:
window = widget.get_allocation()
x0 = window.x + self.player_parameters.get('margin_left', 0)
y0 = window.y + self.player_parameters.get('margin_top', 0)
width = (window.width
- self.player_parameters.get('margin_right', 0)
- self.player_parameters.get('margin_left', 0))
height = (window.height
- self.player_parameters.get('margin_top', 0)
- self.player_parameters.get('margin_bottom', 0))
physical_window = Rectangle(x0, y0, width, height)
video_size = get_video_size(filepath)
logging.debug("OmxPlayer.play: window=%u,%u,%u,%u, physical_window=%u,%u,%u,%u, video size=%s",
window.x, window.y, window.width, window.height,
physical_window.x, physical_window.y, physical_window.width, physical_window.height,
str(video_size))
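            # Letterbox: scale the video by the limiting ratio, then centre
            # the scaled rectangle inside the widget's allocation.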
width_ratio = physical_window.width / video_size.width
height_ratio = physical_window.height / video_size.height
draw_w = video_size.width * min(width_ratio, height_ratio)
draw_h = video_size.height * min(width_ratio, height_ratio)
assert draw_w <= window.width
assert draw_h <= window.height
draw_x1 = physical_window.x + (physical_window.width - draw_w) / 2
draw_y1 = physical_window.y + (physical_window.height - draw_h) / 2
draw_x2 = draw_x1 + draw_w
draw_y2 = draw_y1 + draw_h
args.extend(['--win', '%u,%u,%u,%u' % (draw_x1, draw_y1, draw_x2, draw_y2), '--aspect-mode', 'letterbox'])
if HAVE_OMXPLAYER:
# Use the wrapper, which allows full control
self.child = OMXPlayer(filepath, args=args)
self.child.exitEvent += self.playback_finished_handler
else:
logging.warning("Launching omxplayer without control")
cmd = [self.exe] + args + [filepath]
self.child = subprocess.Popen(cmd,
stdin=None,
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL)
def is_finished(self):
if HAVE_OMXPLAYER:
return self.child is None
else:
return super().is_finished()
def play_pause(self):
logging.debug("OmxPlayer::play_pause")
if self.child and HAVE_OMXPLAYER:
self.child.play_pause()
def stop(self):
logging.debug("OmxPlayer::stop")
if HAVE_OMXPLAYER:
if self.child:
self.child.stop()
self.child = None
# else it's already terminated
else:
# Do not use super().stop(): omxplayer is a shell script that runs
# omxplayer.bin
# Killing omxplayer does not kill the actual video player, leaving
# a full-screen application that cannot be terminated...
subprocess.run(['pkill', 'omxplayer.bin'])
def volume_change(self, change):
if HAVE_OMXPLAYER and self.child:
# omxplayer volume is [0, 10] - with 1 = 100%
current_volume = self.child.volume()
volume = clip(0, current_volume + change / 10, 10)
logging.debug("OmxPlayer::volume_change %u -> %u", current_volume, volume)
self.child.set_volume(volume)
class LibVlcPlayer(PlayerInterface):
def __init__(self, exe, default_args, player_parameters):
assert HAVE_LIBVLC
super().__init__(exe, "libvlc", default_args, player_parameters)
self.video_player = None
self.playing = False
self.video_file_width = None # The natural size of the video
def is_finished(self):
if self.video_player is None:
return True
logging.debug("is_finished: %f", self.video_player.get_position())
return self.video_player.get_state() == vlc.State.Ended
def play(self, filepath, widget=None):
self.playing = True
self.video_file_width = get_video_size(filepath).width
self.vlcInstance = vlc.Instance("--no-xlib")
self.video_player = self.vlcInstance.media_player_new()
self.video_player.set_mrl(filepath.as_uri())
self.video_player.play()
if widget:
self.set_player_window(widget)
self.set_video_scale(widget.get_allocation())
def play_pause(self):
# Must call play() before play_pause() will do anything
assert self.video_player
if self.playing:
self.video_player.pause()
else:
self.video_player.play()
self.playing = not self.playing
def stop(self):
if self.video_player:
self.video_player.stop()
self.video_player = None
self.vlcInstance = None
def window_size_changed(self, new_size):
assert self.video_file_width
self.set_video_scale(new_size)
def set_video_scale(self, video_area_allocation):
if video_area_allocation.width > self.video_file_width:
# Don't attempt to scale up: the R-Pi isn't up to it
self.video_player.video_set_scale(1.0)
else:
# automatically scale down to fit the window
self.video_player.video_set_scale(0.0)
def set_player_window(self, widget):
logging.debug("set_player_window")
if sys.platform == 'win32':
raise NotImplementedError()
elif sys.platform == 'darwin':
self.set_player_window_darwin(widget)
else:
self.set_player_window_x11(widget)
def set_player_window_darwin(self, widget):
# https://gitlab.gnome.org/GNOME/pygobject/issues/112
# and https://www.mail-archive.com/vlc-commits@videolan.org/msg55659.html
# and https://github.com/oaubert/python-vlc/blob/master/examples/gtkvlc.py
window = widget.get_window()
getpointer = ctypes.pythonapi.PyCapsule_GetPointer
getpointer.restype = ctypes.c_void_p
getpointer.argtypes = [ctypes.py_object]
pointer = getpointer(window.__gpointer__, None)
libgdk = ctypes.CDLL("libgdk-3.dylib")
get_nsview = libgdk.gdk_quartz_window_get_nsview
get_nsview.restype = ctypes.c_void_p
get_nsview.argtypes = [ctypes.c_void_p]
handle = get_nsview(pointer)
self.video_player.set_nsobject(handle)
def set_player_window_x11(self, widget):
win_id = widget.get_window().get_xid()
self.video_player.set_xwindow(win_id)
class VlcPlayer(PlayerInterface):
def __init__(self, exe, default_args, player_parameters):
self.vlc_port = 28771 # 28771 = 0x7063; 0x70=ord('p'), 0x63=ord('c')
self.vlc_password = 'picave'
if default_args is None:
default_args = ['--video-on-top',
'--control', 'http',
'--http-host', 'localhost',
'--http-port', str(self.vlc_port),
'--http-password', self.vlc_password]
super().__init__(exe, "vlc", default_args, player_parameters)
def play(self, filepath, widget=None):
cmd = [self.exe] + self.default_args + [filepath.resolve().as_uri()]
logging.debug("VlcPlayer::play %s", cmd)
self.child = subprocess.Popen(cmd)
def play_pause(self):
logging.debug("VlcPlayer::play_pause")
self.send_command(command='pl_pause')
def send_command(self, **kwargs):
if not self.child:
return
params = urllib.parse.urlencode(kwargs)
addr = 'http://localhost:%u/requests/status.xml?%s' % (self.vlc_port, params)
logging.debug("VlcPlayer::send_command %s", addr)
response = requests.get(addr, auth=('', self.vlc_password))
if not response.ok:
logging.warning("VLC response: %s", response)
def volume_change(self, change):
        change = '%+d' % (change * 8)
self.send_command(command='volume', val=change)
PlayerLookup = {
'mpg123': Mpg123,
'mplayer': MPlayer,
'mpv': MPVPlayer,
'omxplayer': OmxPlayer,
'libvlc': LibVlcPlayer,
'vlc': VlcPlayer
}
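
# A minimal construction sketch (hypothetical exe path and parameters; real
# values come from the application's configuration):
#
#     import pathlib
#
#     player_cls = PlayerLookup['mpv']
#     player = player_cls(exe='/usr/bin/mpv', default_args=None, player_parameters={})
#     player.play(pathlib.Path('/videos/session.mp4'))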
|
#!/usr/bin/env python
# rgb2colorname.py
# by wilsonmar@gmail.com, ayush.original@gmail.com, https://github.com/paarthneekhara
# Usage:
# Explained in https://github.com/jetbloom/rgb2colorname/blob/master/README.md
import numpy as np
from scipy import spatial
A = np.array([ \
[240,248,255] \
,[250,235,215] \
,[255,239,219] \
,[238,223,204] \
,[205,192,176] \
,[139,131,120] \
,[0,255,255] \
,[127,255,212] \
,[127,255,212] \
,[118,238,198] \
,[102,205,170] \
,[69,139,116] \
,[240,255,255] \
,[240,255,255] \
,[224,238,238] \
,[193,205,205] \
,[131,139,139] \
,[245,245,220] \
,[255,228,196] \
,[255,228,196] \
,[238,213,183] \
,[205,183,158] \
,[139,125,107] \
,[0,0,0] \
,[255,235,205] \
,[0,0,255] \
,[0,0,255] \
,[0,0,238] \
,[0,0,205] \
,[0,0,139] \
,[138,43,226] \
,[165,42,42] \
,[255,64,64] \
,[238,59,59] \
,[205,51,51] \
,[139,35,35] \
,[222,184,135] \
,[255,211,155] \
,[238,197,145] \
,[205,170,125] \
,[139,115,85] \
,[95,158,160] \
,[152,245,255] \
,[142,229,238] \
,[122,197,205] \
,[83,134,139] \
,[127,255,0] \
,[127,255,0] \
,[118,238,0] \
,[102,205,0] \
,[69,139,0] \
,[210,105,30] \
,[255,127,36] \
,[238,118,33] \
,[205,102,29] \
,[139,69,19] \
,[255,127,80] \
,[255,114,86] \
,[238,106,80] \
,[205,91,69] \
,[139,62,47] \
,[100,149,237] \
,[255,248,220] \
,[255,248,220] \
,[238,232,205] \
,[205,200,177] \
,[139,136,120] \
,[220,20,60] \
,[0,255,255] \
,[0,255,255] \
,[0,238,238] \
,[0,205,205] \
,[0,139,139] \
,[0,0,139] \
,[0,139,139] \
,[184,134,11] \
,[255,185,15] \
,[238,173,14] \
,[205,149,12] \
,[139,101,8] \
,[169,169,169] \
,[0,100,0] \
,[189,183,107] \
,[139,0,139] \
,[85,107,47] \
,[202,255,112] \
,[188,238,104] \
,[162,205,90] \
,[110,139,61] \
,[255,140,0] \
,[255,127,0] \
,[238,118,0] \
,[205,102,0] \
,[139,69,0] \
,[153,50,204] \
,[191,62,255] \
,[178,58,238] \
,[154,50,205] \
,[104,34,139] \
,[139,0,0] \
,[233,150,122] \
,[143,188,143] \
,[193,255,193] \
,[180,238,180] \
,[155,205,155] \
,[105,139,105] \
,[72,61,139] \
,[47,79,79] \
,[151,255,255] \
,[141,238,238] \
,[121,205,205] \
,[82,139,139] \
,[0,206,209] \
,[148,0,211] \
,[255,20,147] \
,[255,20,147] \
,[238,18,137] \
,[205,16,118] \
,[139,10,80] \
,[0,191,255] \
,[0,191,255] \
,[0,178,238] \
,[0,154,205] \
,[0,104,139] \
,[105,105,105] \
,[30,144,255] \
,[30,144,255] \
,[28,134,238] \
,[24,116,205] \
,[16,78,139] \
,[178,34,34] \
,[255,48,48] \
,[238,44,44] \
,[205,38,38] \
,[139,26,26] \
,[255,250,240] \
,[255,250,240] \
,[34,139,34] \
,[255,0,255] \
,[220,220,220] \
,[248,248,255] \
,[255,215,0] \
,[255,215,0] \
,[238,201,0] \
,[205,173,0] \
,[139,117,0] \
,[218,165,32] \
,[255,193,37] \
,[238,180,34] \
,[205,155,29] \
,[139,105,20] \
,[128,128,128] \
,[190,190,190] \
,[0,0,0] \
,[3,3,3] \
,[26,26,26] \
,[255,255,255] \
,[28,28,28] \
,[31,31,31] \
,[33,33,33] \
,[36,36,36] \
,[38,38,38] \
,[41,41,41] \
,[43,43,43] \
,[46,46,46] \
,[48,48,48] \
,[5,5,5] \
,[51,51,51] \
,[54,54,54] \
,[56,56,56] \
,[59,59,59] \
,[61,61,61] \
,[64,64,64] \
,[66,66,66] \
,[69,69,69] \
,[71,71,71] \
,[74,74,74] \
,[8,8,8] \
,[77,77,77] \
,[79,79,79] \
,[82,82,82] \
,[84,84,84] \
,[87,87,87] \
,[89,89,89] \
,[92,92,92] \
,[94,94,94] \
,[97,97,97] \
,[99,99,99] \
,[10,10,10] \
    ,[102,102,102] \
,[105,105,105] \
,[107,107,107] \
,[110,110,110] \
,[112,112,112] \
,[115,115,115] \
,[117,117,117] \
,[120,120,120] \
,[122,122,122] \
,[125,125,125] \
,[13,13,13] \
,[127,127,127] \
,[130,130,130] \
,[133,133,133] \
,[135,135,135] \
,[138,138,138] \
,[140,140,140] \
,[143,143,143] \
,[145,145,145] \
,[148,148,148] \
,[150,150,150] \
,[15,15,15] \
,[153,153,153] \
,[156,156,156] \
,[158,158,158] \
,[161,161,161] \
,[163,163,163] \
,[166,166,166] \
,[168,168,168] \
,[171,171,171] \
,[173,173,173] \
,[176,176,176] \
,[18,18,18] \
,[179,179,179] \
,[181,181,181] \
,[184,184,184] \
,[186,186,186] \
,[189,189,189] \
,[191,191,191] \
,[194,194,194] \
,[196,196,196] \
,[199,199,199] \
,[201,201,201] \
,[20,20,20] \
,[204,204,204] \
,[207,207,207] \
,[209,209,209] \
,[212,212,212] \
,[214,214,214] \
,[217,217,217] \
,[219,219,219] \
,[222,222,222] \
,[224,224,224] \
,[227,227,227] \
,[23,23,23] \
,[229,229,229] \
,[232,232,232] \
,[235,235,235] \
,[237,237,237] \
,[240,240,240] \
,[242,242,242] \
,[245,245,245] \
,[247,247,247] \
,[250,250,250] \
,[252,252,252] \
,[0,128,0] \
,[0,255,0] \
,[0,255,0] \
,[0,238,0] \
,[0,205,0] \
,[0,139,0] \
,[173,255,47] \
,[240,255,240] \
,[240,255,240] \
,[224,238,224] \
,[193,205,193] \
,[131,139,131] \
,[255,105,180] \
,[255,110,180] \
,[238,106,167] \
,[205,96,144] \
,[139,58,98] \
,[205,92,92] \
,[255,106,106] \
,[238,99,99] \
,[205,85,85] \
,[139,58,58] \
,[75,0,130] \
,[255,255,240] \
,[255,255,240] \
,[238,238,224] \
,[205,205,193] \
,[139,139,131] \
,[240,230,140] \
,[255,246,143] \
,[238,230,133] \
,[205,198,115] \
,[139,134,78] \
,[230,230,250] \
,[255,240,245] \
,[255,240,245] \
,[238,224,229] \
,[205,193,197] \
,[139,131,134] \
,[124,252,0] \
,[255,250,205] \
,[255,250,205] \
,[238,233,191] \
,[205,201,165] \
,[139,137,112] \
,[173,216,230] \
,[191,239,255] \
,[178,223,238] \
,[154,192,205] \
,[104,131,139] \
,[240,128,128] \
,[224,255,255] \
,[224,255,255] \
,[209,238,238] \
,[180,205,205] \
,[122,139,139] \
,[238,221,130] \
,[255,236,139] \
,[238,220,130] \
,[205,190,112] \
,[139,129,76] \
,[250,250,210] \
,[211,211,211] \
,[144,238,144] \
,[255,182,193] \
,[255,174,185] \
,[238,162,173] \
,[205,140,149] \
,[139,95,101] \
,[255,160,122] \
,[255,160,122] \
,[238,149,114] \
,[205,129,98] \
,[139,87,66] \
,[32,178,170] \
,[135,206,250] \
,[176,226,255] \
,[164,211,238] \
,[141,182,205] \
,[96,123,139] \
,[132,112,255] \
,[119,136,153] \
,[176,196,222] \
,[202,225,255] \
,[188,210,238] \
,[162,181,205] \
,[110,123,139] \
,[255,255,224] \
,[255,255,224] \
,[238,238,209] \
,[205,205,180] \
,[139,139,122] \
,[0,255,0] \
,[50,205,50] \
,[250,240,230] \
,[255,0,255] \
,[255,0,255] \
,[238,0,238] \
,[205,0,205] \
,[139,0,139] \
,[128,0,0] \
,[176,48,96] \
,[255,52,179] \
,[238,48,167] \
,[205,41,144] \
,[139,28,98] \
,[102,205,170] \
,[0,0,205] \
,[186,85,211] \
,[224,102,255] \
,[209,95,238] \
,[180,82,205] \
,[122,55,139] \
,[147,112,219] \
,[171,130,255] \
,[159,121,238] \
,[137,104,205] \
,[93,71,139] \
,[60,179,113] \
,[123,104,238] \
,[0,250,154] \
,[72,209,204] \
,[199,21,133] \
,[25,25,112] \
,[245,255,250] \
,[255,228,225] \
,[255,228,225] \
,[238,213,210] \
,[205,183,181] \
,[139,125,123] \
,[255,228,181] \
,[255,222,173] \
,[255,222,173] \
,[238,207,161] \
,[205,179,139] \
,[139,121,94] \
,[0,0,128] \
,[0,0,128] \
,[253,245,230] \
,[128,128,0] \
,[107,142,35] \
,[192,255,62] \
,[179,238,58] \
,[154,205,50] \
,[105,139,34] \
,[255,165,0] \
,[255,165,0] \
,[238,154,0] \
,[205,133,0] \
,[139,90,0] \
,[255,69,0] \
,[255,69,0] \
,[238,64,0] \
,[205,55,0] \
,[139,37,0] \
,[218,112,214] \
,[255,131,250] \
,[238,122,233] \
,[205,105,201] \
,[139,71,137] \
,[238,232,170] \
,[152,251,152] \
,[154,255,154] \
,[144,238,144] \
,[124,205,124] \
,[84,139,84] \
,[175,238,238] \
,[187,255,255] \
,[174,238,238] \
,[150,205,205] \
,[102,139,139] \
,[219,112,147] \
,[255,130,171] \
,[238,121,159] \
,[205,104,137] \
,[139,71,93] \
,[255,239,213] \
,[255,218,185] \
,[255,218,185] \
,[238,203,173] \
,[205,175,149] \
,[139,119,101] \
,[205,133,63] \
,[255,192,203] \
,[255,181,197] \
,[238,169,184] \
,[205,145,158] \
,[139,99,108] \
,[221,160,221] \
,[255,187,255] \
,[238,174,238] \
,[205,150,205] \
,[139,102,139] \
,[176,224,230] \
,[128,0,128] \
,[160,32,240] \
,[155,48,255] \
,[145,44,238] \
,[125,38,205] \
,[85,26,139] \
,[102,51,153] \
,[255,0,0] \
,[255,0,0] \
,[238,0,0] \
,[205,0,0] \
,[139,0,0] \
,[188,143,143] \
,[255,193,193] \
,[238,180,180] \
,[205,155,155] \
,[139,105,105] \
,[65,105,225] \
,[72,118,255] \
,[67,110,238] \
,[58,95,205] \
,[39,64,139] \
,[139,69,19] \
,[250,128,114] \
,[255,140,105] \
,[238,130,98] \
,[205,112,84] \
,[139,76,57] \
,[244,164,96] \
,[46,139,87] \
,[84,255,159] \
,[78,238,148] \
,[67,205,128] \
,[46,139,87] \
,[255,245,238] \
,[255,245,238] \
,[238,229,222] \
,[205,197,191] \
,[139,134,130] \
,[160,82,45] \
,[255,130,71] \
,[238,121,66] \
,[205,104,57] \
,[139,71,38] \
,[192,192,192] \
,[135,206,235] \
,[135,206,255] \
,[126,192,238] \
,[108,166,205] \
,[74,112,139] \
,[106,90,205] \
,[131,111,255] \
,[122,103,238] \
,[105,89,205] \
,[71,60,139] \
,[112,128,144] \
,[198,226,255] \
,[185,211,238] \
,[159,182,205] \
,[108,123,139] \
,[255,250,250] \
,[255,250,250] \
,[238,233,233] \
,[205,201,201] \
,[139,137,137] \
,[0,255,127] \
,[0,255,127] \
,[0,238,118] \
,[0,205,102] \
,[0,139,69] \
,[70,130,180] \
,[99,184,255] \
,[92,172,238] \
,[79,148,205] \
,[54,100,139] \
,[210,180,140] \
,[255,165,79] \
,[238,154,73] \
,[205,133,63] \
,[139,90,43] \
,[0,128,128] \
,[216,191,216] \
,[255,225,255] \
,[238,210,238] \
,[205,181,205] \
,[139,123,139] \
,[255,99,71] \
,[255,99,71] \
,[238,92,66] \
,[205,79,57] \
,[139,54,38] \
,[64,224,208] \
,[0,245,255] \
,[0,229,238] \
,[0,197,205] \
,[0,134,139] \
,[238,130,238] \
,[208,32,144] \
,[255,62,150] \
,[238,58,140] \
,[205,50,120] \
,[139,34,82] \
,[128,128,128] \
,[0,128,0] \
,[128,0,0] \
,[128,0,128] \
,[245,222,179] \
,[255,231,186] \
,[238,216,174] \
,[205,186,150] \
,[139,126,102] \
,[255,255,255] \
,[245,245,245] \
,[190,190,190] \
,[0,255,0] \
,[176,48,96] \
,[160,32,240] \
,[255,255,0] \
,[255,255,0] \
,[238,238,0] \
,[205,205,0] \
,[139,139,0] \
,[154,205,50] \
])
### End of paste ###
B = np.array([ \
[240,248,255,"AliceBlue","#F0F8FF"] \
,[250,235,215,"AntiqueWhite","#FAEBD7"] \
,[255,239,219,"AntiqueWhite1","#FFEFDB"] \
,[238,223,204,"AntiqueWhite2","#EEDFCC"] \
,[205,192,176,"AntiqueWhite3","#CDC0B0"] \
,[139,131,120,"AntiqueWhite4","#8B8378"] \
,[0,255,255,"aqua","#00FFFF"] \
,[127,255,212,"aquamarine","#7FFFD4"] \
,[127,255,212,"aquamarine1","#7FFFD4"] \
,[118,238,198,"aquamarine2","#76EEC6"] \
,[102,205,170,"aquamarine3","#66CDAA"] \
,[69,139,116,"aquamarine4","#458B74"] \
,[240,255,255,"azure","#F0FFFF"] \
,[240,255,255,"azure1","#F0FFFF"] \
,[224,238,238,"azure2","#E0EEEE"] \
,[193,205,205,"azure3","#C1CDCD"] \
,[131,139,139,"azure4","#838B8B"] \
,[245,245,220,"beige","#F5F5DC"] \
,[255,228,196,"bisque","#FFE4C4"] \
,[255,228,196,"bisque1","#FFE4C4"] \
,[238,213,183,"bisque2","#EED5B7"] \
,[205,183,158,"bisque3","#CDB79E"] \
,[139,125,107,"bisque4","#8B7D6B"] \
,[0,0,0,"black","#000000"] \
,[255,235,205,"BlanchedAlmond","#FFEBCD"] \
,[0,0,255,"blue","#0000FF"] \
,[0,0,255,"blue1","#0000FF"] \
,[0,0,238,"blue2","#0000EE"] \
,[0,0,205,"blue3","#0000CD"] \
,[0,0,139,"blue4","#00008B"] \
,[138,43,226,"BlueViolet","#8A2BE2"] \
,[165,42,42,"brown","#A52A2A"] \
,[255,64,64,"brown1","#FF4040"] \
,[238,59,59,"brown2","#EE3B3B"] \
,[205,51,51,"brown3","#CD3333"] \
,[139,35,35,"brown4","#8B2323"] \
,[222,184,135,"burlywood","#DEB887"] \
,[255,211,155,"burlywood1","#FFD39B"] \
,[238,197,145,"burlywood2","#EEC591"] \
,[205,170,125,"burlywood3","#CDAA7D"] \
,[139,115,85,"burlywood4","#8B7355"] \
,[95,158,160,"CadetBlue","#5F9EA0"] \
,[152,245,255,"CadetBlue1","#98F5FF"] \
,[142,229,238,"CadetBlue2","#8EE5EE"] \
,[122,197,205,"CadetBlue3","#7AC5CD"] \
,[83,134,139,"CadetBlue4","#53868B"] \
,[127,255,0,"chartreuse","#7FFF00"] \
,[127,255,0,"chartreuse1","#7FFF00"] \
,[118,238,0,"chartreuse2","#76EE00"] \
,[102,205,0,"chartreuse3","#66CD00"] \
,[69,139,0,"chartreuse4","#458B00"] \
,[210,105,30,"chocolate","#D2691E"] \
,[255,127,36,"chocolate1","#FF7F24"] \
,[238,118,33,"chocolate2","#EE7621"] \
,[205,102,29,"chocolate3","#CD661D"] \
,[139,69,19,"chocolate4","#8B4513"] \
,[255,127,80,"coral","#FF7F50"] \
,[255,114,86,"coral1","#FF7256"] \
,[238,106,80,"coral2","#EE6A50"] \
,[205,91,69,"coral3","#CD5B45"] \
,[139,62,47,"coral4","#8B3E2F"] \
,[100,149,237,"CornflowerBlue","#6495ED"] \
,[255,248,220,"cornsilk","#FFF8DC"] \
,[255,248,220,"cornsilk1","#FFF8DC"] \
,[238,232,205,"cornsilk2","#EEE8CD"] \
,[205,200,177,"cornsilk3","#CDC8B1"] \
,[139,136,120,"cornsilk4","#8B8878"] \
,[220,20,60,"crimson","#DC143C"] \
,[0,255,255,"cyan","#00FFFF"] \
,[0,255,255,"cyan1","#00FFFF"] \
,[0,238,238,"cyan2","#00EEEE"] \
,[0,205,205,"cyan3","#00CDCD"] \
,[0,139,139,"cyan4","#008B8B"] \
,[0,0,139,"DarkBlue","#00008B"] \
,[0,139,139,"DarkCyan","#008B8B"] \
,[184,134,11,"DarkGoldenrod","#B8860B"] \
,[255,185,15,"DarkGoldenrod1","#FFB90F"] \
,[238,173,14,"DarkGoldenrod2","#EEAD0E"] \
,[205,149,12,"DarkGoldenrod3","#CD950C"] \
,[139,101,8,"DarkGoldenrod4","#8B6508"] \
,[169,169,169,"DarkGray","#A9A9A9"] \
,[0,100,0,"DarkGreen","#006400"] \
,[189,183,107,"DarkKhaki","#BDB76B"] \
,[139,0,139,"DarkMagenta","#8B008B"] \
,[85,107,47,"DarkOliveGreen","#556B2F"] \
,[202,255,112,"DarkOliveGreen1","#CAFF70"] \
,[188,238,104,"DarkOliveGreen2","#BCEE68"] \
,[162,205,90,"DarkOliveGreen3","#A2CD5A"] \
,[110,139,61,"DarkOliveGreen4","#6E8B3D"] \
,[255,140,0,"DarkOrange","#FF8C00"] \
,[255,127,0,"DarkOrange1","#FF7F00"] \
,[238,118,0,"DarkOrange2","#EE7600"] \
,[205,102,0,"DarkOrange3","#CD6600"] \
,[139,69,0,"DarkOrange4","#8B4500"] \
,[153,50,204,"DarkOrchid","#9932CC"] \
,[191,62,255,"DarkOrchid1","#BF3EFF"] \
,[178,58,238,"DarkOrchid2","#B23AEE"] \
,[154,50,205,"DarkOrchid3","#9A32CD"] \
,[104,34,139,"DarkOrchid4","#68228B"] \
,[139,0,0,"DarkRed","#8B0000"] \
,[233,150,122,"DarkSalmon","#E9967A"] \
,[143,188,143,"DarkSeaGreen","#8FBC8F"] \
,[193,255,193,"DarkSeaGreen1","#C1FFC1"] \
,[180,238,180,"DarkSeaGreen2","#B4EEB4"] \
,[155,205,155,"DarkSeaGreen3","#9BCD9B"] \
,[105,139,105,"DarkSeaGreen4","#698B69"] \
,[72,61,139,"DarkSlateBlue","#483D8B"] \
,[47,79,79,"DarkSlateGray","#2F4F4F"] \
,[151,255,255,"DarkSlateGray1","#97FFFF"] \
,[141,238,238,"DarkSlateGray2","#8DEEEE"] \
,[121,205,205,"DarkSlateGray3","#79CDCD"] \
,[82,139,139,"DarkSlateGray4","#528B8B"] \
,[0,206,209,"DarkTurquoise","#00CED1"] \
,[148,0,211,"DarkViolet","#9400D3"] \
,[255,20,147,"DeepPink","#FF1493"] \
,[255,20,147,"DeepPink1","#FF1493"] \
,[238,18,137,"DeepPink2","#EE1289"] \
,[205,16,118,"DeepPink3","#CD1076"] \
,[139,10,80,"DeepPink4","#8B0A50"] \
,[0,191,255,"DeepSkyBlue","#00BFFF"] \
,[0,191,255,"DeepSkyBlue1","#00BFFF"] \
,[0,178,238,"DeepSkyBlue2","#00B2EE"] \
,[0,154,205,"DeepSkyBlue3","#009ACD"] \
,[0,104,139,"DeepSkyBlue4","#00688B"] \
,[105,105,105,"DimGray","#696969"] \
,[30,144,255,"DodgerBlue","#1E90FF"] \
,[30,144,255,"DodgerBlue1","#1E90FF"] \
,[28,134,238,"DodgerBlue2","#1C86EE"] \
,[24,116,205,"DodgerBlue3","#1874CD"] \
,[16,78,139,"DodgerBlue4","#104E8B"] \
,[178,34,34,"firebrick","#B22222"] \
,[255,48,48,"firebrick1","#FF3030"] \
,[238,44,44,"firebrick2","#EE2C2C"] \
,[205,38,38,"firebrick3","#CD2626"] \
,[139,26,26,"firebrick4","#8B1A1A"] \
,[255,250,240,"floral-white","#FFFAF0"] \
,[255,250,240,"FloralWhite","#FFFAF0"] \
,[34,139,34,"ForestGreen","#228B22"] \
,[255,0,255,"fuchsia","#FF00FF"] \
,[220,220,220,"gainsboro","#DCDCDC"] \
,[248,248,255,"GhostWhite","#F8F8FF"] \
,[255,215,0,"gold","#FFD700"] \
,[255,215,0,"gold1","#FFD700"] \
,[238,201,0,"gold2","#EEC900"] \
,[205,173,0,"gold3","#CDAD00"] \
,[139,117,0,"gold4","#8B7500"] \
,[218,165,32,"goldenrod","#DAA520"] \
,[255,193,37,"goldenrod1","#FFC125"] \
,[238,180,34,"goldenrod2","#EEB422"] \
,[205,155,29,"goldenrod3","#CD9B1D"] \
,[139,105,20,"goldenrod4","#8B6914"] \
,[128,128,128,"gray","#808080"] \
,[190,190,190,"gray","#BEBEBE"] \
,[0,0,0,"gray0","#000000"] \
,[3,3,3,"gray1","#030303"] \
,[26,26,26,"gray10","#1A1A1A"] \
,[255,255,255,"gray100","#FFFFFF"] \
,[28,28,28,"gray11","#1C1C1C"] \
,[31,31,31,"gray12","#1F1F1F"] \
,[33,33,33,"gray13","#212121"] \
,[36,36,36,"gray14","#242424"] \
,[38,38,38,"gray15","#262626"] \
,[41,41,41,"gray16","#292929"] \
,[43,43,43,"gray17","#2B2B2B"] \
,[46,46,46,"gray18","#2E2E2E"] \
,[48,48,48,"gray19","#303030"] \
,[5,5,5,"gray2","#050505"] \
,[51,51,51,"gray20","#333333"] \
,[54,54,54,"gray21","#363636"] \
,[56,56,56,"gray22","#383838"] \
,[59,59,59,"gray23","#3B3B3B"] \
,[61,61,61,"gray24","#3D3D3D"] \
,[64,64,64,"gray25","#404040"] \
,[66,66,66,"gray26","#424242"] \
,[69,69,69,"gray27","#454545"] \
,[71,71,71,"gray28","#474747"] \
,[74,74,74,"gray29","#4A4A4A"] \
,[8,8,8,"gray3","#080808"] \
,[77,77,77,"gray30","#4D4D4D"] \
,[79,79,79,"gray31","#4F4F4F"] \
,[82,82,82,"gray32","#525252"] \
,[84,84,84,"gray33","#545454"] \
,[87,87,87,"gray34","#575757"] \
,[89,89,89,"gray35","#595959"] \
,[92,92,92,"gray36","#5C5C5C"] \
,[94,94,94,"gray37","#5E5E5E"] \
,[97,97,97,"gray38","#616161"] \
,[99,99,99,"gray39","#636363"] \
,[10,10,10,"gray4","#0A0A0A"] \
,[2,102,102,"gray40","#026666"] \
,[105,105,105,"gray41","#696969"] \
,[107,107,107,"gray42","#6B6B6B"] \
,[110,110,110,"gray43","#6E6E6E"] \
,[112,112,112,"gray44","#707070"] \
,[115,115,115,"gray45","#737373"] \
,[117,117,117,"gray46","#757575"] \
,[120,120,120,"gray47","#787878"] \
,[122,122,122,"gray48","#7A7A7A"] \
,[125,125,125,"gray49","#7D7D7D"] \
,[13,13,13,"gray5","#0D0D0D"] \
,[127,127,127,"gray50","#7F7F7F"] \
,[130,130,130,"gray51","#828282"] \
,[133,133,133,"gray52","#858585"] \
,[135,135,135,"gray53","#878787"] \
,[138,138,138,"gray54","#8A8A8A"] \
,[140,140,140,"gray55","#8C8C8C"] \
,[143,143,143,"gray56","#8F8F8F"] \
,[145,145,145,"gray57","#919191"] \
,[148,148,148,"gray58","#949494"] \
,[150,150,150,"gray59","#969696"] \
,[15,15,15,"gray6","#0F0F0F"] \
,[153,153,153,"gray60","#999999"] \
,[156,156,156,"gray61","#9C9C9C"] \
,[158,158,158,"gray62","#9E9E9E"] \
,[161,161,161,"gray63","#A1A1A1"] \
,[163,163,163,"gray64","#A3A3A3"] \
,[166,166,166,"gray65","#A6A6A6"] \
,[168,168,168,"gray66","#A8A8A8"] \
,[171,171,171,"gray67","#ABABAB"] \
,[173,173,173,"gray68","#ADADAD"] \
,[176,176,176,"gray69","#B0B0B0"] \
,[18,18,18,"gray7","#121212"] \
,[179,179,179,"gray70","#B3B3B3"] \
,[181,181,181,"gray71","#B5B5B5"] \
,[184,184,184,"gray72","#B8B8B8"] \
,[186,186,186,"gray73","#BABABA"] \
,[189,189,189,"gray74","#BDBDBD"] \
,[191,191,191,"gray75","#BFBFBF"] \
,[194,194,194,"gray76","#C2C2C2"] \
,[196,196,196,"gray77","#C4C4C4"] \
,[199,199,199,"gray78","#C7C7C7"] \
,[201,201,201,"gray79","#C9C9C9"] \
,[20,20,20,"gray8","#141414"] \
,[204,204,204,"gray80","#CCCCCC"] \
,[207,207,207,"gray81","#CFCFCF"] \
,[209,209,209,"gray82","#D1D1D1"] \
,[212,212,212,"gray83","#D4D4D4"] \
,[214,214,214,"gray84","#D6D6D6"] \
,[217,217,217,"gray85","#D9D9D9"] \
,[219,219,219,"gray86","#DBDBDB"] \
,[222,222,222,"gray87","#DEDEDE"] \
,[224,224,224,"gray88","#E0E0E0"] \
,[227,227,227,"gray89","#E3E3E3"] \
,[23,23,23,"gray9","#171717"] \
,[229,229,229,"gray90","#E5E5E5"] \
,[232,232,232,"gray91","#E8E8E8"] \
,[235,235,235,"gray92","#EBEBEB"] \
,[237,237,237,"gray93","#EDEDED"] \
,[240,240,240,"gray94","#F0F0F0"] \
,[242,242,242,"gray95","#F2F2F2"] \
,[245,245,245,"gray96","#F5F5F5"] \
,[247,247,247,"gray97","#F7F7F7"] \
,[250,250,250,"gray98","#FAFAFA"] \
,[252,252,252,"gray99","#FCFCFC"] \
,[0,128,0,"green","#008000"] \
,[0,255,0,"green","#00FF00"] \
,[0,255,0,"green1","#00FF00"] \
,[0,238,0,"green2","#00EE00"] \
,[0,205,0,"green3","#00CD00"] \
,[0,139,0,"green4","#008B00"] \
,[173,255,47,"GreenYellow","#ADFF2F"] \
,[240,255,240,"honeydew","#F0FFF0"] \
,[240,255,240,"honeydew1","#F0FFF0"] \
,[224,238,224,"honeydew2","#E0EEE0"] \
,[193,205,193,"honeydew3","#C1CDC1"] \
,[131,139,131,"honeydew4","#838B83"] \
,[255,105,180,"HotPink","#FF69B4"] \
,[255,110,180,"HotPink1","#FF6EB4"] \
,[238,106,167,"HotPink2","#EE6AA7"] \
,[205,96,144,"HotPink3","#CD6090"] \
,[139,58,98,"HotPink4","#8B3A62"] \
,[205,92,92,"IndianRed","#CD5C5C"] \
,[255,106,106,"IndianRed1","#FF6A6A"] \
,[238,99,99,"IndianRed2","#EE6363"] \
,[205,85,85,"IndianRed3","#CD5555"] \
,[139,58,58,"IndianRed4","#8B3A3A"] \
,[75,0,130,"indigo","#4B0082"] \
,[255,255,240,"ivory","#FFFFF0"] \
,[255,255,240,"ivory1","#FFFFF0"] \
,[238,238,224,"ivory2","#EEEEE0"] \
,[205,205,193,"ivory3","#CDCDC1"] \
,[139,139,131,"ivory4","#8B8B83"] \
,[240,230,140,"khaki","#F0E68C"] \
,[255,246,143,"khaki1","#FFF68F"] \
,[238,230,133,"khaki2","#EEE685"] \
,[205,198,115,"khaki3","#CDC673"] \
,[139,134,78,"khaki4","#8B864E"] \
,[230,230,250,"lavender","#E6E6FA"] \
,[255,240,245,"LavenderBlush","#FFF0F5"] \
,[255,240,245,"LavenderBlush1","#FFF0F5"] \
,[238,224,229,"LavenderBlush2","#EEE0E5"] \
,[205,193,197,"LavenderBlush3","#CDC1C5"] \
,[139,131,134,"LavenderBlush4","#8B8386"] \
,[124,252,0,"LawnGreen","#7CFC00"] \
,[255,250,205,"LemonChiffon","#FFFACD"] \
,[255,250,205,"LemonChiffon1","#FFFACD"] \
,[238,233,191,"LemonChiffon2","#EEE9BF"] \
,[205,201,165,"LemonChiffon3","#CDC9A5"] \
,[139,137,112,"LemonChiffon4","#8B8970"] \
,[173,216,230,"LightBlue","#ADD8E6"] \
,[191,239,255,"LightBlue1","#BFEFFF"] \
,[178,223,238,"LightBlue2","#B2DFEE"] \
,[154,192,205,"LightBlue3","#9AC0CD"] \
,[104,131,139,"LightBlue4","#68838B"] \
,[240,128,128,"LightCoral","#F08080"] \
,[224,255,255,"LightCyan","#E0FFFF"] \
,[224,255,255,"LightCyan1","#E0FFFF"] \
,[209,238,238,"LightCyan2","#D1EEEE"] \
,[180,205,205,"LightCyan3","#B4CDCD"] \
,[122,139,139,"LightCyan4","#7A8B8B"] \
,[238,221,130,"LightGoldenrod","#EEDD82"] \
,[255,236,139,"LightGoldenrod1","#FFEC8B"] \
,[238,220,130,"LightGoldenrod2","#EEDC82"] \
,[205,190,112,"LightGoldenrod3","#CDBE70"] \
,[139,129,76,"LightGoldenrod4","#8B814C"] \
,[250,250,210,"LightGoldenrodYellow","#FAFAD2"] \
,[211,211,211,"LightGray","#D3D3D3"] \
,[144,238,144,"LightGreen","#90EE90"] \
,[255,182,193,"LightPink","#FFB6C1"] \
,[255,174,185,"LightPink1","#FFAEB9"] \
,[238,162,173,"LightPink2","#EEA2AD"] \
,[205,140,149,"LightPink3","#CD8C95"] \
,[139,95,101,"LightPink4","#8B5F65"] \
,[255,160,122,"LightSalmon","#FFA07A"] \
,[255,160,122,"LightSalmon1","#FFA07A"] \
,[238,149,114,"LightSalmon2","#EE9572"] \
,[205,129,98,"LightSalmon3","#CD8162"] \
,[139,87,66,"LightSalmon4","#8B5742"] \
,[32,178,170,"LightSeaGreen","#20B2AA"] \
,[135,206,250,"LightSkyBlue","#87CEFA"] \
,[176,226,255,"LightSkyBlue1","#B0E2FF"] \
,[164,211,238,"LightSkyBlue2","#A4D3EE"] \
,[141,182,205,"LightSkyBlue3","#8DB6CD"] \
,[96,123,139,"LightSkyBlue4","#607B8B"] \
,[132,112,255,"LightSlateBlue","#8470FF"] \
,[119,136,153,"LightSlateGray","#778899"] \
,[176,196,222,"LightSteelBlue","#B0C4DE"] \
,[202,225,255,"LightSteelBlue1","#CAE1FF"] \
,[188,210,238,"LightSteelBlue2","#BCD2EE"] \
,[162,181,205,"LightSteelBlue3","#A2B5CD"] \
,[110,123,139,"LightSteelBlue4","#6E7B8B"] \
,[255,255,224,"LightYellow","#FFFFE0"] \
,[255,255,224,"LightYellow1","#FFFFE0"] \
,[238,238,209,"LightYellow2","#EEEED1"] \
,[205,205,180,"LightYellow3","#CDCDB4"] \
,[139,139,122,"LightYellow4","#8B8B7A"] \
,[0,255,0,"lime","#00FF00"] \
,[50,205,50,"LimeGreen","#32CD32"] \
,[250,240,230,"linen","#FAF0E6"] \
,[255,0,255,"magenta","#FF00FF"] \
,[255,0,255,"magenta1","#FF00FF"] \
,[238,0,238,"magenta2","#EE00EE"] \
,[205,0,205,"magenta3","#CD00CD"] \
,[139,0,139,"magenta4","#8B008B"] \
,[128,0,0,"maroon","#800000"] \
,[176,48,96,"maroon","#B03060"] \
,[255,52,179,"maroon1","#FF34B3"] \
,[238,48,167,"maroon2","#EE30A7"] \
,[205,41,144,"maroon3","#CD2990"] \
,[139,28,98,"maroon4","#8B1C62"] \
,[102,205,170,"MediumAquamarine","#66CDAA"] \
,[0,0,205,"MediumBlue","#0000CD"] \
,[186,85,211,"MediumOrchid","#BA55D3"] \
,[224,102,255,"MediumOrchid1","#E066FF"] \
,[209,95,238,"MediumOrchid2","#D15FEE"] \
,[180,82,205,"MediumOrchid3","#B452CD"] \
,[122,55,139,"MediumOrchid4","#7A378B"] \
,[147,112,219,"MediumPurple","#9370DB"] \
,[171,130,255,"MediumPurple1","#AB82FF"] \
,[159,121,238,"MediumPurple2","#9F79EE"] \
,[137,104,205,"MediumPurple3","#8968CD"] \
,[93,71,139,"MediumPurple4","#5D478B"] \
,[60,179,113,"MediumSeaGreen","#3CB371"] \
,[123,104,238,"MediumSlateBlue","#7B68EE"] \
,[0,250,154,"MediumSpringGreen","#00FA9A"] \
,[72,209,204,"MediumTurquoise","#48D1CC"] \
,[199,21,133,"MediumVioletRed","#C71585"] \
,[25,25,112,"MidnightBlue","#191970"] \
,[245,255,250,"MintCream","#F5FFFA"] \
,[255,228,225,"MistyRose","#FFE4E1"] \
,[255,228,225,"MistyRose1","#FFE4E1"] \
,[238,213,210,"MistyRose2","#EED5D2"] \
,[205,183,181,"MistyRose3","#CDB7B5"] \
,[139,125,123,"MistyRose4","#8B7D7B"] \
,[255,228,181,"moccasin","#FFE4B5"] \
,[255,222,173,"NavajoWhite","#FFDEAD"] \
,[255,222,173,"NavajoWhite1","#FFDEAD"] \
,[238,207,161,"NavajoWhite2","#EECFA1"] \
,[205,179,139,"NavajoWhite3","#CDB38B"] \
,[139,121,94,"NavajoWhite4","#8B795E"] \
,[0,0,128,"navy","#000080"] \
,[0,0,128,"NavyBlue","#000080"] \
,[253,245,230,"OldLace","#FDF5E6"] \
,[128,128,0,"olive","#808000"] \
,[107,142,35,"OliveDrab","#6B8E23"] \
,[192,255,62,"OliveDrab1","#C0FF3E"] \
,[179,238,58,"OliveDrab2","#B3EE3A"] \
,[154,205,50,"OliveDrab3","#9ACD32"] \
,[105,139,34,"OliveDrab4","#698B22"] \
,[255,165,0,"orange","#FFA500"] \
,[255,165,0,"orange1","#FFA500"] \
,[238,154,0,"orange2","#EE9A00"] \
,[205,133,0,"orange3","#CD8500"] \
,[139,90,0,"orange4","#8B5A00"] \
,[255,69,0,"OrangeRed","#FF4500"] \
,[255,69,0,"OrangeRed1","#FF4500"] \
,[238,64,0,"OrangeRed2","#EE4000"] \
,[205,55,0,"OrangeRed3","#CD3700"] \
,[139,37,0,"OrangeRed4","#8B2500"] \
,[218,112,214,"orchid","#DA70D6"] \
,[255,131,250,"orchid1","#FF83FA"] \
,[238,122,233,"orchid2","#EE7AE9"] \
,[205,105,201,"orchid3","#CD69C9"] \
,[139,71,137,"orchid4","#8B4789"] \
,[238,232,170,"PaleGoldenrod","#EEE8AA"] \
,[152,251,152,"PaleGreen","#98FB98"] \
,[154,255,154,"PaleGreen1","#9AFF9A"] \
,[144,238,144,"PaleGreen2","#90EE90"] \
,[124,205,124,"PaleGreen3","#7CCD7C"] \
,[84,139,84,"PaleGreen4","#548B54"] \
,[175,238,238,"PaleTurquoise","#AFEEEE"] \
,[187,255,255,"PaleTurquoise1","#BBFFFF"] \
,[174,238,238,"PaleTurquoise2","#AEEEEE"] \
,[150,205,205,"PaleTurquoise3","#96CDCD"] \
,[102,139,139,"PaleTurquoise4","#668B8B"] \
,[219,112,147,"PaleVioletRed","#DB7093"] \
,[255,130,171,"PaleVioletRed1","#FF82AB"] \
,[238,121,159,"PaleVioletRed2","#EE799F"] \
,[205,104,137,"PaleVioletRed3","#CD6889"] \
,[139,71,93,"PaleVioletRed4","#8B475D"] \
,[255,239,213,"PapayaWhip","#FFEFD5"] \
,[255,218,185,"PeachPuff","#FFDAB9"] \
,[255,218,185,"PeachPuff1","#FFDAB9"] \
,[238,203,173,"PeachPuff2","#EECBAD"] \
,[205,175,149,"PeachPuff3","#CDAF95"] \
,[139,119,101,"PeachPuff4","#8B7765"] \
,[205,133,63,"peru","#CD853F"] \
,[255,192,203,"pink","#FFC0CB"] \
,[255,181,197,"pink1","#FFB5C5"] \
,[238,169,184,"pink2","#EEA9B8"] \
,[205,145,158,"pink3","#CD919E"] \
,[139,99,108,"pink4","#8B636C"] \
,[221,160,221,"plum","#DDA0DD"] \
,[255,187,255,"plum1","#FFBBFF"] \
,[238,174,238,"plum2","#EEAEEE"] \
,[205,150,205,"plum3","#CD96CD"] \
,[139,102,139,"plum4","#8B668B"] \
,[176,224,230,"PowderBlue","#B0E0E6"] \
,[128,0,128,"purple","#800080"] \
,[160,32,240,"purple","#A020F0"] \
,[155,48,255,"purple1","#9B30FF"] \
,[145,44,238,"purple2","#912CEE"] \
,[125,38,205,"purple3","#7D26CD"] \
,[85,26,139,"purple4","#551A8B"] \
,[102,51,153,"RebeccaPurple","#663399"] \
,[255,0,0,"red","#FF0000"] \
,[255,0,0,"red1","#FF0000"] \
,[238,0,0,"red2","#EE0000"] \
,[205,0,0,"red3","#CD0000"] \
,[139,0,0,"red4","#8B0000"] \
,[188,143,143,"RosyBrown","#BC8F8F"] \
,[255,193,193,"RosyBrown1","#FFC1C1"] \
,[238,180,180,"RosyBrown2","#EEB4B4"] \
,[205,155,155,"RosyBrown3","#CD9B9B"] \
,[139,105,105,"RosyBrown4","#8B6969"] \
,[65,105,225,"RoyalBlue","#4169E1"] \
,[72,118,255,"RoyalBlue1","#4876FF"] \
,[67,110,238,"RoyalBlue2","#436EEE"] \
,[58,95,205,"RoyalBlue3","#3A5FCD"] \
,[39,64,139,"RoyalBlue4","#27408B"] \
,[139,69,19,"SaddleBrown","#8B4513"] \
,[250,128,114,"salmon","#FA8072"] \
,[255,140,105,"salmon1","#FF8C69"] \
,[238,130,98,"salmon2","#EE8262"] \
,[205,112,84,"salmon3","#CD7054"] \
,[139,76,57,"salmon4","#8B4C39"] \
,[244,164,96,"SandyBrown","#F4A460"] \
,[46,139,87,"SeaGreen","#2E8B57"] \
,[84,255,159,"SeaGreen1","#54FF9F"] \
,[78,238,148,"SeaGreen2","#4EEE94"] \
,[67,205,128,"SeaGreen3","#43CD80"] \
,[46,139,87,"SeaGreen4","#2E8B57"] \
,[255,245,238,"seashell","#FFF5EE"] \
,[255,245,238,"seashell1","#FFF5EE"] \
,[238,229,222,"seashell2","#EEE5DE"] \
,[205,197,191,"seashell3","#CDC5BF"] \
,[139,134,130,"seashell4","#8B8682"] \
,[160,82,45,"sienna","#A0522D"] \
,[255,130,71,"sienna1","#FF8247"] \
,[238,121,66,"sienna2","#EE7942"] \
,[205,104,57,"sienna3","#CD6839"] \
,[139,71,38,"sienna4","#8B4726"] \
,[192,192,192,"silver","#C0C0C0"] \
,[135,206,235,"SkyBlue","#87CEEB"] \
,[135,206,255,"SkyBlue1","#87CEFF"] \
,[126,192,238,"SkyBlue2","#7EC0EE"] \
,[108,166,205,"SkyBlue3","#6CA6CD"] \
,[74,112,139,"SkyBlue4","#4A708B"] \
,[106,90,205,"SlateBlue","#6A5ACD"] \
,[131,111,255,"SlateBlue1","#836FFF"] \
,[122,103,238,"SlateBlue2","#7A67EE"] \
,[105,89,205,"SlateBlue3","#6959CD"] \
,[71,60,139,"SlateBlue4","#473C8B"] \
,[112,128,144,"SlateGray","#708090"] \
,[198,226,255,"SlateGray1","#C6E2FF"] \
,[185,211,238,"SlateGray2","#B9D3EE"] \
,[159,182,205,"SlateGray3","#9FB6CD"] \
,[108,123,139,"SlateGray4","#6C7B8B"] \
,[255,250,250,"snow","#FFFAFA"] \
,[255,250,250,"snow1","#FFFAFA"] \
,[238,233,233,"snow2","#EEE9E9"] \
,[205,201,201,"snow3","#CDC9C9"] \
,[139,137,137,"snow4","#8B8989"] \
,[0,255,127,"SpringGreen","#00FF7F"] \
,[0,255,127,"SpringGreen1","#00FF7F"] \
,[0,238,118,"SpringGreen2","#00EE76"] \
,[0,205,102,"SpringGreen3","#00CD66"] \
,[0,139,69,"SpringGreen4","#008B45"] \
,[70,130,180,"SteelBlue","#4682B4"] \
,[99,184,255,"SteelBlue1","#63B8FF"] \
,[92,172,238,"SteelBlue2","#5CACEE"] \
,[79,148,205,"SteelBlue3","#4F94CD"] \
,[54,100,139,"SteelBlue4","#36648B"] \
,[210,180,140,"tan","#D2B48C"] \
,[255,165,79,"tan1","#FFA54F"] \
,[238,154,73,"tan2","#EE9A49"] \
,[205,133,63,"tan3","#CD853F"] \
,[139,90,43,"tan4","#8B5A2B"] \
,[0,128,128,"teal","#008080"] \
,[216,191,216,"thistle","#D8BFD8"] \
,[255,225,255,"thistle1","#FFE1FF"] \
,[238,210,238,"thistle2","#EED2EE"] \
,[205,181,205,"thistle3","#CDB5CD"] \
,[139,123,139,"thistle4","#8B7B8B"] \
,[255,99,71,"tomato","#FF6347"] \
,[255,99,71,"tomato1","#FF6347"] \
,[238,92,66,"tomato2","#EE5C42"] \
,[205,79,57,"tomato3","#CD4F39"] \
,[139,54,38,"tomato4","#8B3626"] \
,[64,224,208,"turquoise","#40E0D0"] \
,[0,245,255,"turquoise1","#00F5FF"] \
,[0,229,238,"turquoise2","#00E5EE"] \
,[0,197,205,"turquoise3","#00C5CD"] \
,[0,134,139,"turquoise4","#00868B"] \
,[238,130,238,"violet","#EE82EE"] \
,[208,32,144,"VioletRed","#D02090"] \
,[255,62,150,"VioletRed1","#FF3E96"] \
,[238,58,140,"VioletRed2","#EE3A8C"] \
,[205,50,120,"VioletRed3","#CD3278"] \
,[139,34,82,"VioletRed4","#8B2252"] \
,[128,128,128,"WebGray","#808080"] \
,[0,128,0,"WebGreen","#008000"] \
,[128,0,0,"WebMaroon","#800000"] \
,[128,0,128,"WebPurple","#800080"] \
,[245,222,179,"wheat","#F5DEB3"] \
,[255,231,186,"wheat1","#FFE7BA"] \
,[238,216,174,"wheat2","#EED8AE"] \
,[205,186,150,"wheat3","#CDBA96"] \
,[139,126,102,"wheat4","#8B7E66"] \
,[255,255,255,"white","#FFFFFF"] \
,[245,245,245,"WhiteSmoke","#F5F5F5"] \
,[190,190,190,"X11Gray","#BEBEBE"] \
,[0,255,0,"X11Green","#00FF00"] \
,[176,48,96,"X11Maroon","#B03060"] \
,[160,32,240,"X11Purple","#A020F0"] \
,[255,255,0,"yellow","#FFFF00"] \
,[255,255,0,"yellow1","#FFFF00"] \
,[238,238,0,"yellow2","#EEEE00"] \
,[205,205,0,"yellow3","#CDCD00"] \
,[139,139,0,"yellow4","#8B8B00"] \
,[154,205,50,"YellowGreen","#9ACD32"] \
])
# Strip the name and hex columns from B to get a plain RGB table (same values as A).
RGB = np.delete(B, np.s_[3:5], axis=1)
pt = [221, 185, 135]  # <-- the point to find
P = A[spatial.KDTree(A).query(pt)[1]]
print(P)  # <-- the nearest point
for k in B:
    if (int(k[0]) == P[0]) and (int(k[1]) == P[1]) and (int(k[2]) == P[2]):
        print(k[3])
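
# A reusable sketch (hypothetical helper, not part of the original script):
# build the KD-tree once and scan B for the row matching the nearest point.
_tree = spatial.KDTree(A)

def nearest_color_name(rgb):
    """Return the color name in B closest to the given (r, g, b) triple."""
    nearest = A[_tree.query(rgb)[1]]
    for row in B:
        if (int(row[0]), int(row[1]), int(row[2])) == tuple(nearest):
            return row[3]

# e.g. nearest_color_name([221, 185, 135]) returns the same name printed above.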
|
from .React import React
from .Vue import Vue
from .ReactTailwind import ReactTailwind
from .VueTailwind import VueTailwind
from .ReactBootstrap import ReactBootstrap
from .VueBootstrap import VueBootstrap
from .Tailwind import Tailwind
from .Bootstrap import Bootstrap
|
from __future__ import absolute_import
from itertools import izip_longest
import Queue
import MySQLdb as mysql
from MySQLdb.cursors import DictCursor
from dejavu.database import Database
class SQLDatabase(Database):
"""
Queries:
1) Find duplicates (shouldn't be any, though):
select `hash`, `song_id`, `offset`, count(*) cnt
from fingerprints
group by `hash`, `song_id`, `offset`
having cnt > 1
order by cnt asc;
2) Get number of hashes by song:
select song_id, song_name, count(song_id) as num
from fingerprints
natural join songs
group by song_id
order by count(song_id) desc;
3) get hashes with highest number of collisions
select
hash,
count(distinct song_id) as n
from fingerprints
group by `hash`
order by n DESC;
=> 26 different songs with same fingerprint (392 times):
select songs.song_name, fingerprints.offset
from fingerprints natural join songs
where fingerprints.hash = "08d3c833b71c60a7b620322ac0c0aba7bf5a3e73";
"""
type = "mysql"
# tables
FINGERPRINTS_TABLENAME = "fingerprints"
SONGS_TABLENAME = "songs"
# fields
FIELD_FINGERPRINTED = "fingerprinted"
# creates
CREATE_FINGERPRINTS_TABLE = """
CREATE TABLE IF NOT EXISTS `%s` (
`%s` binary(10) not null,
`%s` mediumint unsigned not null,
`%s` int unsigned not null,
INDEX (%s),
UNIQUE KEY `unique_constraint` (%s, %s, %s),
FOREIGN KEY (%s) REFERENCES %s(%s) ON DELETE CASCADE
) ENGINE=INNODB;""" % (
FINGERPRINTS_TABLENAME, Database.FIELD_HASH,
Database.FIELD_SONG_ID, Database.FIELD_OFFSET, Database.FIELD_HASH,
Database.FIELD_SONG_ID, Database.FIELD_OFFSET, Database.FIELD_HASH,
Database.FIELD_SONG_ID, SONGS_TABLENAME, Database.FIELD_SONG_ID
)
CREATE_SONGS_TABLE = """
CREATE TABLE IF NOT EXISTS `%s` (
`%s` mediumint unsigned not null auto_increment,
`%s` varchar(250) not null,
`%s` tinyint default 0,
`%s` binary(20) not null,
PRIMARY KEY (`%s`),
UNIQUE KEY `%s` (`%s`)
) ENGINE=INNODB;""" % (
SONGS_TABLENAME, Database.FIELD_SONG_ID, Database.FIELD_SONGNAME, FIELD_FINGERPRINTED,
Database.FIELD_FILE_SHA1,
Database.FIELD_SONG_ID, Database.FIELD_SONG_ID, Database.FIELD_SONG_ID,
)
# inserts (ignores duplicates)
INSERT_FINGERPRINT = """
INSERT IGNORE INTO %s (%s, %s, %s) values
(UNHEX(%%s), %%s, %%s);
""" % (FINGERPRINTS_TABLENAME, Database.FIELD_HASH, Database.FIELD_SONG_ID, Database.FIELD_OFFSET)
INSERT_SONG = "INSERT INTO %s (%s, %s) values (%%s, UNHEX(%%s));" % (
SONGS_TABLENAME, Database.FIELD_SONGNAME, Database.FIELD_FILE_SHA1)
# selects
SELECT = """
SELECT %s, %s FROM %s WHERE %s = UNHEX(%%s);
""" % (Database.FIELD_SONG_ID, Database.FIELD_OFFSET, FINGERPRINTS_TABLENAME, Database.FIELD_HASH)
SELECT_MULTIPLE = """
SELECT HEX(%s), %s, %s FROM %s WHERE %s IN (%%s);
""" % (Database.FIELD_HASH, Database.FIELD_SONG_ID, Database.FIELD_OFFSET,
FINGERPRINTS_TABLENAME, Database.FIELD_HASH)
SELECT_ALL = """
SELECT %s, %s FROM %s;
""" % (Database.FIELD_SONG_ID, Database.FIELD_OFFSET, FINGERPRINTS_TABLENAME)
SELECT_SONG = """
SELECT %s, HEX(%s) as %s FROM %s WHERE %s = %%s;
""" % (Database.FIELD_SONGNAME, Database.FIELD_FILE_SHA1, Database.FIELD_FILE_SHA1, SONGS_TABLENAME, Database.FIELD_SONG_ID)
SELECT_NUM_FINGERPRINTS = """
SELECT COUNT(*) as n FROM %s
""" % (FINGERPRINTS_TABLENAME)
SELECT_UNIQUE_SONG_IDS = """
SELECT COUNT(DISTINCT %s) as n FROM %s WHERE %s = 1;
""" % (Database.FIELD_SONG_ID, SONGS_TABLENAME, FIELD_FINGERPRINTED)
SELECT_SONGS = """
SELECT %s, %s, HEX(%s) as %s FROM %s WHERE %s = 1;
""" % (Database.FIELD_SONG_ID, Database.FIELD_SONGNAME, Database.FIELD_FILE_SHA1, Database.FIELD_FILE_SHA1,
SONGS_TABLENAME, FIELD_FINGERPRINTED)
# drops
DROP_FINGERPRINTS = "DROP TABLE IF EXISTS %s;" % FINGERPRINTS_TABLENAME
DROP_SONGS = "DROP TABLE IF EXISTS %s;" % SONGS_TABLENAME
# update
UPDATE_SONG_FINGERPRINTED = """
UPDATE %s SET %s = 1 WHERE %s = %%s
""" % (SONGS_TABLENAME, FIELD_FINGERPRINTED, Database.FIELD_SONG_ID)
# delete
DELETE_UNFINGERPRINTED = """
DELETE FROM %s WHERE %s = 0;
""" % (SONGS_TABLENAME, FIELD_FINGERPRINTED)
def __init__(self, **options):
super(SQLDatabase, self).__init__()
self.cursor = cursor_factory(**options)
self._options = options
def after_fork(self):
# Clear the cursor cache, we don't want any stale connections from
# the previous process.
Cursor.clear_cache()
def setup(self):
"""
Creates any non-existing tables required for dejavu to function.
This also removes all songs that have been added but have no
fingerprints associated with them.
"""
with self.cursor() as cur:
cur.execute(self.CREATE_SONGS_TABLE)
cur.execute(self.CREATE_FINGERPRINTS_TABLE)
cur.execute(self.DELETE_UNFINGERPRINTED)
def empty(self):
"""
Drops tables created by dejavu and then creates them again
by calling `SQLDatabase.setup`.
.. warning:
This will result in a loss of data
"""
with self.cursor() as cur:
cur.execute(self.DROP_FINGERPRINTS)
cur.execute(self.DROP_SONGS)
self.setup()
def delete_unfingerprinted_songs(self):
"""
Removes all songs that have no fingerprints associated with them.
"""
with self.cursor() as cur:
cur.execute(self.DELETE_UNFINGERPRINTED)
def get_num_songs(self):
"""
Returns number of songs the database has fingerprinted.
"""
with self.cursor() as cur:
cur.execute(self.SELECT_UNIQUE_SONG_IDS)
for count, in cur:
return count
return 0
    def get_num_fingerprints(self):
        """
        Returns the number of fingerprints stored in the database.
        """
with self.cursor() as cur:
cur.execute(self.SELECT_NUM_FINGERPRINTS)
for count, in cur:
return count
return 0
def set_song_fingerprinted(self, sid):
"""
Set the fingerprinted flag to TRUE (1) once a song has been completely
fingerprinted in the database.
"""
with self.cursor() as cur:
cur.execute(self.UPDATE_SONG_FINGERPRINTED, (sid,))
def get_songs(self):
"""
Return songs that have the fingerprinted flag set TRUE (1).
"""
with self.cursor(cursor_type=DictCursor) as cur:
cur.execute(self.SELECT_SONGS)
for row in cur:
yield row
def get_song_by_id(self, sid):
"""
Returns song by its ID.
"""
with self.cursor(cursor_type=DictCursor) as cur:
cur.execute(self.SELECT_SONG, (sid,))
return cur.fetchone()
def insert(self, hash, sid, offset):
"""
Insert a (sha1, song_id, offset) row into database.
"""
with self.cursor() as cur:
cur.execute(self.INSERT_FINGERPRINT, (hash, sid, offset))
def insert_song(self, songname, file_hash):
"""
Inserts song in the database and returns the ID of the inserted record.
"""
with self.cursor() as cur:
cur.execute(self.INSERT_SONG, (songname, file_hash))
return cur.lastrowid
def query(self, hash):
"""
Return all tuples associated with hash.
If hash is None, returns all entries in the
database (be careful with that one!).
"""
        # select all if no key; otherwise SELECT has a %s placeholder
        # that must be bound to the hash
        if hash is None:
            query, args = self.SELECT_ALL, ()
        else:
            query, args = self.SELECT, (hash,)
        with self.cursor() as cur:
            cur.execute(query, args)
for sid, offset in cur:
yield (sid, offset)
def get_iterable_kv_pairs(self):
"""
Returns all tuples in database.
"""
return self.query(None)
def insert_hashes(self, sid, hashes):
"""
Insert series of hash => song_id, offset
values into the database.
"""
values = []
for hash, offset in hashes:
values.append((hash, sid, offset))
with self.cursor() as cur:
for split_values in grouper(values, 1000):
cur.executemany(self.INSERT_FINGERPRINT, split_values)
def return_matches(self, hashes):
"""
Return the (song_id, offset_diff) tuples associated with
a list of (sha1, sample_offset) values.
"""
# Create a dictionary of hash => offset pairs for later lookups
mapper = {}
for hash, offset in hashes:
mapper[hash.upper()] = offset
# Get an iteratable of all the hashes we need
values = mapper.keys()
with self.cursor() as cur:
for split_values in grouper(values, 1000):
# Create our IN part of the query
query = self.SELECT_MULTIPLE
query = query % ', '.join(['UNHEX(%s)'] * len(split_values))
cur.execute(query, split_values)
for hash, sid, offset in cur:
# (sid, db_offset - song_sampled_offset)
yield (sid, offset - mapper[hash])
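    # Note on return_matches: the IN clause above is expanded to the chunk
    # size, e.g. for a chunk of 3 hashes the rendered query reads (assuming
    # the base-class field names hash/song_id/offset):
    #   SELECT HEX(hash), song_id, offset FROM fingerprints
    #   WHERE hash IN (UNHEX(%s), UNHEX(%s), UNHEX(%s));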
def __getstate__(self):
return (self._options,)
def __setstate__(self, state):
self._options, = state
self.cursor = cursor_factory(**self._options)
def grouper(iterable, n, fillvalue=None):
args = [iter(iterable)] * n
return (filter(None, values) for values
in izip_longest(fillvalue=fillvalue, *args))
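# Example (sketch): grouper([1, 2, 3, 4, 5], 2) yields the chunks
# [1, 2], [3, 4], [5]; izip_longest pads the last chunk with fillvalue and
# filter(None, ...) strips the padding back out again.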
def cursor_factory(**factory_options):
def cursor(**options):
options.update(factory_options)
return Cursor(**options)
return cursor
class Cursor(object):
"""
Establishes a connection to the database and returns an open cursor.
```python
# Use as context manager
with Cursor() as cur:
cur.execute(query)
```
"""
_cache = Queue.Queue(maxsize=5)
def __init__(self, cursor_type=mysql.cursors.Cursor, **options):
super(Cursor, self).__init__()
try:
conn = self._cache.get_nowait()
except Queue.Empty:
conn = mysql.connect(**options)
else:
# Ping the connection before using it from the cache.
conn.ping(True)
self.conn = conn
self.conn.autocommit(False)
self.cursor_type = cursor_type
@classmethod
def clear_cache(cls):
cls._cache = Queue.Queue(maxsize=5)
def __enter__(self):
self.cursor = self.conn.cursor(self.cursor_type)
return self.cursor
def __exit__(self, extype, exvalue, traceback):
        # if we had a MySQL related error we try to roll back the connection
        # (the rollback lives on the connection, not the cursor).
        if extype is not None and issubclass(extype, mysql.MySQLError):
            self.conn.rollback()
self.cursor.close()
self.conn.commit()
# Put it back on the queue
try:
self._cache.put_nowait(self.conn)
except Queue.Full:
self.conn.close()
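# Usage sketch (connection parameters below are illustrative, not part of
# this module; any MySQLdb.connect() keyword arguments are accepted):
# db = SQLDatabase(host='127.0.0.1', user='root', passwd='', db='dejavu')
# db.setup()
# sid = db.insert_song('my_song.mp3', '<sha1-hex-of-file>')
# db.insert_hashes(sid, [('<binary-hash-as-hex>', 1024)])
# db.set_song_fingerprinted(sid)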
|
#!/usr/bin/python
# Sentiment Analyzer.py
import sys
import json
import itertools
from statistics import mode
import joblib
from keras.models import load_model
from keras.preprocessing.sequence import pad_sequences
def sentimentAnalyzer(tweet):
# Pre-Trained SVM Classifier
svm_classifier = joblib.load('assets/trained_models/Svm_Classifier_linear.pkl')
# Vectorizer used for training
vectorizer = joblib.load('assets/trained_models/SVM_Vectorizer.pkl')
# Transforming test data using count vectorizer
svm_naive_tweet=vectorizer.transform([tweet])
# Predicting test data values
svm_prediction=svm_classifier.predict(svm_naive_tweet)
svm_prediction=int(''.join(map(str,svm_prediction.tolist())))
# Pre-Trained Naive Bayes Classifier
naive_classifier = joblib.load('assets/trained_models/NaiveBayes_Classifier.pkl')
# Predicting test data values
naive_prediction=naive_classifier.predict(svm_naive_tweet)
naive_prediction=int(''.join(map(str,naive_prediction.tolist())))
# Pre-Trained LSTM Classifier
lstm_classifier = load_model('assets/trained_models/NeuralNetworkLSTM.h5')
    # LSTM tokenizer fitted during training. Do not refit it here: calling
    # fit_on_texts() on a single tweet would alter the saved vocabulary, and
    # texts_to_sequences expects a list of texts, not a raw string.
    tokenizer = joblib.load('assets/trained_models/lstm_tokenizer.pkl')
    lstm_tweet = tokenizer.texts_to_sequences([tweet])
    try:
        # NOTE: if the LSTM was trained on fixed-length input, pass the same
        # maxlen= here that was used during training.
        lstm_tweet = pad_sequences(lstm_tweet)
# Predicting test data values
lstm_prediction=(lstm_classifier.predict(lstm_tweet) > 0.5).astype("int32")
lstm_prediction = list(itertools.chain(*lstm_prediction))
lstm_prediction = max(lstm_prediction, key = lstm_prediction.count)
# Voting on multiple combinations
print(json.dumps(mode([svm_prediction, naive_prediction, lstm_prediction])))
except ValueError:
print(json.dumps(1))
# This file performs sentiment analysis on tweets to be stored in the MySQL database
if __name__ == "__main__":
    tweet = sys.argv[1]
    tweet = json.loads(tweet)
sentimentAnalyzer(tweet)
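# Usage sketch (the argument is a JSON-encoded string; quoting is illustrative):
#   python "Sentiment Analyzer.py" '"I love this product"'
# Prints the majority vote of the SVM, Naive Bayes and LSTM predictions
# (an integer class label, e.g. 0 or 1).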
|
#
# Copyright 2016 Pixar
#
# Licensed under the Apache License, Version 2.0 (the "Apache License")
# with the following modification; you may not use this file except in
# compliance with the Apache License and the following modification to it:
# Section 6. Trademarks. is deleted and replaced with:
#
# 6. Trademarks. This License does not grant permission to use the trade
# names, trademarks, service marks, or product names of the Licensor
# and its affiliates, except as required to comply with Section 4(c) of
# the License and to reproduce the content of the NOTICE file.
#
# You may obtain a copy of the Apache License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the Apache License with the above modification is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the Apache License for the specific
# language governing permissions and limitations under the Apache License.
#
def GetScalarTypeFromAttr(attr):
    '''
    Returns a tuple (scalarType, isArray), where isArray is True if the
    attribute holds an array type.
    '''
# Usd.Attribute and customAttributes.CustomAttribute have a
# GetTypeName function, while Sdf.AttributeSpec has a typeName attr.
if hasattr(attr, 'GetTypeName'):
typeName = attr.GetTypeName()
else:
typeName = attr.typeName
from pxr import Sdf
if isinstance(typeName, Sdf.ValueTypeName):
return typeName.scalarType, typeName.isArray
else:
return None, False
def ToString(v, typeName=None):
"""Returns a string representing a "detailed view" of the value v.
This string is used in the watch window"""
from pxr import Tf, Gf
import locale
import pprint
if v is None:
return 'None'
if not typeName:
typeName = str(v.__class__)
if typeName:
from pxr import Sdf
if isinstance(typeName, Sdf.ValueTypeName):
tfType = typeName.type
else:
tfType = Sdf.GetTypeForValueTypeName(typeName)
if tfType != Tf.Type.Unknown:
typeName = tfType.typeName
# Pretty-print a bounding box
if isinstance(v, Gf.BBox3d):
prettyMatrix = ("%s\n%s\n%s\n%s" % (v.matrix[0], v.matrix[1],
v.matrix[2], v.matrix[3])).replace("(","").replace(")","")
result = "Endpts of box diagonal:\n%s\n%s\n\nTransform matrix:\n%s\n" \
% (v.box.GetCorner(0), v.box.GetCorner(7), prettyMatrix)
if (v.hasZeroAreaPrimitives):
result += "\nHas zero-area primitives\n"
else:
result += "\nDoes not have zero-area primitives\n"
worldSpaceRange = Gf.Range3d()
worldSpaceRange.UnionWith(v.matrix.Transform(v.GetRange().GetMin()))
worldSpaceRange.UnionWith(v.matrix.Transform(v.GetRange().GetMax()))
result += "\nWorld-space range:\n%s\n%s\n" % \
(worldSpaceRange.GetMin(), worldSpaceRange.GetMax())
# Pretty-print a GfMatrix*
elif typeName.startswith("GfMatrix"):
result = ""
numRows = int(typeName[8])
for i in range(numRows):
result += str(v[i]) + "\n"
result = result.replace("(","").replace(")","")
# Pretty-print a GfVec*
elif typeName.startswith("GfVec"):
result = "( %s )" % (", ".join([
ToString(x) for x in v]))
# Pretty-print a TfTimeStamp
elif isinstance(v, Tf.TimeStamp):
from datetime import datetime
dt = datetime.fromtimestamp( v.Get() )
result = dt.isoformat(' ')
# pretty print an int
elif isinstance(v, int):
result = locale.format("%d", v, grouping=True)
# pretty print a float
elif isinstance(v, float):
result = locale.format("%f", v, grouping=True)
# print a string as-is
elif isinstance(v, str):
result = v
else:
result = pprint.pformat(v)
return result
def ToClipboard(v, typeName=None):
# XXX: we can have this give you the repr
return ToString(v, typeName)
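# Example (sketch, assuming the Tf type name for Gf.Vec3d is "GfVec3d"):
# ToString(Gf.Vec3d(1, 2.5, 3), Sdf.ValueTypeNames.Double3) renders roughly as
# "( 1.000000, 2.500000, 3.000000 )", since each component is passed back
# through ToString and floats pick up the locale-aware "%f" formatting.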
|
#!/usr/bin/env python
"""
dnolivieri: 23 dec 2015
Bootstrap workflow for the MR ensemble RF code.
"""
import collections
import random as rnd
import numpy as np
import matplotlib.pyplot as plt
import time
import os, fnmatch
import sys
import itertools
from operator import itemgetter, attrgetter
import math
from Bio import SeqIO
from Bio import Seq
from Bio.SeqRecord import SeqRecord
from scipy import *
import struct
import re
from propy import PyPro
from propy.GetProteinFromUniprot import GetProteinSequence
import json
import cPickle as pickle
from collections import defaultdict
from banyan import *
import multiprocessing
from copy import deepcopy
import timeit
import operator
import getMRfeatVec01 as FVec
#import mrvTrain01 as RFtrain
import mrvTrainSG02 as RFtrain
import mrvPredict03 as RFpredict
import processItrn02 as pItrn
def save_object(obj, filename):
with open(filename, 'wb') as output:
pickle.dump(obj, output, pickle.HIGHEST_PROTOCOL)
class vsBootStrapML:
def __init__(self, S, loci_classes, P, mlist):
self.S = S
self.P = P
self.loci_classes = loci_classes
self.speciesList = mlist
self.Nestimators=250
#self.bkg_total = 10000
self.bkg_total = 6500
def convert_full_background(self):
F = FVec.getMRFeatureVectors(self.S)
#bkgndFile = "./dataVgeneDB/noMotifsBcknd.fasta"
#bkgndFile = "./dataVgeneDB/bckgnd_run_r6500.fasta"
bkgndFile = "./dataVgeneDB/All_bkg_m.fasta"
#Zbar = F.background_from_fasta(bkgndFile, self.bkg_total)
## Try the hybrid method
Zbar = F.hybrid_descriptors_from_fasta(bkgndFile, self.bkg_total)
pkloutfile="./bstrap/bckgnd_n" + str(self.bkg_total) + ".pkl"
save_object(Zbar, pkloutfile)
def get_existing_data(self, infile):
qp = open(infile, 'rb')
D = pickle.load(qp)
return D
def convert_sequences(self, iterCount):
print "-----inside convert_sequences------"
F = FVec.getMRFeatureVectors(self.S)
G = {}
for loci in self.loci_classes:
posCount=0
G.update({loci:0})
"""
if iterCount==0:
lociFile = "./bstrap/" + loci + "_" + str(iterCount) +".fasta"
else:
lociFile = "./bstrap/" + loci + "_delta_" + str(iterCount) +".fasta"
"""
## it will always be the first one!... I still have not done the delta correctly
lociFile = "./bstrap/" + loci + "_0.fasta"
for record in SeqIO.parse(lociFile, "fasta"):
posCount+=1
print "lociFile=", lociFile, " posCount=", posCount
if iterCount>0:
prev_loci_file = "./bstrap/" + loci + "_" + str(iterCount - 1 ) +".pkl"
p = self.get_existing_data(prev_loci_file)
zp= np.array(p[0])
posCount += zp.shape[0]
print "zp.shape[0]=", zp.shape[0], "posCount=", posCount
if posCount * 3 < 10000:
G[loci] = posCount * 3
else:
G[loci] = 9999
"""
if G[loci] < 200:
G[loci] = 200
"""
print "G=", G
print "---getting sequences from background ---"
bkgndFile = "./bstrap/bckgnd_n" + str(self.bkg_total) + ".pkl"
zB = self.get_existing_data(bkgndFile)
print "---starting loci conversion ---"
for loci in self.loci_classes:
lociFile = "./bstrap/" + loci + "_" + str(iterCount) +".fasta"
#Dbar=F.descriptors_from_fasta(lociFile)
## try hybrid method
Dbar=F.hybrid_descriptors_from_fasta(lociFile)
print loci, " len(Dbar)=", len(Dbar), len(Dbar[0])
if iterCount>0:
prev_loci_file = "./bstrap/" + loci + "_" + str(iterCount - 1 ) +".pkl"
print "prev_loci_file=", prev_loci_file
yP = self.get_existing_data(prev_loci_file)
if len(Dbar)>0:
ybar={}
print "updating "
for k in range(len(yP)):
ybar.update({k:yP[k]+Dbar[k]})
print len(yP[k]), len(Dbar[k]), len(ybar[k])
yP = ybar
else:
yP = Dbar
for k in range(len(yP)):
print len(yP[k])
pkloutfile=lociFile.replace(".fasta", ".pkl")
print "pkloutfile=", pkloutfile
save_object(yP, pkloutfile)
rand_samp=True
zbar = {}
jz=rnd.sample(range(0,self.bkg_total),int(G[loci]))
for k in range(len(zB)):
# get random samples...
print G[loci]
if rand_samp:
#zbar.update({k:zB[k][jz] } )
zbar.update({k:[zB[k][zkk] for zkk in jz]} )
else:
zbar.update({k:zB[k][0:G[loci]] } )
x= np.array(zbar[k])
print x.shape
pkloutfile="./bstrap/bckgnd_"+loci+"_"+ str(iterCount)+ ".pkl"
save_object(zbar, pkloutfile)
def make_training_matrices (self, iterCount ):
print "---making loci based training matrices ----"
for loci in self.loci_classes:
loci_file = "./bstrap/" + loci + "_" + str(iterCount) +".pkl"
print "loci_file=", loci_file
D = self.get_existing_data( loci_file )
bkg_file="./bstrap/bckgnd_"+loci+"_"+ str(iterCount)+ ".pkl"
B = self.get_existing_data( bkg_file )
for k in range(len(D)):
zP=np.array(D[k])
zB=np.array(B[k])
valP = np.ones( [zP.shape[0],1] )
valB = np.zeros( [zB.shape[0],1] )
print k, zP.shape, zB.shape, valP.shape, valB.shape
train_signals = np.vstack([zP, zB])
train_vals = np.vstack([valP, valB])
print "train_signals.shape=", train_signals.shape
print "train_vals.shape=", train_vals.shape
out1="./bstrap/train_signal_"+ loci + "_"+ str(k) +".pkl"
out2="./bstrap/train_val_"+ loci + "_"+ str(k) +".pkl"
outfile = "./bstrap/trainMat_" + loci + "_r"+str(k) +"_n"+ str(iterCount)+".pkl"
print outfile
A= RFtrain.TrainClassifier( train_signals, train_vals, outfile)
def Predict(self, iterCount):
mbar = []
#threshold = 0.95 - iterCount*0.1
## use the 0.5 threshold to get a distribution; the iteration must be done by hand....
threshold = 0.5
#cntgs=['AQIA01064159.1']
#cntgs=['AQIB01146122.1']
"""
cntgs=['CABD02231421.1']
cntgs=['CABD02231427.1']
cntgs=['AJFE01012238.1']
cntgs=['AHZZ01042080.1']
cntgs=['ABGA01394058.1']
cntgs=['ABGA01252219.1']
cntgs=['ABGA01275597.1']
cntgs=['JZKE01249934.1']
cntgs=['JABR01071860.1']
cntgs=['JABR01020049.1']
cntgs=['JABR01016432.1']
cntgs=['ABRT010282393.1']
"""
#cntgs=['ABRT010078850.1']
#cntgs=['chromosome:MMUL_1:3:1:196418989:1']
cntgs=None
for m in self.speciesList:
mbar.append(m)
M = RFpredict.RunVregPredict(self.P, speciesList= mbar, check_contigs=True, contigs=cntgs)
M.analyze_files(iterCount, self.loci_classes, threshold)
mbar = []
def PrepareNext(self, iterCount ):
P= pItrn.ProcessIteration(iterCount, self.loci_classes)
P.run()
def run(self,n):
# 0. Do the background once:
#self.convert_full_background()
"""
print "#1.____ Sequences to Feature vecs"
self.convert_sequences( n )
print "#2._____ SL Training mats"
self.make_training_matrices( n )
"""
print "#3._____ Predict at iteration n"
self.Predict(n)
"""
print "#4.____ Determine new sequences"
self.PrepareNext( n )
"""
# -----------------------------------------------
if __name__ == '__main__':
Vs_bstrap = 'Vs_init_bstrap_ighv.json'
json_data=open( Vs_bstrap )
S = json.load(json_data)
json_data.close()
#family=sys.argv[1]
#hostmach = int(sys.argv[2])
family = 'primates'
#family = 'reptiles'
if family=='primates':
#Vs_wgs_prediction = 'wgs_primates_ig.json'
### this is the full
#Vs_wgs_prediction = 'wgs_primates_tr.json'
Vs_wgs_prediction = 'wgs_primates.json'
mlist=["Chlorocebus_AQIB01","Gorilla_CABD0","Macacaf_AQIA01",
"Macacam_AANU01","Macacan_JZLF01","Mandrillus_JYKQ01","Microcebus_ABDC02",
"Nomascus_ADFV01","Panp_AJFE01","Pant_AACZ03","Papio_AHZZ01","Pongo_ABGA01",
"Propithecus_JZKE01","Rhinopithecus_JABR01","Saimiri_AGCE01","Tarsius_ABRT02"]
"""
mlist=["Chlorocebus_AQIB01","Gorilla_CABD0","Macacaf_AQIA01",
"Mandrillus_JYKQ01","Microcebus_ABDC02",
"Nomascus_ADFV01","Panp_AJFE01","Pant_AACZ03","Papio_AHZZ01","Pongo_ABGA01",
"Propithecus_JZKE01","Rhinopithecus_JABR01","Saimiri_AGCE01","Tarsius_ABRT02"]
"""
#mlist=["Chlorocebus_AQIB01"]
#mlist=["Macacam_AANU01", "Mandrillus_JYKQ01"]
#mlist=["Macacam_AANU01"]
mlist=["Chlorocebus_AQIB01","Gorilla_CABD02","Macacaf_AQIA01",
"Mandrillus_JYKQ01","Microcebus_ABDC01",
"Nomascus_ADFV01","Panp_AJFE01","Pant_AACZ03","Papio_AHZZ01","Pongo_ABGA01",
"Propithecus_JZKE01","Rhinopithecus_JABR01","Saimiri_AGCE01","Tarsius_ABRT01"]
#mlist=["Tarsius_ABRT01"]
#mlist=["Macacam_MMUL1"]
elif family=='glires':
Vs_wgs_prediction = 'wgs_glires.json'
mlist=[ ]
elif family=='reptiles':
Vs_wgs_prediction = 'wgs_reptiles.json'
mlist=["Ophiophagus_hannah"]
print mlist
json_data2=open( Vs_wgs_prediction )
P = json.load(json_data2)
json_data2.close()
#loci_classes = ["ighv", "igkv", "iglv"]
#loci_classes = ["trav", "trbv", "trgv", "trdv"]
loci_classes = ["ighv", "igkv", "iglv", "trav", "trbv", "trgv", "trdv"]
B = vsBootStrapML(S, loci_classes, P, mlist)
"""
for n in range(0,3):
print "**** N = ", n, " ****"
B.run(n)
"""
n=4
print "**** N = ", n, " ****"
B.run(n)
|
# from beluga.optim import *
from optimalcontrol.elements import Problem
from sympy import *
from sympy.core.function import AppliedUndef, Function
import pystache, imp, inspect, logging, os
import re as _re
import beluga.bvpsol.BVP as BVP
from beluga.utils import sympify, keyboard, ipsh
from beluga.utils import sympify2  # assumed to live alongside sympify; used throughout below
from beluga.optim.problem import *
import dill
import numpy as np
from collections.abc import Iterable
class OCDAENumerical(object):
name = 'dae-numerical'
def get_actions(self):
# Tuple: (func, arg1, arg2, ...)
self.actions = [
(self.process_quantities),
(self.make_costates),
(self.make_aug_cost, 'initial'),
(self.make_aug_cost, 'terminal'),
# (self.process_path_constraints, problem),
]
return self.actions
def run(self):
for action in self.actions:
if isinstance(action, Iterable):
action[0](*action[1:])
else:
action()
# pystache renderer without HTML escapes
renderer = pystache.Renderer(escape=lambda u: u)
def __init__(self, problem, cached=True):
"""!
        \brief Initializes all of the relevant necessary conditions of optimality.
\author Michael Grant
\author Thomas Antony
\version 0.2
\date 05/15/16
"""
self.problem = problem
self.get_actions()
self.aug_cost = {}
self.costates = []
self.costate_rates = []
self.ham = sympify('0')
self.ham_ctrl_partial = []
self.ctrl_free = []
self.parameter_list = []
self.bc_initial = []
self.bc_terminal = []
# from .. import Beluga # helps prevent cyclic imports
self.compile_list = ['deriv_func','bc_func','compute_control']
# self.template_prefix = Beluga.config.getroot()+'/beluga/bvpsol/templates/'
self.template_suffix = '.py.mu'
self.dae_states = []
self.dae_equations = []
def cache_bvp(self, problem, filename=None):
"""
\brief Saves BVP object into file on disk
Arguments:
problem : Problem object
filename: Full path to cache file (optional)
default value: <self.problem.name>_bvp.dat
\date 01/27/2016
"""
if filename is None:
filename = problem.name+'_bvp.dat'
with open(filename,'wb') as f:
try:
logging.info('Caching BVP information to file')
                dill.dump(self.bvp,f)
                return True
            except Exception:
                logging.warning('Failed to save BVP to '+filename)
return False
def load_bvp(self, problem, filename=None):
"""
\brief Loads pre-computed BVP object from cache file
\author Thomas Antony
Arguments:
problem : Problem object
filename: Full path to cache file (optional)
default value: <self.problem.name>_bvp.dat
\date 01/27/2016
"""
if filename is None:
filename = problem.name+'_bvp.dat'
if not os.path.exists(filename):
return None
with open(filename,'rb') as f:
try:
logging.info('Loading BVP information from cache')
bvp = dill.load(f)
return bvp
except Exception as e:
                logging.warning('Failed to load BVP from '+filename)
logging.debug(e)
return None
def derivative(self, expr, var, dependent_variables):
"""
        Take the derivative of expr with respect to var, folding the
        pre-defined quantities into the chain rule.
dependent_variables: Dictionary containing dependent variables as keys and
their expressions as values
"""
dep_var_names = dependent_variables.keys()
        dep_var_expr = [q_expr for (_, q_expr) in dependent_variables.items()]
dFdq = [diff(expr, dep_var).subs(dependent_variables.items()) for dep_var in dep_var_names]
dqdx = [diff(qexpr, var) for qexpr in dep_var_expr]
# Chain rule + total derivative
out = sum(d1*d2 for d1,d2 in zip(dFdq, dqdx)) + diff(expr, var)
return out
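    # Example (sketch, hypothetical symbols): with a quantity q defined as
    # x**2 and expr = q + x, derivative(expr, x, {q: x**2}) adds the chain
    # rule term dF/dq * dq/dx = 1 * 2*x to the direct dF/dx = 1, giving 2*x + 1.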
def make_costate_rate(self, states):
"""!
\brief Creates the symbolic differential equations for the costates.
\author Michael Grant
\author Thomas Antony
\version 0.1
\date 06/30/15
"""
# TODO: Automate partial derivatives of numerical functions
# for state in states:
# rate = diff(sympify('-1*(' + self.ham + ')'),state)
# # numerical_diff = rate.atoms(Derivative)
# self.costate_rates.append(str(rate))
        # Complex-step size used to replace symbolic Derivative terms
        # (same step as in make_ctrl_partial)
        _h = sympify2('1j*(1e-30)')
        self.costate_rates = []
# self.costate_rates.append(str(diff(sympify(
# '-1*(' + self.ham + ')'),state)))
for state in states:
corate=self.derivative(-1*(self.ham),state,self.quantity_vars)
custom_diff=corate.atoms(Derivative)
repl = [] #Replacements for custom functions
for d in custom_diff:
for f in d.atoms(AppliedUndef): #Just extracts (never more than 1 f)
for v in f.atoms(Symbol):
if str(v)==str(state): #This should only happen once!
replv=sympify('im('+str(f.subs(v,v+_h))+')/1e-30')
repl.append((d,replv))
self.costate_rates.append(corate.subs(repl))
self.costate_rates=sympify2(self.costate_rates)
#h = 1e-30
#j = symbols('1j')
#self.costate_rates = []
#for state in states:
# state = sympify(state)
# H = self.make_state_dep_ham(state, self.problem)
# if H != 0:
# self.costate_rates.append('im(' + str((H.subs(state, (state + h * j)))) + ')/' + str(h))
# else:
# self.costate_rates.append('0')
#self.costate_rates=sympify2(self.costate_rates)
#print(Matrix([sympify2(state.process_eqn) for state in self.problem.states()]+ self.costate_rates))
#quit()
def make_costate_rate_numeric(self, states):
"""!
\brief Creates the symbolic differential equations for the costates.
\author Michael Grant
\author Sean Nolan
\version 0.1
\date 06/22/16
"""
h = 1e-100
j = symbols('1j')
self.costate_rates = []
for state in states:
state = sympify(state)
H = self.make_state_dep_ham(state, self.problem)
if H != 0:
self.costate_rates.append('-np.imag(' + str((H.subs(state, (state + h * j)))) + ')/' + str(h))
else:
self.costate_rates.append('0')
def make_ctrl_partial(self, controls):
"""!
\brief Symbolically compute dH/du where H is the Hamiltonian and u is the control.
\author Michael Grant
\author Thomas Antony
\version 0.1
\date 06/30/15
"""
self.ham_ctrl_partial = []
_h = sympify2('1j*(1e-30)')
        for ctrl in controls:
            dHdu = self.derivative(sympify(self.ham), ctrl, self.quantity_vars)
            repl = [] # Replacements for custom functions (reset for each control)
            custom_diff = dHdu.atoms(Derivative)
for d in custom_diff:
for f in d.atoms(AppliedUndef):
for v in f.atoms(Symbol):
if str(v)==str(ctrl):
replv=sympify('im('+str(f.subs(v,v+_h))+')/1e-30')
repl.append((d,replv))
self.ham_ctrl_partial.append(dHdu.subs(repl))
# Substitute "Derivative" with complex step derivative
#repl = {(d,im(f.func(v+1j*1e-30))/1e-30) for d in custom_diff
# for f,v in zip(d.atoms(AppliedUndef),d.atoms(Symbol))}
#self.ham_ctrl_partial.append(dHdu.subs(repl))
def selective_expand(self, expr, var_select, subs_list):
# Expands the expressions specified by subs_list if they contain the
# variables in var_select
select_subs = [qty for qty in subs_list.items() for var in var_select if var.sym in qty[1].atoms()]
# Substitute only relevant symbols in the expression
return expr.subs(select_subs)
def make_ctrl(self, problem, mode='dae'):
if mode == 'dae':
self.make_ctrl_dae(problem)
else:
self.make_ctrl_analytic(problem.controls())
def make_ctrl_dae(self, problem):
"""!
\brief Compute EOMs for controls.
\author Michael Grant
\author Thomas Antony
\version 0.1
\date 04/10/16
"""
if len(self.equality_constraints) > 0:
self.mu_vars = [sympify('mu'+str(i+1)) for i in range(len(self.equality_constraints))]
self.mu_lhs = [sympify(c.expr) for c in self.equality_constraints]
else:
self.mu_vars = self.mu_lhs = []
g = self.ham_ctrl_partial + self.mu_lhs
X = [state.sym for state in problem.states()] + [Symbol(costate) for costate in self.costates]
U = [c.sym for c in problem.controls()] + self.mu_vars
xdot = Matrix([sympify(state.process_eqn) for state in problem.states()] + self.costate_rates)
# Compute Jacobian
dgdX = Matrix([[self.derivative(g_i, x_i, self.quantity_vars) for x_i in X] for g_i in g])
dgdU = Matrix([[self.derivative(g_i, u_i, self.quantity_vars) for u_i in U] for g_i in g])
udot = dgdU.LUsolve(-dgdX*xdot); # dgdU * udot + dgdX * xdot = 0
self.dae_states = U
self.dae_equations = list(udot)
self.dae_bc = g
def make_ctrl_analytic(self, controls):
"""!
\brief Symbolically compute the solutions for the control along control-unconstrained arcs.
\author Michael Grant
\author Thomas Antony
\version 0.1
\date 06/30/15
"""
# Solve all controls simultaneously
logging.info("Finding optimal control law ...")
# If equality constraints are present
# We need to solve for 'mu's as well
lhs = self.ham_ctrl_partial
vars = [c.sym for c in controls]
self.mu_vars = []
self.mu_lhs = []
if len(self.equality_constraints) > 0:
self.mu_vars = [sympify('mu'+str(i+1)) for i in range(len(self.equality_constraints))]
self.mu_lhs = [sympify(c.expr) for c in self.equality_constraints]
try:
var_list = list(vars + self.mu_vars)
# var_list = vars
eqn_list = []
for eqn in list(lhs + self.mu_lhs ):
eqn_list.append(self.selective_expand(eqn, controls, self.quantity_vars))
logging.info("Attempting using SymPy ...")
logging.debug("dHdu = "+str(eqn_list))
ctrl_sol = solve(eqn_list, var_list, dict=True)
#TODO: ^ uncomment once done with the aircraft noise problem
#ctrl_sol = CtrlSols() #I'm going to hell for this
#logging.info("Grabbed the control laws for the aircraft noise!")
# raise ValueError() # Force mathematica
except ValueError as e: # FIXME: Use right exception name here
logging.debug(e)
logging.info("No control law found")
from beluga.utils.pythematica import mathematica_solve
logging.info("Attempting using Mathematica ...")
            var_sol = mathematica_solve(lhs+self.mu_lhs,vars+self.mu_vars)
# TODO: Extend numerical control laws to mu's
ctrl_sol = var_sol
if ctrl_sol == []:
logging.info("No analytic control law found, switching to numerical method")
#logging.info("Done")
# solve() returns answer in the form
# [ {ctrl1: expr11, ctrl2:expr22},
# {ctrl1: expr21, ctrl2:expr22}]
# Convert this to format required by template
self.control_options = [ [{'name':str(ctrl), 'expr':str(expr)}
for (ctrl,expr) in option.items()]
for option in ctrl_sol]
def make_aug_cost(self, location):
"""!
\brief Symbolically create the augmented cost functional.
\author Michael Grant
\author Thomas Antony
\version 0.1
\date 06/30/15
"""
aug_cost = self.problem.cost[location].expr
constraint = self.problem.constraints(location)
# Do in two steps so that indices are "right"
# TODO: apply quantities
filtered_list = [c for c in constraint if c.type==location]
self.parameter_list += [c.make_multiplier(ind) for (ind,c) in enumerate(filtered_list,1)]
# self.aug_cost[location] = aug_cost + ''.join(' + (%s)' % c.make_aug_cost(ind)
# for (ind,c) in enumerate(filtered_list,1))
self.aug_cost[location] = aug_cost + sum([c.make_aug_cost(ind)
for (ind,c) in enumerate(filtered_list,1)])
def make_costate_bc(self, states, location):
"""!
\brief Symbolically create the boundary conditions at initial and final locations.
\author Michael Grant
\author Thomas Antony
\version 0.1
\date 06/30/15
"""
        if location == 'initial':
            sign = sympify('-1')
        elif location == 'terminal':
            sign = sympify('1')
cost_expr = sign * (self.aug_cost[location])
#TODO: Fix hardcoded if conditions
#TODO: Change to symbolic
if location == 'initial':
# Using list comprehension instead of loops
# lagrange_ changed to l. Removed hardcoded prefix
self.bc_initial += [str(sympify(state.make_costate()) - self.derivative(sympify(cost_expr),state.sym, self.quantity_vars))
for state in states]
else:
# Using list comprehension instead of loops
self.bc_terminal += [str(sympify(state.make_costate()) - self.derivative(sympify(cost_expr),state.sym,self.quantity_vars))
for state in states]
def make_ham(self, problem):
"""!
\brief Symbolically create the Hamiltonian.
\author Michael Grant
\author Thomas Antony
\version 0.1
\date 06/30/15
"""
#TODO: Make symbolic
self.ham = sympify(problem.cost['path'].expr)
for i in range(len(problem.states())):
self.ham += sympify(self.costates[i]) * (sympify(problem.states()[i].process_eqn))
# Adjoin equality constraints
for i in range(len(self.equality_constraints)):
self.ham += sympify('mu'+str(i+1)) * (sympify(self.equality_constraints[i].expr))
def make_state_dep_ham(self, state, problem):
"""!
\brief Symbolically create the Hamiltonian.
\author Michael Grant
\author Sean Nolan
\version 0.1
\date 06/22/16
"""
H = sympify(0)
new_terms = sympify2(problem.cost['path'].expr)
atoms = new_terms.atoms()
if state in atoms:
H += new_terms
for i in range(len(problem.states())):
new_terms = sympify2(self.costates[i]) * (sympify2(problem.states()[i].process_eqn))
atoms = new_terms.atoms()
if state in atoms:
H += new_terms
# Adjoin equality constraints
for i in range(len(self.equality_constraints)):
new_terms = sympify2('mu' + str(i + 1)) * (sympify2(self.equality_constraints[i].expr))
atoms = new_terms.atoms()
if state in atoms:
H += new_terms
raw_terms = H.as_terms()
H_parts = [raw_terms[0][k][0] for k in range(len(raw_terms[0]))]
new_H = sympify(0)
for part in H_parts:
store = sympify(0)
tries = 0
while (part != store) & (tries <= 5):
store = part
part = part.expand()
tries += 1
raw_terms = part.as_terms()
sub_parts = [raw_terms[0][k][0] for k in range(len(raw_terms[0]))]
new_part = sympify(0)
for sub_part in sub_parts:
atoms = sub_part.atoms()
if state in atoms:
# sub_part = sub_part.factor()
new_part += sub_part
new_part = new_part.factor()
if len(str(new_part)) < 50:
new_part = new_part.simplify(ratio=1.0)
new_H += new_part
# print('New:' + str(len(str(new_H))))
# print('Old:' + str(len(str(H))))
return new_H
# Compiles a function template file into a function object
# using the given data
def compile_function(self,filename,verbose=False):
"""
Compiles a function specified by template in filename and stores it in
self.compiled
Returns:
bool: True if successful
Raises:
ValueError: If 'problem_data' or 'compiled' is not defined
"""
with open(filename) as f:
tmpl = f.read()
if self.problem_data is None:
raise ValueError('Problem data not defined. Unable to compile function.')
if self.compiled is None:
raise ValueError('Problem module not defined. Unable to compile function.')
# Render the template using the data
code = self.renderer.render(tmpl,self.problem_data)
# if verbose and 'compute_control' in filename:
# if verbose:
logging.debug(code)
        # For security
        self.compiled.__dict__.update({'__builtin__':{}})
        exec(code,self.compiled.__dict__)
        return True
# TODO: Maybe change all constraint limits (initial, terminal etc.) to be 'constants' that can be changed by continuation?
def sanitize_constraint(self,constraint,problem):
"""
Checks the initial/terminal constraint expression for invalid symbols
Also updates the constraint expression to reflect what would be in code
"""
if constraint.type == 'initial':
pattern = r'([\w\d\_]+)_0'
prefix = '_x0'
elif constraint.type == 'terminal':
pattern = r'([\w\d\_]+)_f'
prefix = '_xf'
else:
raise ValueError('Invalid constraint type')
m = _re.findall(pattern,constraint.expr)
invalid = [x for x in m if x not in problem.states()]
        if invalid:
            raise ValueError('Invalid expression(s) in boundary constraint:\n'+str(invalid))
# Create new variable for output to avoid mutating original object
output = Constraint()
output.type = constraint.type
output.unit = constraint.unit
output.expr = _re.sub(pattern,prefix+r"['\1']",constraint.expr)
return output
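    # Example (sketch): an initial constraint with expr 'h_0 - 1000' matches
    # the pattern r'([\w\d\_]+)_0' and is rewritten to "_x0['h'] - 1000",
    # i.e. the state value looked up in the initial-point vector of the
    # generated code.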
# def process_systems(self,problem):
# """Traverses dynamic systems list and extracts information"""
# for (system_type,system_list) in problem.systems.items():
# for idx,system_inst in enumerate(system_list):
# # new_states = [state.add_prefix(system_type+'_'+str(idx)+'_')
# # for state in system_inst.states]
# new_states = [state
# for state in system_inst.states]
# # print(new_states)
    # 1/4 s (c1-c2) = slope at x=0
    # when slope = 1, s = 4/(c1-c2)
    # when slope = 10, s = 40/(c1-c2)
def get_satfn(self, var, ubound=None, lbound=None, slopeAtZero=1):
        # var -> variable inside saturation function
if ubound is None and lbound is None:
raise ValueError('At least one bound should be specified for the constraint.')
if ubound == lbound:
raise ValueError('The upper and lower bounds should be different.')
        # One-sided bounds: use an exponential barrier instead of a sigmoid
        if ubound is None:
            # Lower bound only
            return lbound + exp(var)
        elif lbound is None:
            # Upper bound only
            return ubound - exp(-var)
        else:
            logging.debug(ubound)
            s = 4*slopeAtZero/(ubound - lbound)
            return ubound - ( ubound - lbound )/( 1 + exp(s*var) )
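    # Quick check of the slope algebra in the comment above (sketch): with
    # ubound=1, lbound=-1, slopeAtZero=1 we get s = 4*1/(1-(-1)) = 2, and the
    # derivative of ubound - (ubound-lbound)/(1+exp(s*var)) at var=0 is
    # s*(ubound-lbound)/4 = 2*2/4 = 1, matching the requested slope.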
def process_path_constraints(self, problem):
constraints = problem.constraints().get('path')
quantity_subs = self.quantity_vars.items()
path_cost_expr = sympify(problem.cost['path'].expr)
path_cost_unit = sympify(problem.cost['path'].unit)
if path_cost_expr == 0:
logging.debug('No path cost specified, using unit from terminal cost function')
problem.cost['path'].unit = problem.cost['terminal'].unit
path_cost_unit = sympify(problem.cost['terminal'].unit)
logging.debug('Path cost is of unit: '+str(path_cost_unit))
time_unit = Symbol('s')
for (ind,c) in enumerate(constraints):
# Determine order of constraint
logging.debug('Processing path constraint: '+c.label)
order = 0
cq = [sympify(c.expr)]
dxdt = [sympify(state.process_eqn) for state in problem.states()]
# Zeroth order constraints have no 'xi' state
xi_vars = []
h = []
while True:
control_found = False
for u in problem.controls():
if u.sym in cq[-1].subs(quantity_subs).atoms():
logging.info('Constraint is of order '+str(order))
control_found = True
break
if control_found:
break
dcdx = [self.derivative(cq[-1], state.sym, self.quantity_vars) for state in problem.states()]
            # Chain rule (Assume there is no explicit time-dependence) to find dcdt
cq.append(sum(d1*d2 for d1,d2 in zip(dcdx, dxdt)))
order = order + 1
# Create the auxiliary state variables
xi_vars.append(Symbol('xi'+str(ind+1)+str(order)))
# Create the smoothing control variable
xi_vars.append(Symbol('ue'+str(ind+1)))
# TODO: Fix constraint object to accept two limits
c_limit = sympify(c.limit)
if c_limit.is_Number:
# TODO: Allow continuation on constraints
# Define new hidden constant
c_limit = sympify('_'+c.label)
                logging.debug(c.limit)
problem.constant(str(c_limit),float(c.limit),c.unit)
logging.debug('Added constant '+str(c_limit))
if c.direction == '>':
c.lbound = c_limit
c.ubound = -c_limit
elif c.direction == '<':
c.ubound = c_limit
c.lbound = -c_limit
else:
raise ValueError('Invalid direction specified for constraint')
psi = self.get_satfn(xi_vars[0], ubound=c.ubound, lbound=c.lbound, slopeAtZero=1)
psi_vars = [(Symbol('psi'+str(ind+1)), psi)]
# Add to quantity list
self.quantity_vars[Symbol('psi'+str(ind+1))] = psi
self.quantity_list.append({'name':('psi'+str(ind+1)), 'expr':str(psi)})
# m-th order constraint needs up to m-th derivative of psi to be defined
psi_i = psi
for i in range(order):
psi_i = diff(psi_i, xi_vars[0])
# psi_vars.append((Symbol('psi'+str(ind+1)+str(i+1)+'('+str(xi_vars[0])+')'), psi_i))
psi_vars.append((Symbol('psi'+str(ind+1)+str(i+1)), psi_i))
self.quantity_vars[Symbol('psi'+str(ind+1)+str(i+1))] = psi_i
self.quantity_list.append({'name':('psi'+str(ind+1)+str(i+1)), 'expr':str(psi_i)})
# psi_vars = psi_vars + []
psi_var_sub = [(v,k) for k,v in psi_vars]
# FIXME: Hardcoded h derivatives for now
# h = [psi_vars[0][0]]
# h.append(psi_vars[1][0]*xi_vars[1]) # psi'*xi12
# h.append(psi_vars[2][0]*xi_vars[1] + psi_vars[1][0]*xi_vars[2]) # psi''*xi12 + psi'*xi13
# psi'''*xi12 + xi13*psi12'' + psi12*xi13 + psi11*ue1
# h.append(psi_vars[3][0]*xi_vars[1] + 2 * psi_vars[2][0]*xi_vars[2] + psi_vars[1][0]*xi_vars[3] )
#TODO: Hardcoded 't' as independent variable with unit of 's'
# c_vals = [80e3, -5000, 9.539074102210087] # third number is vdot at zero approx
c_vals = np.ones(order)*0.1
h = [psi_vars[0][1]]
for i in range(order):
# Add 'xi' state
problem.state(str(xi_vars[i]), str(xi_vars[i+1]),'('+c.unit+')/s^('+str(i)+')')
# Constraint all cq at initial point (forms constraints for xi_ij)
problem.constraints().initial(str(cq[i] - h[i]),'('+c.unit+')/s^('+str(i)+')')
# Add to initial guess vector
problem.guess.start.append(c_vals[i])
dhdxi = [diff(h[i], xi_v) for xi_v in xi_vars[:-1]]
dhdt = sum(d1*d2 for d1,d2 in zip(dhdxi,xi_vars[1:])) # xi11dot = xi12 etc.
dhdt = dhdt.subs(psi_var_sub)
h.append(dhdt)
# Add the smoothing control with the right unit
ue_unit = sympify('('+c.unit+')/(s^('+str(order)+'))')
problem.control(str(xi_vars[-1]), str(ue_unit))
logging.debug('Adding control '+str(xi_vars[-1])+' with unit '+str(ue_unit))
# Add equality constraint
cqi_unit = ue_unit*time_unit
problem.constraints().equality(str(cq[-1] - h[-1]),str(cqi_unit))
# Add smoothing factor
eps_const = Symbol('eps_'+c.label)
eps_unit = (path_cost_unit/ue_unit**2)/time_unit #Unit of integrand
problem.constant(str(eps_const), 1e-6, str(eps_unit))
logging.debug('Adding smoothing factor '+str(eps_const)+' with unit '+str(eps_unit))
# Append new control to path cost
path_cost_expr = path_cost_expr + eps_const*xi_vars[-1]**2
logging.debug('Updated path cost is: '+str(path_cost_expr))
problem.cost['path'].expr = str(path_cost_expr)
u_constraints = problem.constraints().get('control')
for (ind,c) in enumerate(u_constraints):
w_i = sympify('uw'+str(ind+1))
psi = self.get_satfn(w_i, ubound=sympify(c.ubound), lbound = sympify(c.lbound))
# Add the smoothing control
problem.control(str(w_i), c.unit)
# Add equality constraint
csym = sympify(c.expr)
problem.constraints().equality(str(csym - psi),c.unit)
            uw_unit = sympify2(c.unit)
eps_const = Symbol('eps_'+str(ind+1))
eps_unit = (path_cost_unit/uw_unit**2)/time_unit #Unit of integrand
problem.constant(str(eps_const), 1, str(eps_unit))
# problem.state('costC2','eps2*('+str(w_i)+'^2)','m^2/s^2')
    def process_quantities(self):
# Should this be moved into __init__ ?
# self.process_systems(problem)
logging.info('Processing quantity expressions')
# Process quantities
# Substitute all quantities that show up in other quantities with their expressions
# TODO: Sanitize quantity expressions
# TODO: Check for circular references in quantity expressions
if len(self.problem.quantity()) > 0:
quantity_subs = [(qty.var, qty.expr) for qty in self.problem.quantity()]
quantity_sym, quantity_expr = zip(*quantity_subs)
quantity_expr = [qty_expr.subs(quantity_subs) for qty_expr in quantity_expr]
# Use substituted expressions to recreate quantity expressions
quantity_subs = [(qty_var,qty_expr) for qty_var, qty_expr in zip(quantity_sym, quantity_expr)]
# Dictionary for use with mustache templating library
self.quantity_list = [{'name':str(qty_var), 'expr':str(qty_expr)} for qty_var, qty_expr in zip(quantity_sym, quantity_expr)]
# Dictionary for substitution
self.quantity_vars = dict(quantity_subs)
else:
self.quantity_list = quantity_subs = []
self.quantity_vars = {}
def make_costates(self):
self.costates = [state.make_costate() for state in self.problem.states()]
def get_bvp(self,problem,mode='dae'):
"""Perform variational calculus calculations on optimal control problem
and returns an object describing the boundary value problem to be solved
Returns: bvpsol.BVP object
"""
# Process intermediate variables
        self.process_quantities()
# Regularize path constraints using saturation functions
self.process_path_constraints(problem)
# self.state_subs = [(state.sym, sympify(state.process_eqn)) for state in problem.states()]
## Create costate list
# self.costates = [state.make_costate() for state in problem.states()]
        self.make_costates()
# for i in range(len(self.problem.states())):
# self.costates.append(self.problem.states()[i].make_costate())
# Build augmented cost strings
# aug_cost_init = sympify(problem.cost['initial'].expr)
self.make_aug_cost('initial')
# self.make_aug_cost(aug_cost_init, problem.constraints(), 'initial')
# aug_cost_term = sympify(problem.cost['terminal'].expr)
self.make_aug_cost('terminal')
# self.make_aug_cost(aug_cost_term, problem.constraints(), 'terminal')
# Add state boundary conditions
self.bc_initial = [self.sanitize_constraint(x,problem).expr
for x in problem.constraints().get('initial')]
self.bc_terminal = [self.sanitize_constraint(x,problem).expr
for x in problem.constraints().get('terminal')]
self.equality_constraints = problem.constraints().get('equality')
## Unconstrained arc calculations
# Construct Hamiltonian
self.make_ham(problem)
logging.debug('Hamiltonian : '+str(self.ham))
# Get list of all custom functions in the problem
# TODO: Check in places other than the Hamiltonian?
# TODO: Move to separate method?
func_list = sympify(self.ham).atoms(AppliedUndef)
# TODO: Change this to not be necessary
self.problem = problem
# Load required functions from the input file
new_functions = {(str(f.func),getattr(problem.input_module,str(f.func)))
for f in func_list
if hasattr(problem.input_module,str(f.func)) and
inspect.isfunction(getattr(problem.input_module,str(f.func)))}
problem.functions.update(new_functions)
undefined_func = [f.func for f in func_list if str(f.func) not in problem.functions]
        if undefined_func:
            raise ValueError('Invalid function(s) specified: '+str(undefined_func))
# Compute costate conditions
self.make_costate_bc(problem.states(),'initial')
self.make_costate_bc(problem.states(),'terminal')
# TODO: Make this more generalized free final time condition
# HARDCODED tf variable
time_constraints = problem.constraints().get('independent')
if len(time_constraints) > 0:
self.bc_terminal.append('tf - 1')
else:
# Add free final time boundary condition
self.bc_terminal.append('_H - 0')
# Compute costate process equations
if mode == 'numerical':
self.make_costate_rate_numeric(problem.states())
else:
self.make_costate_rate(problem.states())
self.make_ctrl_partial(problem.controls())
# # Add support for state and control constraints
# problem.state('xi11','xi12','m')
# problem.state('xi12','ue1','m')
# self.costates += ['eta11','eta12'] # Costates for xi
#
# # Add new states to hamiltonian
# h1_3 = '(psi12*xi12^2 + psi11*ue1)'; # xi12dot = ue1
# c1_2 = 'u'
# self.ham += sympify('eta11*xi12 + eta12*ue1') #
# self.ham += sympify('mu1 * ('+c1_2+' - '+h1_3')')
#
# # TODO: Compute these automatically
# self.costate_rates += ['mu1*(xi12**2*psi1_3 + psi12*ue1)',
# 'mu1*(2*psi12*xi12) - eta1']
#
# Compute unconstrained control law
# (need to add singular arc and bang/bang smoothing, numerical solutions)
self.make_ctrl(problem, mode)
# Create problem dictionary
# NEED TO ADD BOUNDARY CONDITIONS
# bc1 = [self.sanitize_constraint(x) for x in initial_bc]
self.problem_data = {
'aux_list': [
{
'type' : 'const',
'vars': [const.var for const in problem.constants()]
},
{
'type' : 'constraint',
'vars': []
},
{
'type' : 'function',
'vars' : [func_name for func_name in problem.functions]
}
],
# TODO: Generalize 'tf' to independent variable for current arc
'state_list':
[str(state) for state in problem.states()] +
[str(costate) for costate in self.costates] +
['tf']
,
'parameter_list': [str(param) for param in self.parameter_list],
'deriv_list':
['(tf)*(' + str(sympify(state.process_eqn)) + ')' for state in problem.states()] +
['(tf)*(' + str(costate_rate) + ')' for costate_rate in self.costate_rates] +
# ['(tf)*((' + str(costate_rate) + ').imag)' for costate_rate in self.costate_rates] +
['tf*0'] # TODO: Hardcoded 'tf'
,
'state_rate_list':
['(tf)*(' + str(sympify2(state.process_eqn)) + ')' for state in problem.states()]
,
'dae_var_list':
[str(dae_state) for dae_state in self.dae_states],
'dae_eom_list':
['(tf)*('+str(dae_eom)+')' for dae_eom in self.dae_equations],
'dae_var_num': len(self.dae_states),
'num_states': 2*len(problem.states()) + 1,
'dHdu': [str(dHdu) for dHdu in self.ham_ctrl_partial] + self.mu_lhs,
'left_bc_list': self.bc_initial + (self.dae_bc if (mode == 'dae') else []),
'right_bc_list': self.bc_terminal,
'control_options': [] if (mode == 'dae') else self.control_options,
'control_list': [str(u) for u in problem.controls()] + [str(mu) for mu in self.mu_vars],
'num_controls': len(problem.controls()) + len(self.mu_vars), # Count mu multipliers
'ham_expr':self.ham,
# 'contr_dep_ham': [],
'quantity_list': self.quantity_list,
# 'dae_mode': mode == 'dae',
}
# problem.constraints[i].expr for i in range(len(problem.constraints))
# Create problem functions by importing from templates
self.compiled = imp.new_module('_probobj_'+problem.name)
if mode == 'dae':
# self.template_suffix = '_dae' + self.template_suffix
self.template_suffix = '_dae_num' + self.template_suffix
if mode == 'num':
self.template_suffix = '_num' + self.template_suffix
compile_result = [self.compile_function(self.template_prefix+func+self.template_suffix, verbose=True)
for func in self.compile_list]
dhdu_fn = self.compiled.get_dhdu_func
dae_num = len(problem.controls()) + len(self.mu_vars)
self.bvp = BVP(self.compiled.deriv_func,self.compiled.bc_func,dae_func_gen=dhdu_fn,dae_num_states=dae_num)
self.bvp.solution.aux['const'] = dict((const.var,const.val) for const in problem.constants())
self.bvp.solution.aux['parameters'] = self.problem_data['parameter_list']
self.bvp.solution.aux['function'] = problem.functions
# TODO: Fix hardcoding of function handle name (may be needed for multivehicle/phases)?
self.bvp.control_func = self.compiled.compute_control
self.bvp.problem_data = self.problem_data
# TODO: ^^ Do same for constraint values
return self.bvp
if __name__ == '__main__':
problem = Problem('brachisto')
problem.quantity('foo','bar')
im = OCDAENumerical(problem)
im.run()
print(im.quantity_list)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
MIT License
Copyright (c) 2020-present Daniel [Mathtin] Shiko <wdaniil@mail.ru>
Project: Overlord discord bot
Contributors: Danila [DeadBlasoul] Popov <dead.blasoul@gmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
__author__ = "Mathtin"
import typing
from ..exceptions import InvalidConfigException
from typing import Any, Callable, Dict, Type, get_type_hints
class ConfigView(object):
# Class fields
_type_constructor_map: Dict[Type[Any], Callable[[Any, str], Any]] = {
int: lambda v, p: int(v),
float: lambda v, p: float(v),
bool: lambda v, p: bool(v),
str: lambda v, p: str(v),
list: lambda v, p: list(v),
dict: lambda v, p: dict(v),
}
_field_constructor_map: Dict[str, Callable[[Any, str], Any]] = None
# Instance fields
_path_prefix: str
def __init__(self, values: typing.Optional[Dict[str, Any]] = None, path_prefix: str = '') -> None:
if values is None:
values = {}
self._path_prefix = path_prefix
# Build {_field_constructor_map} for each class implementation
if self._field_constructor_map is None:
types = get_type_hints(self.__class__)
self.__class__._field_constructor_map = {
field: self.get_type_constructor(type_)
for field, type_ in types.items()
if not field.startswith('_')
}
# Construct each field value provided by {values}
for key, value in values.items():
if key not in self._field_constructor_map:
raise InvalidConfigException(f"Invalid key: {key}", self.path(key))
if value is not None:
constructor = self._field_constructor_map[key]
field_value = constructor(value, self.path(key))
setattr(self, key, field_value)
def get_type_constructor(self, type_: Type[Any]) -> Callable[[Any, str], Any]:
if type_ not in self._type_constructor_map:
self._type_constructor_map[type_] = self._resolve_constructor(type_)
return self._type_constructor_map[type_]
def _resolve_constructor(self, type_: Type[Any]) -> Callable[[Any, str], Any]:
# Primitive types already exist, only ConfigView and complex List/Dict type-hints are supported
if isinstance(type_, typing._GenericAlias):
# Resolve complex List type-hint
if type_._name == 'List':
sub_constructor = self.get_type_constructor(type_.__args__[0])
return lambda l, p: [sub_constructor(e, f'{p}[{i}]') for i, e in enumerate(l)]
# Resolve complex Dict type-hint
elif type_._name == 'Dict':
# Check key type
if type_.__args__[0] is not str:
raise TypeError(f"Unsupported dict key type hint: {type_.__args__[0]}")
sub_constructor = self.get_type_constructor(type_.__args__[1])
return lambda d, p: {k: sub_constructor(v, f'{p}.{k}') for k, v in d.items()}
# Other type-hints are not supported
raise TypeError(f"Unsupported type hint: {type_}")
# ConfigView are constructor-ready
if issubclass(type_, ConfigView):
return type_
raise TypeError(f"Unsupported type: {type_}")
def path(self, sub_path: str) -> str:
return f'{self._path_prefix}.{sub_path}' if self._path_prefix else sub_path
def get(self, path: str) -> Any:
if path == '.':
return self
parts = path.split('.')
node = self
for part in parts:
if not hasattr(node, part):
raise KeyError(f"Invalid path: {path}")
node = getattr(node, part)
return node
def to_dict(self) -> dict:
res = {}
for field in self._field_constructor_map:
value = getattr(self, field)
res[field] = self.deconstruct_obj(value)
return res
@staticmethod
def deconstruct_obj(o: Any) -> Any:
if isinstance(o, ConfigView):
return o.to_dict()
elif isinstance(o, list):
return [ConfigView.deconstruct_obj(v) for v in o]
elif isinstance(o, dict):
return {k: ConfigView.deconstruct_obj(v) for k, v in o.items()}
return o
def __iter__(self):
for field in self._field_constructor_map:
yield field, getattr(self, field)
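# Usage sketch (class and field names below are illustrative, not part of
# this module):
#
# class RoleConfig(ConfigView):
#     name: str = ''
#     weight: int = 0
#
# class BotConfig(ConfigView):
#     token: str = ''
#     roles: typing.List[RoleConfig] = []
#
# cfg = BotConfig({'token': 'abc', 'roles': [{'name': 'admin', 'weight': 10}]})
# cfg.get('roles')[0].name  # -> 'admin'
# cfg.to_dict()             # -> plain dicts/lists, the inverse of construction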
|
from simtk.openmm.app import *
from simtk.openmm import *
from simtk.unit import *
import os
import numpy as np
import pandas as pd
def read_reference_structure_for_q_calculation_4(oa, contact_threshold,rnative_dat, min_seq_sep=3, max_seq_sep=np.inf):
# use contact matrix for Q calculation
# this change use the canonical Qw/Qo calculation for reference Q
# for Qw calculation is 0; Qo is 1;
in_rnative = np.loadtxt(rnative_dat) # read in rnative_dat file for Q calculation
structure_interactions = []
chain_start = 0
count = 0
for i in range(oa.nres):
chain_start += count
count = 0
for j in range(oa.nres):
count +=1
# if abs(i-j) >= min_seq_sep and abs(i-j) <= max_seq_sep: # taking the signed value to avoid double counting
if j-i >= min_seq_sep and j-i <= max_seq_sep: # taking the signed value to avoid double counting
r_ijN = in_rnative[i][j]/10.0 * nanometers # convert to nm
if r_ijN < contact_threshold:
continue
sigma_ij = 0.1*abs(i-j)**0.15 # 0.1 nm = 1 A
gamma_ij = 1.0
i_index = oa.ca[i]
j_index = oa.ca[j]
structure_interaction = [i_index, j_index, [gamma_ij, r_ijN, sigma_ij]]
# print(i, j, r_ijN)
structure_interactions.append(structure_interaction)
return structure_interactions
def q_value_dat(oa, contact_threshold, rnative_dat="rnative.dat", min_seq_sep=3, max_seq_sep=np.inf):
### Added by Mingchen
### this function is solely used for template based modelling from rnative.dat file
### for details, refer to Chen, Lin & Lu Wolynes JCTC 2018
structure_interactions_tbm_q = read_reference_structure_for_q_calculation_4(oa, contact_threshold=contact_threshold,rnative_dat=rnative_dat, min_seq_sep=min_seq_sep, max_seq_sep=max_seq_sep)
normalization = len(structure_interactions_tbm_q)
qvalue_dat = CustomBondForce(f"(1/{normalization})*gamma_ij*exp(-(r-r_ijN)^2/(2*sigma_ij^2))")
qvalue_dat.addPerBondParameter("gamma_ij")
qvalue_dat.addPerBondParameter("r_ijN")
qvalue_dat.addPerBondParameter("sigma_ij")
for structure_interaction_tbm_q in structure_interactions_tbm_q:
qvalue_dat.addBond(*structure_interaction_tbm_q)
return qvalue_dat
def tbm_q_term(oa, k_tbm_q, rnative_dat="rnative.dat", tbm_q_min_seq_sep=3, tbm_q_cutoff=0.2*nanometers, tbm_q_well_width=0.1, target_q=1.0, forceGroup=26):
### Added by Mingchen Chen
### this function is solely used for template based modelling from rnative.dat file
### for details, refer to Chen, Lin & Lu Wolynes JCTC 2018
print("TBM_Q term ON")
tbm_q = CustomCVForce(f"{k_tbm_q}*(q-{target_q})^2")
q = q_value_dat(oa, contact_threshold=tbm_q_cutoff, rnative_dat=rnative_dat, min_seq_sep=tbm_q_min_seq_sep, max_seq_sep=np.inf)
tbm_q.addCollectiveVariable("q", q)
tbm_q.setForceGroup(forceGroup)
return tbm_q
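# Usage sketch (oa is an existing OpenAWSEM protein object; the spring
# constant and file name below are illustrative):
# system.addForce(tbm_q_term(oa, k_tbm_q=10*4.184, rnative_dat="rnative.dat"))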
def fragment_memory_term(oa, k_fm=0.04184, frag_file_list_file="./frag.mem", npy_frag_table="./frag_table.npy",
min_seq_sep=3, max_seq_sep=9, fm_well_width=0.1, UseSavedFragTable=True, caOnly=False, forceGroup=23):
    # 0.04184 = 0.01 * 4.184 # in kJ/mol, converted from the default value in LAMMPS AWSEM
k_fm *= oa.k_awsem
frag_table_rmin = 0
frag_table_rmax = 5 # in nm
frag_table_dr = 0.01
r_array = np.arange(frag_table_rmin, frag_table_rmax, frag_table_dr)
number_of_atoms = oa.natoms
r_table_size = int((frag_table_rmax - frag_table_rmin)/frag_table_dr) # 500 here.
raw_frag_table = np.zeros((number_of_atoms, 6*(1+max_seq_sep), r_table_size))
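    # indexed by (atom index, atom-index separation, distance bin); the factor of 6
    # allows for up to six beads per residue in the coarse-grained representation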
data_dic = {}
for i in range(oa.natoms):
if i in oa.ca:
res_id = oa.resi[i] # oa.resi start with 0, but pdb residue id start with 1
data_dic[("CA", 1+int(res_id))] = i
if i in oa.cb:
res_id = oa.resi[i]
data_dic[("CB", 1+int(res_id))] = i
# print(oa.res_type)
# print(oa.resi)
# print(data_dic)
frag_location_pre = os.path.dirname(frag_file_list_file)
# frag_file_list_file = frag_location_pre + "frags.mem"
# frag_table_file = frag_location_pre + "frag_table.npy"
frag_table_file = npy_frag_table
if os.path.isfile(frag_table_file) and UseSavedFragTable:
print(f"Reading Fragment table from {frag_table_file}.")
frag_table, interaction_list, interaction_pair_to_bond_index = np.load(frag_table_file, allow_pickle=True)
print(f"Fragment table loaded, number of bonds: {len(interaction_list)}")
frag_file_list = []
else:
print(f"Loading Fragment files(Gro files)")
frag_file_list = pd.read_csv(frag_file_list_file, skiprows=4, sep="\s+", names=["location", "target_start", "fragment_start", "frag_len", "weight"])
interaction_list = set()
for frag_index in range(len(frag_file_list)):
location = frag_file_list["location"].iloc[frag_index]
frag_name = os.path.join(frag_location_pre, location)
frag_len = frag_file_list["frag_len"].iloc[frag_index]
weight = frag_file_list["weight"].iloc[frag_index]
target_start = frag_file_list["target_start"].iloc[frag_index] # residue id
fragment_start = frag_file_list["fragment_start"].iloc[frag_index] # residue id
            frag = pd.read_csv(frag_name, skiprows=2, sep=r"\s+", header=None, names=["Res_id", "Res", "Type", "i", "x", "y", "z"])
frag = frag.query(f"Res_id >= {fragment_start} and Res_id < {fragment_start+frag_len} and (Type == 'CA' or Type == 'CB')")
w_m = weight
gamma_ij = 1
f = frag.values
for i in range(len(frag)):
for j in range(i, len(frag)):
res_id_i = frag["Res_id"].iloc[i]
res_id_j = frag["Res_id"].iloc[j]
target_res_id_i = frag["Res_id"].iloc[i] - fragment_start + target_start
target_res_id_j = frag["Res_id"].iloc[j] - fragment_start + target_start
seq_sep = res_id_j - res_id_i
if seq_sep > max_seq_sep:
continue
if seq_sep < min_seq_sep:
continue
try:
i_type = frag["Type"].iloc[i]
j_type = frag["Type"].iloc[j]
correspond_target_i = data_dic[(i_type, int(target_res_id_i))]
correspond_target_j = data_dic[(j_type, int(target_res_id_j))]
correspond_target_i = int(correspond_target_i)
correspond_target_j = int(correspond_target_j)
except Exception as e:
continue
fi_x = f[i][4]
fi_y = f[i][5]
fi_z = f[i][6]
fj_x = f[j][4]
fj_y = f[j][5]
fj_z = f[j][6]
# print("----", fi_x, fi_y, fi_z, fj_x, fj_y, fj_z)
sigma_ij = fm_well_width*seq_sep**0.15
rm = ((fi_x-fj_x)**2 + (fi_y-fj_y)**2 + (fi_z-fj_z)**2)**0.5
i_j_sep = int(correspond_target_j - correspond_target_i)
raw_frag_table[correspond_target_i][i_j_sep] += w_m*gamma_ij*np.exp((r_array-rm)**2/(-2.0*sigma_ij**2))
interaction_list.add((correspond_target_i, correspond_target_j))
if (not os.path.isfile(frag_table_file)) or (not UseSavedFragTable):
# Reduce memory usage.
print("Saving fragment table as npy file to speed up future calculation.")
number_of_bonds = len(interaction_list)
frag_table = np.zeros((number_of_bonds, r_table_size))
interaction_pair_to_bond_index = {}
for index, (i, j) in enumerate(interaction_list):
ij_sep = j - i
assert(ij_sep > 0)
frag_table[index] = raw_frag_table[i][ij_sep]
interaction_pair_to_bond_index[(i,j)] = index
np.save(frag_table_file, (frag_table, interaction_list, interaction_pair_to_bond_index))
print(f"All gro files information have been stored in the {frag_table_file}. \
\nYou might want to set the 'UseSavedFragTable'=True to speed up the loading next time. \
\nBut be sure to remove the .npy file if you modify the .mem file. otherwise it will keep using the old frag memeory.")
# fm = CustomNonbondedForce(f"-k_fm*((v2-v1)*r+v1*r_2-v2*r_1)/(r_2-r_1); \
# v1=frag_table(index_smaller, sep, r_index_1);\
# v2=frag_table(index_smaller, sep, r_index_2);\
# index_smaller=min(index1,index2);\
# sep=abs(index1-index2);\
# r_1=frag_table_rmin+frag_table_dr*r_index_1;\
# r_2=frag_table_rmin+frag_table_dr*r_index_2;\
# r_index_2=r_index_1+1;\
# r_index_1=floor(r/frag_table_dr);")
# for i in range(oa.natoms):
# fm.addParticle([i])
# # add interaction that are cutoff away
# # print(sorted(interaction_list))
# for (i, j) in interaction_list:
# fm.addInteractionGroup([i], [j])
# # add per-particle parameters
# fm.addPerParticleParameter("index")
# for edge case, that r > frag_table_rmax
max_r_index_1 = r_table_size - 2
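    # the expression below linearly interpolates the tabulated energy: v1 and v2
    # bracket r, and ((v2-v1)*r + v1*r_2 - v2*r_1)/(r_2-r_1) is the straight line
    # through (r_1, v1) and (r_2, v2) evaluated at r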
fm = CustomCompoundBondForce(2, f"-{k_fm}*((v2-v1)*r+v1*r_2-v2*r_1)/(r_2-r_1); \
v1=frag_table(index, r_index_1);\
v2=frag_table(index, r_index_2);\
r_1={frag_table_rmin}+{frag_table_dr}*r_index_1;\
r_2={frag_table_rmin}+{frag_table_dr}*r_index_2;\
r_index_2=r_index_1+1;\
r_index_1=min({max_r_index_1}, floor(r/{frag_table_dr}));\
r=distance(p1, p2);")
for (i, j) in interaction_list:
if caOnly and ((i not in oa.ca) or (j not in oa.ca)):
continue
fm.addBond([i, j], [interaction_pair_to_bond_index[(i,j)]])
fm.addPerBondParameter("index")
fm.addTabulatedFunction("frag_table",
Discrete2DFunction(len(interaction_list), r_table_size, frag_table.T.flatten()))
fm.setForceGroup(forceGroup)
return fm
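# Minimal usage sketch (illustrative only; assumes `oa` is an already-built
# openAWSEM protein object that also exposes a `system` attribute, which this
# module does not define):
#
#     fm = fragment_memory_term(oa, k_fm=0.04184, frag_file_list_file="./frag.mem")
#     oa.system.addForce(fm)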
def read_memory(oa, pdb_file, chain_name, target_start, fragment_start, length, weight, min_seq_sep, max_seq_sep, am_well_width=0.1):
memory_interactions = []
# if not os.path.isfile(pdb_file):
# pdbl = PDBList()
# pdbl.retrieve_pdb_file(pdb_file.split('.')[0].lower(), pdir='.')
# os.rename("pdb%s.ent" % pdb_id, "%s.pdb" % pdb_id)
parser = PDBParser()
structure = parser.get_structure('X', pdb_file)
chain = structure[0][chain_name]
residues = [x for x in chain if x.get_full_id()[3][1] in range(fragment_start,fragment_start+length-1)]
for i, residue_i in enumerate(residues):
for j, residue_j in enumerate(residues):
if abs(i-j) > max_seq_sep:
continue
target_index_i = target_start + i - 1
target_index_j = target_start + j - 1
atom_list_i = []
target_atom_list_i = []
atom_list_j = []
target_atom_list_j = []
if i-j >= min_seq_sep: # taking the signed value to avoid double counting
ca_i = residue_i['CA']
atom_list_i.append(ca_i)
target_atom_list_i.append(oa.ca[target_index_i])
ca_j = residue_j['CA']
atom_list_j.append(ca_j)
target_atom_list_j.append(oa.ca[target_index_j])
if not residue_i.get_resname() == "GLY" and oa.cb[target_index_i] >= 0:
cb_i = residue_i['CB']
atom_list_i.append(cb_i)
target_atom_list_i.append(oa.cb[target_index_i])
if not residue_j.get_resname() == "GLY" and oa.cb[target_index_j] >= 0:
cb_j = residue_j['CB']
atom_list_j.append(cb_j)
target_atom_list_j.append(oa.cb[target_index_j])
for atom_i, atom_j in product(atom_list_i, atom_list_j):
particle_1 = target_atom_list_i[atom_list_i.index(atom_i)]
particle_2 = target_atom_list_j[atom_list_j.index(atom_j)]
r_ijm = abs(atom_i - atom_j)/10.0 # convert to nm
sigma_ij = am_well_width*abs(i-j)**0.15 # 0.1 nm = 1 A
gamma_ij = 1.0
w_m = weight
memory_interaction = [particle_1, particle_2, [w_m, gamma_ij, r_ijm, sigma_ij]]
memory_interactions.append(memory_interaction)
return memory_interactions
def associative_memory_term(oa, memories, k_am=0.8368, min_seq_sep=3, max_seq_sep=9, am_well_width=0.1):
# 0.8368 = 0.2 * 4.184 # in kJ/mol, converted from default value in LAMMPS AWSEM
    # each memory entry: (pdb_file, chain_name, target_start, fragment_start, length, weight)
# multiply interaction strength by overall scaling
k_am *= oa.k_awsem
am_function = '-k_am*w_m*gamma_ij*exp(-(r-r_ijm)^2/(2*sigma_ij^2))'
am = CustomBondForce(am_function)
am.addGlobalParameter('k_am', k_am)
am.addPerBondParameter('w_m')
am.addPerBondParameter('gamma_ij')
am.addPerBondParameter('r_ijm')
am.addPerBondParameter('sigma_ij')
for memory in memories:
memory_interactions = read_memory(oa, *memory, min_seq_sep, max_seq_sep, am_well_width=am_well_width)
for memory_interaction in memory_interactions:
am.addBond(*memory_interaction)
return am
def density_dependent_associative_memory_term(oa, memories, k_am_dd=1.0, am_dd_min_seq_sep=3, am_dd_max_seq_sep=9, eta_density=50, r_density_min=.45, r_density_max=.65, density_alpha=1.0, density_normalization=2.0, rho0=2.6, am_well_width=0.1, density_min_seq_sep=10, density_only_from_native_contacts=False, density_pdb_file=None, density_chain_name=None, density_native_contact_min_seq_sep=4, density_native_contact_threshold=0.8*nanometers):
k_am_dd *= oa.k_awsem
am_dd = CustomGBForce()
# add all particles to force
for i in range(oa.natoms):
am_dd.addParticle([i])
# add per-particle parameters
am_dd.addPerParticleParameter("index")
# add global parameters
am_dd.addGlobalParameter("k_am_dd", k_am_dd)
am_dd.addGlobalParameter("eta_density", eta_density)
am_dd.addGlobalParameter("r_density_min", r_density_min)
am_dd.addGlobalParameter("r_density_max", r_density_max)
am_dd.addGlobalParameter("density_alpha", density_alpha)
am_dd.addGlobalParameter("density_normalization", density_normalization)
am_dd.addGlobalParameter("rho0", rho0)
# if density_only_from_native_contacts, read structure to get native contacts
if density_only_from_native_contacts:
structure_interactions = read_amhgo_structure(oa, pdb_file=density_pdb_file, chain_name=density_chain_name, amhgo_min_seq_sep=density_native_contact_min_seq_sep, amhgo_contact_threshold=density_native_contact_threshold, amhgo_well_width=0.1) # the well width is not used, so the value doesn't matter
native_contacts = []
for interaction in structure_interactions:
i_index, j_index, [gamma_ij, r_ijN, sigma_ij] = interaction
native_contacts.append((i_index, j_index))
native_contacts.append((j_index, i_index))
# setup tabulated functions and interactions
density_gamma_ij = [0.0]*oa.natoms*oa.natoms
for i in range(oa.natoms):
for j in range(oa.natoms):
            if (i in oa.cb or (oa.res_type[oa.resi[i]] == "IGL" and i in oa.ca)) and (j in oa.cb or (oa.res_type[oa.resi[j]] == "IGL" and j in oa.ca)) and abs(oa.resi[i]-oa.resi[j])>=density_min_seq_sep:
if not density_only_from_native_contacts or (i, j) in native_contacts or (j, i) in native_contacts:
density_gamma_ij[i+j*oa.natoms] = 1.0
density_gamma_ij[j+i*oa.natoms] = 1.0
am_dd.addTabulatedFunction("density_gamma_ij", Discrete2DFunction(oa.natoms, oa.natoms, density_gamma_ij))
gamma_ij = [0.0]*oa.natoms*oa.natoms*len(memories)
sigma_ij = [0.1]*oa.natoms*oa.natoms*len(memories)
r_ijm = [0.0]*oa.natoms*oa.natoms*len(memories)
for k, memory in enumerate(memories):
memory_interactions = read_memory(oa, *memory, am_dd_min_seq_sep, am_dd_max_seq_sep, am_well_width=am_well_width)
for memory_interaction in memory_interactions:
i, j, (w_m, gamma, r, sigma) = memory_interaction
gamma_ij[i+j*oa.natoms+k*oa.natoms*oa.natoms] = gamma
gamma_ij[j+i*oa.natoms+k*oa.natoms*oa.natoms] = gamma
sigma_ij[i+j*oa.natoms+k*oa.natoms*oa.natoms] = sigma
sigma_ij[j+i*oa.natoms+k*oa.natoms*oa.natoms] = sigma
r_ijm[i+j*oa.natoms+k*oa.natoms*oa.natoms] = r
r_ijm[j+i*oa.natoms+k*oa.natoms*oa.natoms] = r
am_dd.addTabulatedFunction("gamma_ij", Discrete3DFunction(oa.natoms, oa.natoms, len(memories), gamma_ij))
am_dd.addTabulatedFunction("sigma_ij", Discrete3DFunction(oa.natoms, oa.natoms, len(memories), sigma_ij))
am_dd.addTabulatedFunction("r_ijm", Discrete3DFunction(oa.natoms, oa.natoms, len(memories), r_ijm))
# add computed values
# compute the density
am_dd.addComputedValue("rho", "0.25*density_gamma_ij(index1, index2)*(1+tanh(eta_density*(r-r_density_min)))*(1+tanh(eta_density*(r_density_max-r)))", CustomGBForce.ParticlePair)
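    # the product of tanh switches is ~1 only when r lies inside
    # [r_density_min, r_density_max], so rho smoothly counts neighbors in that
    # shell, with eta_density setting the sharpness of the window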
# function that determines how the AM term depends on density
#f_string = "0.25*(1-tanh(eta_density*(rho0-rho1)))*(1-tanh(eta_density*(rho0-rho2)))" # both residues must be buried for the interaction to be active
f_string = "1-(0.25*(1-tanh(eta_density*(rho1-rho0)))*(1-tanh(eta_density*(rho2-rho0))))" # one residue being buried is enough for the interaction to be active
# add energy term for each memory
for k, memory in enumerate(memories):
memory_interactions = read_memory(oa, *memory, am_dd_min_seq_sep, am_dd_max_seq_sep, am_well_width=am_well_width)
for memory_interaction in memory_interactions:
i, j, (w_m, gamma, r, sigma) = memory_interaction
am_dd.addEnergyTerm("-k_am_dd*(density_alpha*f*density_normalization*beta_ij+(1-density_alpha)*beta_ij);\
beta_ij=%f*gamma_ij(index1,index2,%d)*exp(-(r-r_ijm(index1,index2,%d))^2/(2*sigma_ij(index1,index2,%d)^2));\
f=%s" % (w_m, k, k, k, f_string), CustomGBForce.ParticlePair)
return am_dd
def read_amhgo_structure(oa, pdb_file, chain_name, amhgo_min_seq_sep=4, amhgo_contact_threshold=0.8*nanometers, amhgo_well_width=0.1):
structure_interactions = []
from Bio.PDB import PDBParser
import itertools
parser = PDBParser()
structure = parser.get_structure('X', pdb_file)
chain = structure[0][chain_name]
residues = [x for x in chain]
for i, residue_i in enumerate(residues):
for j, residue_j in enumerate(residues):
ca_list = []
cb_list = []
atom_list_i = []
atom_list_j = []
if i-j >= amhgo_min_seq_sep: # taking the signed value to avoid double counting
ca_i = residue_i['CA']
ca_list.append(ca_i)
atom_list_i.append(ca_i)
ca_j = residue_j['CA']
ca_list.append(ca_j)
atom_list_j.append(ca_j)
if (residue_i.get_resname() != "GLY") and (residue_i.get_resname() != "IGL"):
cb_i = residue_i['CB']
cb_list.append(cb_i)
atom_list_i.append(cb_i)
if (residue_j.get_resname() != "GLY") and (residue_j.get_resname() != "IGL"):
cb_j = residue_j['CB']
cb_list.append(cb_j)
atom_list_j.append(cb_j)
for atom_i, atom_j in itertools.product(atom_list_i, atom_list_j):
r_ijN = abs(atom_i - atom_j)/10.0*nanometers # convert to nm
if r_ijN <= amhgo_contact_threshold:
sigma_ij = amhgo_well_width*abs(i-j)**0.15 # 0.1 nm = 1 A
gamma_ij = 1.0
if atom_i in ca_list:
i_index = oa.ca[i]
if atom_i in cb_list:
i_index = oa.cb[i]
if atom_j in ca_list:
j_index = oa.ca[j]
if atom_j in cb_list:
j_index = oa.cb[j]
structure_interaction = [i_index, j_index, [gamma_ij, r_ijN, sigma_ij]]
# print(i_index, j_index, gamma_ij, r_ijN, sigma_ij)
structure_interactions.append(structure_interaction)
return structure_interactions
def additive_amhgo_term(oa, pdb_file, chain_name, k_amhgo=4.184, amhgo_min_seq_sep=3, amhgo_contact_threshold=0.8*nanometers, amhgo_well_width=0.1, forceGroup=22):
import itertools
# multiply interaction strength by overall scaling
print("AMH-GO structure based term is ON")
k_amhgo *= oa.k_awsem
# create contact force
amhgo = CustomBondForce(f"-{k_amhgo}*gamma_ij*exp(-(r-r_ijN)^2/(2*sigma_ij^2))")
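    # one Gaussian well per native contact, centered at the native distance r_ijN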
# # add global parameters
amhgo.addPerBondParameter("gamma_ij")
amhgo.addPerBondParameter("r_ijN")
amhgo.addPerBondParameter("sigma_ij")
# create bonds
structure_interactions = read_amhgo_structure(oa, pdb_file, chain_name, amhgo_min_seq_sep, amhgo_contact_threshold, amhgo_well_width=amhgo_well_width)
# print(structure_interactions)
for structure_interaction in structure_interactions:
# print(structure_interaction)
amhgo.addBond(*structure_interaction)
# amhgo.setForceGroup(22)
amhgo.setForceGroup(forceGroup)
return amhgo
def er_term(oa, k_er=4.184, er_min_seq_sep=2, er_cutoff=99.0, er_well_width=0.1, forceGroup=25):
    ### this is a structure-prediction related term, adapted from Sirovitz, Schafer & Wolynes 2017 Protein Science
    ### see the original paper for reference: it makes AWSEM into AWSEM-ER with evolutionary restraints
    ### ER restraints can be obtained from multiple sources (RaptorX, DeepContact, and Gremlin)
    ### the term is modified from the amh-go term; the current strength seems high and needs to be lowered somehow
    ### an amh-go normalization factor will be added soon, based on Eastwood & Wolynes 2000 JCP
print("ER term is ON")
import itertools
k_er *= oa.k_awsem
# create contact force
er = CustomBondForce("-k_er*gamma_ij*exp(-(r-r_ijN)^2/(2*sigma_ij^2))")
# # add global parameters
er.addGlobalParameter("k_er", k_er)
er.addPerBondParameter("gamma_ij")
er.addPerBondParameter("r_ijN")
er.addPerBondParameter("sigma_ij")
structure_interactions_er = []
### read in dat files from contact predictions;
in_rnativeCACA = np.loadtxt('go_rnativeCACA.dat')
in_rnativeCACB = np.loadtxt('go_rnativeCACB.dat')
in_rnativeCBCB = np.loadtxt('go_rnativeCBCB.dat')
for i in range(oa.nres):
for j in range(oa.nres):
if abs(i-j) >= er_min_seq_sep and in_rnativeCACA[i][j]<er_cutoff:
sigma_ij = er_well_width*abs(i-j)**0.15 # 0.1 nm = 1 A
gamma_ij = 1.0
r_ijN = in_rnativeCACA[i][j]/10.0*nanometers
structure_interactions_er.append([oa.ca[i], oa.ca[j], [gamma_ij, r_ijN, sigma_ij]])
if abs(i-j) >= er_min_seq_sep and in_rnativeCACB[i][j]<er_cutoff and oa.cb[j]!= -1:
sigma_ij = er_well_width*abs(i-j)**0.15 # 0.1 nm = 1 A
gamma_ij = 1.0
r_ijN = in_rnativeCACB[i][j]/10.0*nanometers
structure_interactions_er.append([oa.ca[i], oa.cb[j], [gamma_ij, r_ijN, sigma_ij]])
if abs(i-j) >= er_min_seq_sep and in_rnativeCBCB[i][j]<er_cutoff and oa.cb[j]!= -1 and oa.cb[i]!= -1:#oa.res_type[oa.resi[i]] != "IGL" and oa.res_type[oa.resi[j]] != "IGL":
sigma_ij = er_well_width*abs(i-j)**0.15 # 0.1 nm = 1 A
gamma_ij = 1.0
r_ijN = in_rnativeCBCB[i][j]/10.0*nanometers
structure_interactions_er.append([oa.cb[i], oa.cb[j], [gamma_ij, r_ijN, sigma_ij]])
# print([i, j, oa.res_type[oa.resi[i]], oa.res_type[oa.resi[j]],oa.cb[i], oa.cb[j], [gamma_ij, r_ijN, sigma_ij]])
# create bonds
for structure_interaction_er in structure_interactions_er:
er.addBond(*structure_interaction_er)
er.setForceGroup(forceGroup)
return er
def machine_learning_term(oa, k=1*kilocalorie_per_mole, dataFile="dist.npz", UseSavedFile=False, saved_file="ml_data.npz", forceGroup=4):
k_ml = k.value_in_unit(kilojoule_per_mole) # convert to kilojoule_per_mole, openMM default uses kilojoule_per_mole as energy.
k_ml = k_ml * oa.k_awsem
x = [0.0, 2.0, 3.5, 4.25, 4.75, 5.25, 5.75, 6.25, 6.75, 7.25, 7.75, 8.25, 8.75, 9.25, 9.75, 10.25, 10.75, 11.25, 11.75, 12.25, 12.75, 13.25, 13.75, 14.25, 14.75, 15.25, 15.75, 16.25, 16.75, 17.25, 17.75, 18.25, 18.75, 19.25, 19.75]
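    # distance grid (in angstroms) on which the raw spline data is tabulated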
num_of_points = 100
if UseSavedFile and os.path.isfile(saved_file):
data = np.load(saved_file)
index_array = data["index_array"]
interaction_array = data["interaction_array"]
else:
# spline fit
a = np.load(dataFile)
distspline = a['distspline']
n = distspline.shape[0]
interaction_list = []
index_list = []
xnew = np.linspace(min(x), max(x), num=num_of_points, endpoint=True)
for i in range(n):
for j in range(i+1, n):
                if np.all(distspline[i][j] == 0):
continue
y = distspline[i][j]
f = interp1d(x, y)
ynew = f(xnew)
interaction_list.append(ynew)
index_list.append([i, j])
index_array = np.array(index_list)
interaction_array = np.array(interaction_list)
np.savez(saved_file, index_array=index_array, interaction_array=interaction_array)
interaction_n = index_array.shape[0]
r_max = max(x)
r_min = min(x)
dr = (r_max-r_min)/(num_of_points-1)
max_r_index_1 = num_of_points - 2
ml = CustomCompoundBondForce(2, f"{k_ml}*((v2-v1)*r+v1*r_2-v2*r_1)/(r_2-r_1); \
v1=ml_table(index, r_index_1);\
v2=ml_table(index, r_index_2);\
r_1={r_min}+{dr}*r_index_1;\
r_2={r_min}+{dr}*r_index_2;\
r_index_2=r_index_1+1;\
r_index_1=min({max_r_index_1}, floor(r/{dr}));\
r=min(r_raw, {r_max});\
r_raw=distance(p1, p2)*10;")
cb_fixed = [x if x > 0 else y for x,y in zip(oa.cb,oa.ca)]
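    # use the CB bead where available, otherwise fall back to CA (e.g. for glycine)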
for idx, index_pair in enumerate(index_array):
resi,resj = index_pair
i = cb_fixed[resi]
j = cb_fixed[resj]
ml.addBond([i, j], [idx])
ml.addPerBondParameter("index")
ml.addTabulatedFunction("ml_table",
Discrete2DFunction(interaction_n, num_of_points, interaction_array.T.flatten()))
ml.setForceGroup(forceGroup)
return ml
def machine_learning_dihedral_omega_angle_term(oa, k=1*kilocalorie_per_mole, dataFile="omega.npz", UseSavedFile=False, saved_file="ml_data.npz", forceGroup=4):
k_ml_angle = k.value_in_unit(kilojoule_per_mole) # convert to kilojoule_per_mole, openMM default uses kilojoule_per_mole as energy.
k_ml_angle = k_ml_angle * oa.k_awsem
omega = np.load(dataFile)
omegaspline = omega["omegaspline"]
omega = "-3.53429174 -3.27249235 -3.01069296 -2.74889357 -2.48709418 -2.2252948\
-1.96349541 -1.70169602 -1.43989663 -1.17809725 -0.91629786 -0.65449847\
-0.39269908 -0.13089969 0.13089969 0.39269908 0.65449847 0.91629786\
1.17809725 1.43989663 1.70169602 1.96349541 2.2252948 2.48709418\
2.74889357 3.01069296 3.27249235 3.53429174"
omega_x = [float(a) for a in omega.split()]
# spline fit
x = omega_x
spline = omegaspline
num_of_points = 100
n = spline.shape[0]
interaction_list = []
index_list = []
xnew = np.linspace(min(x), max(x), num=num_of_points, endpoint=True)
for i in range(n):
for j in range(i+1, n):
            if np.all(spline[i][j] == 0):
continue
y = spline[i][j]
f = interp1d(x, y, kind='cubic')
ynew = f(xnew)
interaction_list.append(ynew)
index_list.append([i, j])
index_array = np.array(index_list)
interaction_array = np.array(interaction_list)
angle_max = max(x)
angle_min = min(x)
dangle = (angle_max-angle_min)/(num_of_points-1)
max_angle_index_1 = num_of_points - 2
interaction_n = index_array.shape[0]
ml = CustomCompoundBondForce(4, f"{k_ml_angle}*omegaEnergy;\
omegaEnergy=((v2-v1)*angle+v1*angle_2-v2*angle_1)/(angle_2-angle_1); \
v1=ml_table(index, angle_index_1);\
v2=ml_table(index, angle_index_2);\
angle_1={angle_min}+{dangle}*angle_index_1;\
angle_2={angle_min}+{dangle}*angle_index_2;\
angle_index_2=angle_index_1+1;\
angle_index_1=min({max_angle_index_1}, floor((angle-{angle_min})/{dangle}));\
angle=dihedral(p1, p2, p3, p4);")
for idx, index_pair in enumerate(index_array):
resi,resj = index_pair
p0 = oa.ca[resi]
p1 = oa.cb[resi]
p2 = oa.cb[resj]
p3 = oa.ca[resj]
if p1 == -1 or p2 == -1:
continue
ml.addBond([p0, p1, p2, p3], [idx])
ml.addPerBondParameter("index")
ml.addTabulatedFunction("ml_table",
Discrete2DFunction(interaction_n, num_of_points, interaction_array.T.flatten()))
ml.setForceGroup(forceGroup)
return ml
def machine_learning_dihedral_theta_angle_term(oa, k=1*kilocalorie_per_mole, dataFile="theta.npz", forceGroup=4):
k_ml_angle = k.value_in_unit(kilojoule_per_mole) # convert to kilojoule_per_mole, openMM default uses kilojoule_per_mole as energy.
k_ml_angle = k_ml_angle * oa.k_awsem
theta = np.load(dataFile)
thetaspline = theta["thetaspline"]
theta = "-3.53429174 -3.27249235 -3.01069296 -2.74889357 -2.48709418 -2.2252948\
-1.96349541 -1.70169602 -1.43989663 -1.17809725 -0.91629786 -0.65449847\
-0.39269908 -0.13089969 0.13089969 0.39269908 0.65449847 0.91629786\
1.17809725 1.43989663 1.70169602 1.96349541 2.2252948 2.48709418\
2.74889357 3.01069296 3.27249235 3.53429174"
theta_x = [float(a) for a in theta.split()]
# spline fit
x = theta_x
spline = thetaspline
num_of_points = 100
n = spline.shape[0]
interaction_list = []
index_list = []
xnew = np.linspace(min(x), max(x), num=num_of_points, endpoint=True)
for i in range(n):
for j in range(i+1, n):
            if np.all(spline[i][j] == 0):
continue
y = spline[i][j]
f = interp1d(x, y, kind='cubic')
ynew = f(xnew)
interaction_list.append(ynew)
index_list.append([i, j])
index_array = np.array(index_list)
interaction_array = np.array(interaction_list)
angle_max = max(x)
angle_min = min(x)
dangle = (angle_max-angle_min)/(num_of_points-1)
max_angle_index_1 = num_of_points - 2
interaction_n = index_array.shape[0]
ml = CustomCompoundBondForce(4, f"{k_ml_angle}*omegaEnergy;\
omegaEnergy=((v2-v1)*angle+v1*angle_2-v2*angle_1)/(angle_2-angle_1); \
v1=ml_table(index, angle_index_1);\
v2=ml_table(index, angle_index_2);\
angle_1={angle_min}+{dangle}*angle_index_1;\
angle_2={angle_min}+{dangle}*angle_index_2;\
angle_index_2=angle_index_1+1;\
angle_index_1=min({max_angle_index_1}, floor((angle-{angle_min})/{dangle}));\
angle=dihedral(p1, p2, p3, p4);")
for idx, index_pair in enumerate(index_array):
resi,resj = index_pair
p0 = oa.n[resi]
p1 = oa.ca[resi]
p2 = oa.cb[resi]
p3 = oa.cb[resj]
if p0 == -1 or p2 == -1 or p3 == -1:
continue
ml.addBond([p0, p1, p2, p3], [idx])
ml.addPerBondParameter("index")
ml.addTabulatedFunction("ml_table",
Discrete2DFunction(interaction_n, num_of_points, interaction_array.T.flatten()))
ml.setForceGroup(forceGroup)
return ml
def machine_learning_dihedral_phi_angle_term(oa, k=1*kilocalorie_per_mole, dataFile="phi.npz", forceGroup=4):
k_ml_angle = k.value_in_unit(kilojoule_per_mole) # convert to kilojoule_per_mole, openMM default uses kilojoule_per_mole as energy.
k_ml_angle = k_ml_angle * oa.k_awsem
phi = np.load(dataFile)
phispline = phi["phispline"]
phi = "-0.39269908 -0.13089969 0.13089969 0.39269908 0.65449847 0.91629786\
1.17809725 1.43989663 1.70169602 1.96349541 2.2252948 2.48709418\
2.74889357 3.01069296 3.27249235 3.53429174"
phi_x = [float(a) for a in phi.split()]
# spline fit
x = phi_x
spline = phispline
num_of_points = 100
n = spline.shape[0]
interaction_list = []
index_list = []
xnew = np.linspace(min(x), max(x), num=num_of_points, endpoint=True)
for i in range(n):
for j in range(i+1, n):
            if np.all(spline[i][j] == 0):
continue
y = spline[i][j]
f = interp1d(x, y, kind='cubic')
ynew = f(xnew)
interaction_list.append(ynew)
index_list.append([i, j])
index_array = np.array(index_list)
interaction_array = np.array(interaction_list)
angle_max = max(x)
angle_min = min(x)
dangle = (angle_max-angle_min)/(num_of_points-1)
max_angle_index_1 = num_of_points - 2
interaction_n = index_array.shape[0]
ml = CustomCompoundBondForce(3, f"{k_ml_angle}*omegaEnergy;\
omegaEnergy=((v2-v1)*angle+v1*angle_2-v2*angle_1)/(angle_2-angle_1); \
v1=ml_table(index, angle_index_1);\
v2=ml_table(index, angle_index_2);\
angle_1={angle_min}+{dangle}*angle_index_1;\
angle_2={angle_min}+{dangle}*angle_index_2;\
angle_index_2=angle_index_1+1;\
angle_index_1=min({max_angle_index_1}, floor((angle-{angle_min})/{dangle}));\
angle=angle(p1, p2, p3);")
for idx, index_pair in enumerate(index_array):
resi,resj = index_pair
p0 = oa.ca[resi]
p1 = oa.cb[resi]
p2 = oa.cb[resj]
if p1 == -1 or p2 == -1:
continue
ml.addBond([p0, p1, p2], [idx])
ml.addPerBondParameter("index")
ml.addTabulatedFunction("ml_table",
Discrete2DFunction(interaction_n, num_of_points, interaction_array.T.flatten()))
ml.setForceGroup(forceGroup)
return ml
'''
# will be deleted in the future.
def read_reference_structure_for_q_calculation(oa, pdb_file, chain_name, min_seq_sep=3, max_seq_sep=np.inf, contact_threshold=0.8*nanometers):
structure_interactions = []
parser = PDBParser()
structure = parser.get_structure('X', pdb_file)
chain = structure[0][chain_name]
residues = [x for x in chain]
for i, residue_i in enumerate(residues):
for j, residue_j in enumerate(residues):
ca_list = []
cb_list = []
atom_list_i = []
atom_list_j = []
if i-j >= min_seq_sep and i-j <= max_seq_sep: # taking the signed value to avoid double counting
ca_i = residue_i['CA']
ca_list.append(ca_i)
atom_list_i.append(ca_i)
ca_j = residue_j['CA']
ca_list.append(ca_j)
atom_list_j.append(ca_j)
if not residue_i.get_resname() == "GLY":
cb_i = residue_i['CB']
cb_list.append(cb_i)
atom_list_i.append(cb_i)
if not residue_j.get_resname() == "GLY":
cb_j = residue_j['CB']
cb_list.append(cb_j)
atom_list_j.append(cb_j)
for atom_i, atom_j in product(atom_list_i, atom_list_j):
r_ijN = abs(atom_i - atom_j)/10.0*nanometers # convert to nm
if r_ijN <= contact_threshold:
sigma_ij = 0.1*abs(i-j)**0.15 # 0.1 nm = 1 A
gamma_ij = 1.0
if atom_i in ca_list:
i_index = oa.ca[i]
if atom_i in cb_list:
i_index = oa.cb[i]
if atom_j in ca_list:
j_index = oa.ca[j]
if atom_j in cb_list:
j_index = oa.cb[j]
structure_interaction = [i_index, j_index, [gamma_ij, r_ijN, sigma_ij]]
structure_interactions.append(structure_interaction)
return structure_interactions
def read_reference_structure_for_q_calculation_2(oa, pdb_file, min_seq_sep=3, max_seq_sep=np.inf, contact_threshold=0.8*nanometers):
# default use all chains in pdb file.
structure_interactions = []
parser = PDBParser()
structure = parser.get_structure('X', pdb_file)
model = structure[0]
chain_start = 0
count = 0
for chain in model.get_chains():
chain_start += count
count = 0
for i, residue_i in enumerate(chain.get_residues()):
count += 1
# print(i, residue_i)
for j, residue_j in enumerate(chain.get_residues()):
ca_list = []
cb_list = []
atom_list_i = []
atom_list_j = []
if i-j >= min_seq_sep and i-j <= max_seq_sep: # taking the signed value to avoid double counting
ca_i = residue_i['CA']
ca_list.append(ca_i)
atom_list_i.append(ca_i)
ca_j = residue_j['CA']
ca_list.append(ca_j)
atom_list_j.append(ca_j)
if not residue_i.get_resname() == "GLY":
cb_i = residue_i['CB']
cb_list.append(cb_i)
atom_list_i.append(cb_i)
if not residue_j.get_resname() == "GLY":
cb_j = residue_j['CB']
cb_list.append(cb_j)
atom_list_j.append(cb_j)
for atom_i, atom_j in product(atom_list_i, atom_list_j):
r_ijN = abs(atom_i - atom_j)/10.0*nanometers # convert to nm
if r_ijN <= contact_threshold:
sigma_ij = 0.1*abs(i-j)**0.15 # 0.1 nm = 1 A
gamma_ij = 1.0
if atom_i in ca_list:
i_index = oa.ca[i+chain_start]
if atom_i in cb_list:
i_index = oa.cb[i+chain_start]
if atom_j in ca_list:
j_index = oa.ca[j+chain_start]
if atom_j in cb_list:
j_index = oa.cb[j+chain_start]
structure_interaction = [i_index, j_index, [gamma_ij, r_ijN, sigma_ij]]
structure_interactions.append(structure_interaction)
return structure_interactions
'''
|
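# Presumably each y_i is the average of the other N-1 values: summing gives
# sum(y) = S, the total of the original sequence, hence x_i = S - (N-1)*y_i.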
N = int(input())
y = list(map(int, input().split()))
sy = sum(y)
print(*[sy - yi * (N - 1) for yi in y])
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from django.contrib import admin
class CloudAdmin(admin.ModelAdmin):
search_fields = ("name",)
list_display = ("name",)
save_on_top = True
|
import sys
import qtvscodestyle as qtvsc
from qtvscodestyle.qtpy.QtWidgets import QApplication, QDialog, QFrame, QGridLayout, QLabel
app = QApplication(sys.argv)
main_win = QDialog()
# Create line=======================
h_line, v_line = QFrame(), QFrame()
h_line.setProperty("type", "h_line")
v_line.setProperty("type", "v_line")
# ==================================
area1, area2, area3 = QLabel("Area 1"), QLabel("Area 2"), QLabel("Area 3")
# Setup layout
g_layout = QGridLayout(main_win)
g_layout.addWidget(area1, 0, 0)
g_layout.addWidget(h_line, 1, 0)
g_layout.addWidget(area2, 2, 0)
g_layout.addWidget(v_line, 0, 1, 3, 1)
g_layout.addWidget(area3, 0, 2, 3, 1)
main_win.setMinimumSize(300, 200)
app.setStyleSheet(qtvsc.load_stylesheet())
main_win.show()
app.exec()
|
# Generated by Django 3.1.1 on 2020-11-12 21:27
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('orders', '0003_auto_20201112_2107'),
]
operations = [
migrations.AlterField(
model_name='order',
name='orderTrakingNumber',
field=models.CharField(blank=True, default='5362710199', editable=False, max_length=10, unique=True),
),
]
|
#!/usr/bin/env python
#
# For use as SSH_ASKPASS when starting ssh-agent: instead of prompting
# for a passphrase, the user's second factor (a Duo push) is called on.
#
# Needs https://github.com/duosecurity/duo_client_python installed
# or cloned and this script copied to the cloned directory.
#
# duo_config should define ikey, skey and api_hostname per your
# configuration (see duosecurity.com).
#
import duo_config
import duo_client
import sys
import os
import urllib.parse

question = "Nothing to ask about."
if len(sys.argv) == 2:
    question = sys.argv[1]
authclient = duo_client.Auth(duo_config.ikey,
                             duo_config.skey,
                             duo_config.api_hostname)
push = urllib.parse.urlencode({'Question': question})
response = authclient.auth(username=os.getlogin(),
                           factor='push',
                           async_txn=False,  # this keyword was named `async` in older duo_client releases
                           device='auto',
                           type='ssh key usage',
                           pushinfo=push)
if response.get('status') == 'allow':
    sys.exit(0)
sys.exit(1)
|
#
# PySNMP MIB module ASKEY-ENTITY-ALARM-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/ASKEY-ENTITY-ALARM-MIB
# Produced by pysmi-0.3.4 at Wed May 1 11:29:23 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
AskeyVendorTypeEnum, ipDslam = mibBuilder.importSymbols("ASKEY-DSLAM-MIB", "AskeyVendorTypeEnum", "ipDslam")
Integer, OctetString, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "Integer", "OctetString", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueSizeConstraint, ConstraintsUnion, SingleValueConstraint, ConstraintsIntersection, ValueRangeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueSizeConstraint", "ConstraintsUnion", "SingleValueConstraint", "ConstraintsIntersection", "ValueRangeConstraint")
NotificationGroup, ModuleCompliance, ObjectGroup = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance", "ObjectGroup")
IpAddress, iso, MibScalar, MibTable, MibTableRow, MibTableColumn, TimeTicks, Counter32, Counter64, Unsigned32, Bits, Gauge32, NotificationType, ModuleIdentity, Integer32, MibIdentifier, ObjectIdentity = mibBuilder.importSymbols("SNMPv2-SMI", "IpAddress", "iso", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "TimeTicks", "Counter32", "Counter64", "Unsigned32", "Bits", "Gauge32", "NotificationType", "ModuleIdentity", "Integer32", "MibIdentifier", "ObjectIdentity")
DisplayString, TruthValue, AutonomousType, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TruthValue", "AutonomousType", "TextualConvention")
askeyEntityAlarmMIB = ModuleIdentity((1, 3, 6, 1, 4, 1, 3646, 1300, 2, 12))
askeyEntityAlarmMIB.setRevisions(('2004-11-22 15:41', '2004-10-13 14:00', '2004-08-10 16:10', '2004-08-10 10:10', '2004-07-30 14:10',))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    if mibBuilder.loadTexts: askeyEntityAlarmMIB.setRevisionsDescriptions(('Add aeRelayInTable for alarm relay-in ports configuration.', 'Replace *VendorTypeEnum with *PlannedVendorTypeEnum and *OnlineVendorTypeEnum. More detailed information.', 'Remove import from ENTITY-MIB, use Unsigned32 instead of PhysicalIndex. This makes this MIB more compatible.', 'Remove unused objects.', 'Import PhysicalIndex from ENTITY-MIB, remove aeAlarmNotification from aeAlarmTable.',))
if mibBuilder.loadTexts: askeyEntityAlarmMIB.setLastUpdated('0411221541Z')
if mibBuilder.loadTexts: askeyEntityAlarmMIB.setOrganization('Askey Computer Corp.')
if mibBuilder.loadTexts: askeyEntityAlarmMIB.setContactInfo('Caleb Chiu E-mail: caleb@askey.com.tw ')
if mibBuilder.loadTexts: askeyEntityAlarmMIB.setDescription('The MIB module presents all managed objects extended to ENTITY-MIB, including alarm definition, alarm monitoring and entity status.')
class AskeyAlarmBit(TextualConvention, Integer32):
description = 'An arbitrary value which uniquely identifies a type of alarm notified by hardware. It implies the bit position of alarm represented by a 32-bits unsigned integer, from 0 (bit0) to 31 (bit31). The value is a small positive integer; index values for different alarm bits are not necessarily contiguous. Related information about all alarm bits are defined in aeAlarmDefinitionTable.'
status = 'current'
subtypeSpec = Integer32.subtypeSpec + ValueRangeConstraint(0, 31)
class AskeyAlarmSeverity(TextualConvention, Integer32):
description = 'Defines the level of alarm severity.'
status = 'current'
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 99))
namedValues = NamedValues(("critical", 1), ("major", 2), ("minor", 3), ("warning", 4), ("info", 5), ("none", 99))
class AskeyAlarmName(DisplayString):
description = 'Defines the name of alarm.'
status = 'current'
subtypeSpec = DisplayString.subtypeSpec + ValueSizeConstraint(1, 32)
class AskeyAlarmList(TextualConvention, Unsigned32):
description = 'For each set of physical entities sharing a unique aePhysicalVendorType, there an exists unique alarm space. An unsigned 32-bit integer represents an alarm list, in which each bit represents an alarm type. The LSB bit (bit 0) represent alarm types identified by the integer values 1. The bit 1 represent alarm types identified by the integer values 2, and so forth. So The MSB bit (bit 31) represent alarm types identified by the integer values 32. MSB LSB 3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | | | | | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | | | | | | | | | | | | | | | | | | | | | | | +- Alarm 24 .... | | | | | | | +- Alarm 0 | | | | | | +--- Alarm 25 | | | | | | +--- Alarm 1 | | | | | +----- Alarm 26 | | | | | +----- Alarm 2 | | | | +------- Alarm 27 | | | | +------- Alarm 3 | | | +--------- Alarm 28 | | | +--------- Alarm 4 | | +----------- Alarm 29 | | +----------- Alarm 5 | +------------- Alarm 30 | +------------- Alarm 6 +--------------- Alarm 31 +--------------- Alarm 7 '
status = 'current'
class AskeyAlarmAction(TextualConvention, Integer32):
description = 'Defines the action of alarm.'
status = 'current'
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2))
namedValues = NamedValues(("set", 1), ("clear", 2))
askeyEntityAlarmMIBObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 3646, 1300, 2, 12, 1))
askeyEntityAlarmMIBTraps = MibIdentifier((1, 3, 6, 1, 4, 1, 3646, 1300, 2, 12, 2))
aeAlarmDefinition = MibIdentifier((1, 3, 6, 1, 4, 1, 3646, 1300, 2, 12, 1, 2))
aeAlarmMonitoring = MibIdentifier((1, 3, 6, 1, 4, 1, 3646, 1300, 2, 12, 1, 3))
aeAlarmHistory = MibIdentifier((1, 3, 6, 1, 4, 1, 3646, 1300, 2, 12, 1, 4))
aeAlarmDefinitionTable = MibTable((1, 3, 6, 1, 4, 1, 3646, 1300, 2, 12, 1, 2, 1), )
if mibBuilder.loadTexts: aeAlarmDefinitionTable.setStatus('current')
if mibBuilder.loadTexts: aeAlarmDefinitionTable.setDescription('This table contains alarm definition. Each row defines a single alarm per vendor type, NOT an alarm list.')
aeAlarmDefinitionEntry = MibTableRow((1, 3, 6, 1, 4, 1, 3646, 1300, 2, 12, 1, 2, 1, 1), ).setIndexNames((0, "ASKEY-ENTITY-ALARM-MIB", "aeAlarmDefVendorTypeEnum"), (0, "ASKEY-ENTITY-ALARM-MIB", "aeAlarmDefType"))
if mibBuilder.loadTexts: aeAlarmDefinitionEntry.setStatus('current')
if mibBuilder.loadTexts: aeAlarmDefinitionEntry.setDescription('Information about an alarm definition to help an NMS find the meaning of an alarm, and object (aeAlarmFiltered) to help an NMS stop monitoring a type of alarm.')
aeAlarmDefVendorTypeEnum = MibTableColumn((1, 3, 6, 1, 4, 1, 3646, 1300, 2, 12, 1, 2, 1, 1, 1), AskeyVendorTypeEnum())
if mibBuilder.loadTexts: aeAlarmDefVendorTypeEnum.setStatus('current')
if mibBuilder.loadTexts: aeAlarmDefVendorTypeEnum.setDescription('A reference of the vendor-specific hardware type of the physical entity.')
aeAlarmDefType = MibTableColumn((1, 3, 6, 1, 4, 1, 3646, 1300, 2, 12, 1, 2, 1, 1, 2), AskeyAlarmBit()).setMaxAccess("readonly")
if mibBuilder.loadTexts: aeAlarmDefType.setStatus('current')
if mibBuilder.loadTexts: aeAlarmDefType.setDescription('The type of this alarm. It represents the position of bit in the AskeyAlarmList.')
aeAlarmDefName = MibTableColumn((1, 3, 6, 1, 4, 1, 3646, 1300, 2, 12, 1, 2, 1, 1, 3), AskeyAlarmName()).setMaxAccess("readonly")
if mibBuilder.loadTexts: aeAlarmDefName.setStatus('current')
if mibBuilder.loadTexts: aeAlarmDefName.setDescription('The abbreviation of an alarm type, e.g. LOS, LOF, AIS.')
aeAlarmDefDescr = MibTableColumn((1, 3, 6, 1, 4, 1, 3646, 1300, 2, 12, 1, 2, 1, 1, 4), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: aeAlarmDefDescr.setStatus('current')
if mibBuilder.loadTexts: aeAlarmDefDescr.setDescription('A textual description of an alarm type. This object should contain the description and trouble-shooting of a alarm.')
aeAlarmDefSeverity = MibTableColumn((1, 3, 6, 1, 4, 1, 3646, 1300, 2, 12, 1, 2, 1, 1, 5), AskeyAlarmSeverity().clone(4)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: aeAlarmDefSeverity.setStatus('current')
if mibBuilder.loadTexts: aeAlarmDefSeverity.setDescription('The severity of an alarm type.')
aeAlarmDefFiltered = MibTableColumn((1, 3, 6, 1, 4, 1, 3646, 1300, 2, 12, 1, 2, 1, 1, 6), TruthValue().clone(2)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: aeAlarmDefFiltered.setStatus('current')
if mibBuilder.loadTexts: aeAlarmDefFiltered.setDescription('To determine whether enable this alarm trap.')
aeAlarmDefSuppressedby = MibTableColumn((1, 3, 6, 1, 4, 1, 3646, 1300, 2, 12, 1, 2, 1, 1, 7), AskeyAlarmList()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: aeAlarmDefSuppressedby.setStatus('current')
if mibBuilder.loadTexts: aeAlarmDefSuppressedby.setDescription('To determine which alarms make this alarm suppressed.')
aeAlarmTable = MibTable((1, 3, 6, 1, 4, 1, 3646, 1300, 2, 12, 1, 3, 1), )
if mibBuilder.loadTexts: aeAlarmTable.setStatus('current')
if mibBuilder.loadTexts: aeAlarmTable.setDescription('This table contains one row per physical entity. Information in this table are derived from AskeyAlarmList and aeAlarmDefinitionTable.')
aeAlarmEntry = MibTableRow((1, 3, 6, 1, 4, 1, 3646, 1300, 2, 12, 1, 3, 1, 1), ).setIndexNames((0, "ASKEY-ENTITY-ALARM-MIB", "aeAlarmPhysicalIndex"))
if mibBuilder.loadTexts: aeAlarmEntry.setStatus('current')
if mibBuilder.loadTexts: aeAlarmEntry.setDescription('The status of a physical entity regarding active alarms, including the highest-level alarm severity and service impact.')
aeAlarmPhysicalIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 3646, 1300, 2, 12, 1, 3, 1, 1, 1), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: aeAlarmPhysicalIndex.setStatus('current')
if mibBuilder.loadTexts: aeAlarmPhysicalIndex.setDescription('Physical entity index, encoded by shelf/slot/port, 2 decimal digits each field.')
aeAlarmPlannedVendorTypeEnum = MibTableColumn((1, 3, 6, 1, 4, 1, 3646, 1300, 2, 12, 1, 3, 1, 1, 2), AskeyVendorTypeEnum()).setMaxAccess("readonly")
if mibBuilder.loadTexts: aeAlarmPlannedVendorTypeEnum.setStatus('current')
if mibBuilder.loadTexts: aeAlarmPlannedVendorTypeEnum.setDescription('An enumeration of the planned vendor-specific hardware type of the physical entity.')
aeAlarmOnlineVendorTypeEnum = MibTableColumn((1, 3, 6, 1, 4, 1, 3646, 1300, 2, 12, 1, 3, 1, 1, 3), AskeyVendorTypeEnum()).setMaxAccess("readonly")
if mibBuilder.loadTexts: aeAlarmOnlineVendorTypeEnum.setStatus('current')
if mibBuilder.loadTexts: aeAlarmOnlineVendorTypeEnum.setDescription('An enumeration of the online vendor-specific hardware type of the physical entity.')
aeAlarmList = MibTableColumn((1, 3, 6, 1, 4, 1, 3646, 1300, 2, 12, 1, 3, 1, 1, 4), AskeyAlarmList()).setMaxAccess("readonly")
if mibBuilder.loadTexts: aeAlarmList.setStatus('current')
if mibBuilder.loadTexts: aeAlarmList.setDescription("Defines the active alarms on a physical entity. Alarm information is represented by AskeyAlarmList; a manager can use bitwise integer operations to retrieve which types of alarms are asserted. All the alarm types are defined in aeAlarmDefinitionTable. A bit value of '0' implies no alarm of that type, whereas a bit value of '1' implies such an alarm is asserted. All unused bits (no alarm) are filled with 0.")
aeAlarmLastChange = MibTableColumn((1, 3, 6, 1, 4, 1, 3646, 1300, 2, 12, 1, 3, 1, 1, 5), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: aeAlarmLastChange.setStatus('current')
if mibBuilder.loadTexts: aeAlarmLastChange.setDescription('The system time of the alarm vector is changed.')
aeAlarmSeverity = MibTableColumn((1, 3, 6, 1, 4, 1, 3646, 1300, 2, 12, 1, 3, 1, 1, 6), AskeyAlarmSeverity()).setMaxAccess("readonly")
if mibBuilder.loadTexts: aeAlarmSeverity.setStatus('current')
if mibBuilder.loadTexts: aeAlarmSeverity.setDescription('The highest level of alarm severity of a physical entity.')
aeHistoryAlarmTableSize = MibScalar((1, 3, 6, 1, 4, 1, 3646, 1300, 2, 12, 1, 4, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: aeHistoryAlarmTableSize.setStatus('current')
if mibBuilder.loadTexts: aeHistoryAlarmTableSize.setDescription('The size of the aeHistoryAlarmTable in rows. A value of 0 means no alarm logged in this table.')
aeHistoryAlarmTable = MibTable((1, 3, 6, 1, 4, 1, 3646, 1300, 2, 12, 1, 4, 2), )
if mibBuilder.loadTexts: aeHistoryAlarmTable.setStatus('current')
if mibBuilder.loadTexts: aeHistoryAlarmTable.setDescription('This table contains all active alarms that currently exist in the overall system, one row per active alarm of a physical entity. When a manager misses traps originated by an agent, it can also retrieve those events from this table.')
aeHistoryAlarmEntry = MibTableRow((1, 3, 6, 1, 4, 1, 3646, 1300, 2, 12, 1, 4, 2, 1), ).setIndexNames((0, "ASKEY-ENTITY-ALARM-MIB", "aeHistoryAlarmIndex"))
if mibBuilder.loadTexts: aeHistoryAlarmEntry.setStatus('current')
if mibBuilder.loadTexts: aeHistoryAlarmEntry.setDescription('The history of alarms recently ever existed in the system but removed.')
aeHistoryAlarmIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 3646, 1300, 2, 12, 1, 4, 2, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 2146483647)))
if mibBuilder.loadTexts: aeHistoryAlarmIndex.setStatus('current')
if mibBuilder.loadTexts: aeHistoryAlarmIndex.setDescription('The index of this entry. The value wraps back to 1 when it reaches 2^31-1, so a manager must detect this condition by checking aeHistoryAlarmTime.')
aeHistoryAlarmPhysicalIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 3646, 1300, 2, 12, 1, 4, 2, 1, 2), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: aeHistoryAlarmPhysicalIndex.setStatus('current')
if mibBuilder.loadTexts: aeHistoryAlarmPhysicalIndex.setDescription('The index of physical entity for this alarm.')
aeHistoryAlarmPlannedVendorTypeEnum = MibTableColumn((1, 3, 6, 1, 4, 1, 3646, 1300, 2, 12, 1, 4, 2, 1, 3), AskeyVendorTypeEnum()).setMaxAccess("readonly")
if mibBuilder.loadTexts: aeHistoryAlarmPlannedVendorTypeEnum.setStatus('current')
if mibBuilder.loadTexts: aeHistoryAlarmPlannedVendorTypeEnum.setDescription('An enumeration of the planned vendor-specific hardware type of the physical entity. ')
aeHistoryAlarmOnlineVendorTypeEnum = MibTableColumn((1, 3, 6, 1, 4, 1, 3646, 1300, 2, 12, 1, 4, 2, 1, 4), AskeyVendorTypeEnum()).setMaxAccess("readonly")
if mibBuilder.loadTexts: aeHistoryAlarmOnlineVendorTypeEnum.setStatus('current')
if mibBuilder.loadTexts: aeHistoryAlarmOnlineVendorTypeEnum.setDescription('An enumeration of the online vendor-specific hardware type of the physical entity. ')
aeHistoryAlarmType = MibTableColumn((1, 3, 6, 1, 4, 1, 3646, 1300, 2, 12, 1, 4, 2, 1, 5), AskeyAlarmBit()).setMaxAccess("readonly")
if mibBuilder.loadTexts: aeHistoryAlarmType.setStatus('current')
if mibBuilder.loadTexts: aeHistoryAlarmType.setDescription('The type of this alarm.')
aeHistoryAlarmTime = MibTableColumn((1, 3, 6, 1, 4, 1, 3646, 1300, 2, 12, 1, 4, 2, 1, 6), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: aeHistoryAlarmTime.setStatus('current')
if mibBuilder.loadTexts: aeHistoryAlarmTime.setDescription('The value of sysUpTime at the time this alarm asserted.')
aeHistoryAlarmAction = MibTableColumn((1, 3, 6, 1, 4, 1, 3646, 1300, 2, 12, 1, 4, 2, 1, 7), AskeyAlarmAction()).setMaxAccess("readonly")
if mibBuilder.loadTexts: aeHistoryAlarmAction.setStatus('current')
if mibBuilder.loadTexts: aeHistoryAlarmAction.setDescription('The action of this alarm.')
aeRelayInTable = MibTable((1, 3, 6, 1, 4, 1, 3646, 1300, 2, 12, 1, 5), )
if mibBuilder.loadTexts: aeRelayInTable.setStatus('current')
if mibBuilder.loadTexts: aeRelayInTable.setDescription('This table contains one row per relay-in entity. Administrators use this table to manage the normal/abnormal status of relay-in alarm ports.')
aeRelayInEntry = MibTableRow((1, 3, 6, 1, 4, 1, 3646, 1300, 2, 12, 1, 5, 1), ).setIndexNames((0, "ASKEY-ENTITY-ALARM-MIB", "aeRelayInPhysicalIndex"))
if mibBuilder.loadTexts: aeRelayInEntry.setStatus('current')
if mibBuilder.loadTexts: aeRelayInEntry.setDescription('The configuration and status of a physical entity regarding alarm relay-in ports.')
aeRelayInPhysicalIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 3646, 1300, 2, 12, 1, 5, 1, 1), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: aeRelayInPhysicalIndex.setStatus('current')
if mibBuilder.loadTexts: aeRelayInPhysicalIndex.setDescription('Physical entity index, encoded by shelf/slot/port, 2 decimal digits each field.')
aeRelayInName = MibTableColumn((1, 3, 6, 1, 4, 1, 3646, 1300, 2, 12, 1, 5, 1, 2), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: aeRelayInName.setStatus('current')
if mibBuilder.loadTexts: aeRelayInName.setDescription('User defined name for this relay-in port.')
aeRelayInNormalStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 3646, 1300, 2, 12, 1, 5, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("open", 1), ("close", 2), ("disable", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: aeRelayInNormalStatus.setStatus('current')
if mibBuilder.loadTexts: aeRelayInNormalStatus.setDescription('User defined status for this relay-in port.')
aeRelayInCurrentStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 3646, 1300, 2, 12, 1, 5, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("open", 1), ("close", 2), ("disable", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: aeRelayInCurrentStatus.setStatus('current')
if mibBuilder.loadTexts: aeRelayInCurrentStatus.setDescription('Current status for this relay-in port.')
askeyEntityMIBTrapPrefix = MibIdentifier((1, 3, 6, 1, 4, 1, 3646, 1300, 2, 12, 2, 0))
askeyEntityAlarmTrap = NotificationType((1, 3, 6, 1, 4, 1, 3646, 1300, 2, 12, 2, 0, 1)).setObjects(("ASKEY-ENTITY-ALARM-MIB", "aeAlarmPhysicalIndex"), ("ASKEY-ENTITY-ALARM-MIB", "aeAlarmPlannedVendorTypeEnum"), ("ASKEY-ENTITY-ALARM-MIB", "aeAlarmOnlineVendorTypeEnum"), ("ASKEY-ENTITY-ALARM-MIB", "aeAlarmList"), ("ASKEY-ENTITY-ALARM-MIB", "aeAlarmLastChange"), ("ASKEY-ENTITY-ALARM-MIB", "aeAlarmSeverity"))
if mibBuilder.loadTexts: askeyEntityAlarmTrap.setStatus('current')
if mibBuilder.loadTexts: askeyEntityAlarmTrap.setDescription('An askeyEntityAlarmClear trap is sent when an entity clears an alarm. It can be utilized by an NMS to trigger alarm definition table maintenance polls. An NMS should periodically check the items of aeHistoryAlarmTable to detect any missed askeyEntityAlarmClear trap events, e.g. due to throttling or transmission loss.')
askeyEntityAlarmConformance = MibIdentifier((1, 3, 6, 1, 4, 1, 3646, 1300, 2, 12, 3))
askeyEntityAlarmCompliances = MibIdentifier((1, 3, 6, 1, 4, 1, 3646, 1300, 2, 12, 3, 1))
askeyEntityAlarmGroups = MibIdentifier((1, 3, 6, 1, 4, 1, 3646, 1300, 2, 12, 3, 2))
askeyEntityAlarmCompliance = ModuleCompliance((1, 3, 6, 1, 4, 1, 3646, 1300, 2, 12, 3, 1, 1)).setObjects(("ASKEY-ENTITY-ALARM-MIB", "askeyEntityAlarmDefinitionGroup"), ("ASKEY-ENTITY-ALARM-MIB", "askeyEntityAlarmMonitoringGroup"), ("ASKEY-ENTITY-ALARM-MIB", "askeyEntityAlarmNotificationsGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
askeyEntityAlarmCompliance = askeyEntityAlarmCompliance.setStatus('current')
if mibBuilder.loadTexts: askeyEntityAlarmCompliance.setDescription('The compliance statement for SNMP entities which implement the Askey EntityAlarm MIB.')
askeyEntityAlarmDefinitionGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 3646, 1300, 2, 12, 3, 2, 2)).setObjects(("ASKEY-ENTITY-ALARM-MIB", "aeAlarmDefType"), ("ASKEY-ENTITY-ALARM-MIB", "aeAlarmDefName"), ("ASKEY-ENTITY-ALARM-MIB", "aeAlarmDefDescr"), ("ASKEY-ENTITY-ALARM-MIB", "aeAlarmDefSeverity"), ("ASKEY-ENTITY-ALARM-MIB", "aeAlarmDefFiltered"), ("ASKEY-ENTITY-ALARM-MIB", "aeAlarmDefSuppressedby"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
askeyEntityAlarmDefinitionGroup = askeyEntityAlarmDefinitionGroup.setStatus('current')
if mibBuilder.loadTexts: askeyEntityAlarmDefinitionGroup.setDescription('The collection of objects which are used to represent definition of alarms, for which a single agent provides management information.')
askeyEntityAlarmMonitoringGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 3646, 1300, 2, 12, 3, 2, 3)).setObjects(("ASKEY-ENTITY-ALARM-MIB", "aeAlarmPlannedVendorTypeEnum"), ("ASKEY-ENTITY-ALARM-MIB", "aeAlarmOnlineVendorTypeEnum"), ("ASKEY-ENTITY-ALARM-MIB", "aeAlarmList"), ("ASKEY-ENTITY-ALARM-MIB", "aeAlarmLastChange"), ("ASKEY-ENTITY-ALARM-MIB", "aeAlarmSeverity"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
askeyEntityAlarmMonitoringGroup = askeyEntityAlarmMonitoringGroup.setStatus('current')
if mibBuilder.loadTexts: askeyEntityAlarmMonitoringGroup.setDescription('The collection of objects which are used to represent monitoring of alarm status, for which a single agent provides management information.')
askeyEntityAlarmNotificationsGroup = NotificationGroup((1, 3, 6, 1, 4, 1, 3646, 1300, 2, 12, 3, 2, 4)).setObjects(("ASKEY-ENTITY-ALARM-MIB", "askeyEntityAlarmTrap"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
askeyEntityAlarmNotificationsGroup = askeyEntityAlarmNotificationsGroup.setStatus('current')
if mibBuilder.loadTexts: askeyEntityAlarmNotificationsGroup.setDescription('The collection of notifications used to indicate Entity MIB data consistency and general status information.')
mibBuilder.exportSymbols("ASKEY-ENTITY-ALARM-MIB", aeAlarmTable=aeAlarmTable, askeyEntityAlarmGroups=askeyEntityAlarmGroups, aeRelayInNormalStatus=aeRelayInNormalStatus, aeHistoryAlarmEntry=aeHistoryAlarmEntry, aeRelayInTable=aeRelayInTable, aeAlarmPlannedVendorTypeEnum=aeAlarmPlannedVendorTypeEnum, aeRelayInCurrentStatus=aeRelayInCurrentStatus, askeyEntityAlarmNotificationsGroup=askeyEntityAlarmNotificationsGroup, AskeyAlarmSeverity=AskeyAlarmSeverity, AskeyAlarmBit=AskeyAlarmBit, aeRelayInName=aeRelayInName, AskeyAlarmList=AskeyAlarmList, aeAlarmDefDescr=aeAlarmDefDescr, aeAlarmDefSuppressedby=aeAlarmDefSuppressedby, askeyEntityAlarmConformance=askeyEntityAlarmConformance, askeyEntityAlarmTrap=askeyEntityAlarmTrap, aeHistoryAlarmTable=aeHistoryAlarmTable, aeAlarmDefName=aeAlarmDefName, aeHistoryAlarmPlannedVendorTypeEnum=aeHistoryAlarmPlannedVendorTypeEnum, askeyEntityAlarmDefinitionGroup=askeyEntityAlarmDefinitionGroup, aeHistoryAlarmIndex=aeHistoryAlarmIndex, askeyEntityMIBTrapPrefix=askeyEntityMIBTrapPrefix, aeAlarmOnlineVendorTypeEnum=aeAlarmOnlineVendorTypeEnum, aeHistoryAlarmTime=aeHistoryAlarmTime, aeHistoryAlarmTableSize=aeHistoryAlarmTableSize, aeAlarmDefFiltered=aeAlarmDefFiltered, aeHistoryAlarmPhysicalIndex=aeHistoryAlarmPhysicalIndex, aeHistoryAlarmType=aeHistoryAlarmType, aeAlarmSeverity=aeAlarmSeverity, askeyEntityAlarmMIBObjects=askeyEntityAlarmMIBObjects, aeRelayInPhysicalIndex=aeRelayInPhysicalIndex, askeyEntityAlarmMIBTraps=askeyEntityAlarmMIBTraps, aeAlarmDefSeverity=aeAlarmDefSeverity, aeRelayInEntry=aeRelayInEntry, PYSNMP_MODULE_ID=askeyEntityAlarmMIB, aeAlarmMonitoring=aeAlarmMonitoring, aeAlarmList=aeAlarmList, aeAlarmDefinitionEntry=aeAlarmDefinitionEntry, askeyEntityAlarmMIB=askeyEntityAlarmMIB, AskeyAlarmAction=AskeyAlarmAction, aeAlarmEntry=aeAlarmEntry, aeHistoryAlarmAction=aeHistoryAlarmAction, askeyEntityAlarmMonitoringGroup=askeyEntityAlarmMonitoringGroup, askeyEntityAlarmCompliances=askeyEntityAlarmCompliances, aeAlarmHistory=aeAlarmHistory, AskeyAlarmName=AskeyAlarmName, aeAlarmDefinitionTable=aeAlarmDefinitionTable, aeAlarmDefinition=aeAlarmDefinition, aeAlarmDefVendorTypeEnum=aeAlarmDefVendorTypeEnum, aeAlarmPhysicalIndex=aeAlarmPhysicalIndex, aeAlarmDefType=aeAlarmDefType, aeAlarmLastChange=aeAlarmLastChange, aeHistoryAlarmOnlineVendorTypeEnum=aeHistoryAlarmOnlineVendorTypeEnum, askeyEntityAlarmCompliance=askeyEntityAlarmCompliance)
|
# Developed by Vinicius José Fritzen
# Copyright (c) 2019 Vinicius José Fritzen and Albert Angel Lanzarini
import logging
import pytest
from django.test import Client, TestCase
from django.urls import reverse
from escola.models import Conteudo, Profile
logger = logging.getLogger(__name__)
pytestmark = pytest.mark.django_db
# Tests for the views, to make sure the expected templates render
@pytest.mark.conteudo
def test_create_conteudo_raiz_get(professor_client: Client, tc: TestCase):
response = professor_client.get(reverse('escola:conteudo_add'))
tc.assertTemplateUsed(response, 'escola/conteudo/create_conteudo.html')
@pytest.mark.conteudo
def test_create_conteudo_filho_get(professor_client: Client, tc: TestCase, conteudo: Conteudo):
response = professor_client.get(reverse('escola:conteudo_add', args=(conteudo.pk,)))
tc.assertTemplateUsed(response, 'escola/conteudo/create_conteudo.html')
@pytest.mark.conteudo
def test_update_conteudo_get(conteudo: Conteudo, tc: TestCase, client: Client):
user = conteudo.professor.user
client.force_login(user)
response = client.get(reverse('escola:update-conteudo', args=(conteudo.pk, )))
tc.assertTemplateUsed(response, 'escola/conteudo/create_conteudo.html')
@pytest.mark.conteudo
def test_detail_conteudo_get(conteudo, aluno_client, tc):
response = aluno_client.get(reverse('escola:conteudo-detail', args=(conteudo.pk, )))
tc.assertTemplateUsed(response, 'escola/conteudo_detail.html')
@pytest.mark.conteudo
def test_add_link_conteudo_get(conteudo, tc, client):
user = conteudo.professor.user
client.force_login(user)
p, c = Profile.objects.get_or_create(user=user, defaults={'is_aluno': False, 'is_professor': False})
p.is_professor = True
p.save()
response = client.get(reverse('escola:add-link-conteudo', args=(conteudo.pk, )))
print(response)
tc.assertTemplateUsed(response, 'escola/linkconteudo_form.html')
@pytest.mark.conteudo
def test_add_conteudo_materia_get(materia, client, tc):
user = materia.professor.user
client.force_login(user)
response = client.get(reverse('escola:add-conteudo-materia', args=(materia.pk, )))
tc.assertTemplateUsed(response, 'escola/conteudo/addConteudoToMateria.html')
@pytest.mark.conteudo
def test_meus_conteudos_get(professor_client, tc):
response = professor_client.get(reverse('escola:conteudos-professor'))
tc.assertTemplateUsed(response, 'escola/professor/listConteudos.html')
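# These tests rely on project-level pytest fixtures (professor_client,
# aluno_client, conteudo, materia and tc) that are assumed to live in the
# project's conftest.py. A minimal sketch of the "tc" fixture, which exists
# only so that TestCase helpers such as assertTemplateUsed can be called from
# function-style tests (an illustration of the assumed fixture, not the
# project's actual code):
#
#     @pytest.fixture
#     def tc() -> TestCase:
#         return TestCase()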
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkdomain.endpoint import endpoint_data
class SaveSingleTaskForCreatingOrderRedeemRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Domain', '2018-01-29', 'SaveSingleTaskForCreatingOrderRedeem')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_DomainName(self):
return self.get_query_params().get('DomainName')
def set_DomainName(self,DomainName):
self.add_query_param('DomainName',DomainName)
def get_CouponNo(self):
return self.get_query_params().get('CouponNo')
def set_CouponNo(self,CouponNo):
self.add_query_param('CouponNo',CouponNo)
def get_UseCoupon(self):
return self.get_query_params().get('UseCoupon')
def set_UseCoupon(self,UseCoupon):
self.add_query_param('UseCoupon',UseCoupon)
def get_PromotionNo(self):
return self.get_query_params().get('PromotionNo')
def set_PromotionNo(self,PromotionNo):
self.add_query_param('PromotionNo',PromotionNo)
def get_CurrentExpirationDate(self):
return self.get_query_params().get('CurrentExpirationDate')
def set_CurrentExpirationDate(self,CurrentExpirationDate):
self.add_query_param('CurrentExpirationDate',CurrentExpirationDate)
def get_UserClientIp(self):
return self.get_query_params().get('UserClientIp')
def set_UserClientIp(self,UserClientIp):
self.add_query_param('UserClientIp',UserClientIp)
def get_Lang(self):
return self.get_query_params().get('Lang')
def set_Lang(self,Lang):
self.add_query_param('Lang',Lang)
def get_UsePromotion(self):
return self.get_query_params().get('UsePromotion')
def set_UsePromotion(self,UsePromotion):
self.add_query_param('UsePromotion',UsePromotion)
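# Minimal usage sketch, assuming valid credentials and region; the values
# below are placeholders and follow the standard aliyunsdkcore client pattern:
#
#     from aliyunsdkcore.client import AcsClient
#
#     client = AcsClient('<access-key-id>', '<access-key-secret>', 'cn-hangzhou')
#     request = SaveSingleTaskForCreatingOrderRedeemRequest()
#     request.set_DomainName('example.com')
#     response = client.do_action_with_exception(request)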
|
# Read two sets of integers from stdin. Each set is preceded by a line giving
# its size, which is not needed because the set is built from the values.
num1 = input()  # size of the first set (unused)
set1 = set(map(int, input().split()))
num2 = input()  # size of the second set (unused)
set2 = set(map(int, input().split()))
# Symmetric difference: elements that appear in exactly one of the two sets.
print(len(set1 ^ set2))
|
##########################################################################
#
# Copyright (c) 2010, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest
import IECore
class CurveTracerTest( unittest.TestCase ) :
def test( self ) :
i = IECore.Reader.create( "test/IECore/data/tiff/toTrace.tif" ).read()
o = IECore.ImageThinner()
o( input=i, copyInput=False, channels=IECore.StringVectorData( [ "R" ] ) )
o = IECore.CurveTracer()
c = o( image=i, channelName="R" )
e = IECore.CurvesPrimitiveEvaluator( c )
r = e.createResult()
size = i.dataWindow.size() + IECore.V2i( 1 )
pixels = i["R"].data
pi = 0
for y in range( 0, size.y ) :
for x in range( 0, size.x ) :
if pixels[pi] > 0.5 :
pp = IECore.V3f( x, y, 0 )
e.closestPoint( pp, r )
self.assertTrue( abs( ( pp - r.point() ).length() ) < 3 )
pi += 1
def testPixelOutputSpace( self ) :
i = IECore.Reader.create( "test/IECore/data/tiff/singlePixelT.tif" ).read()
o = IECore.CurveTracer()
c = o( image=i, channelName="R", minimumLength=0 )
self.assertEqual( c.bound(), IECore.Box3f( IECore.V3f( 1, 7, 0 ), IECore.V3f( 9, 11, 0 ) ) )
self.assertTrue( IECore.V3f( 1, 7, 0 ) in c["P"].data )
self.assertTrue( IECore.V3f( 7, 11, 0 ) in c["P"].data )
self.assertTrue( IECore.V3f( 9, 7, 0 ) in c["P"].data )
def testUVOutputSpace( self ) :
i = IECore.Reader.create( "test/IECore/data/tiff/singlePixelT.tif" ).read()
o = IECore.CurveTracer()
o["outputSpace"].setValue( "uv" )
c = o( image=i, channelName="R", minimumLength=0 )
self.assertTrue( c.bound().min.equalWithAbsError( IECore.V3f( 0.1, 0.5, 0 ), 0.00001 ) )
self.assertTrue( c.bound().max.equalWithAbsError( IECore.V3f( 0.633333, 0.7666666, 0 ), 0.00001 ) )
def testObjectOutputSpace( self ) :
i = IECore.Reader.create( "test/IECore/data/tiff/singlePixelT.tif" ).read()
o = IECore.CurveTracer()
o["outputSpace"].setValue( "object" )
c = o( image=i, channelName="R", minimumLength=0 )
self.assertTrue( c.bound().min.equalWithAbsError( IECore.V3f( -6, -4, 0 ), 0.00001 ) )
self.assertTrue( c.bound().max.equalWithAbsError( IECore.V3f( 2, 0, 0 ), 0.00001 ) )
if __name__ == "__main__":
unittest.main()
|
from django import forms
from django.utils.translation import ugettext_lazy as _
from django.core.validators import RegexValidator
from .models import Link
_alias = Link._meta.get_field('alias')
_url = Link._meta.get_field('url')
class URLShortenerForm(forms.Form):
alias = forms.CharField(
max_length=_alias.max_length,
required=False,
help_text=_("An optional alias you want to generate. "
"One will be chosen automatically if you don't enter one."),
label=_('Alias (optional)'),
validators=[RegexValidator(
regex=r'^[a-zA-Z0-9-_]+$',
code='invalid_alias',
message='Alias can only contain letters, numbers, underscores and hyphens',
)]
)
url = forms.URLField(
max_length=_url.max_length,
required=True,
help_text=_("The URL you want to shorten."),
label=_('Long URL to shorten'),
widget=forms.URLInput(
attrs={'placeholder': 'http://www.example.com/index.php?id=321&redirect=1&guest&coords=1,1,3,32,53',
'required': 'true'},
),
)
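# Minimal usage sketch (assumes a configured Django project; the input data is
# illustrative):
#
#     form = URLShortenerForm(data={
#         'alias': 'my-link',
#         'url': 'https://www.example.com/some/long/path',
#     })
#     if form.is_valid():
#         alias = form.cleaned_data['alias']  # may be '', since alias is optional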
|
# Adapted from test_file.py by Daniel Stutzbach
import sys
import os
import io
import errno
import unittest
from array import array
from weakref import proxy
from functools import wraps
from test.support import (run_unittest, cpython_only, swap_attr)
from test.support.os_helper import (TESTFN, TESTFN_UNICODE, make_bad_fd)
from test.support.warnings_helper import check_warnings
from collections import UserList
import _io # C implementation of io
import _pyio # Python implementation of io
class AutoFileTests:
# file tests for which a test file is automatically set up
def setUp(self):
self.f = self.FileIO(TESTFN, 'w')
def tearDown(self):
if self.f:
self.f.close()
os.remove(TESTFN)
def testWeakRefs(self):
# verify weak references
p = proxy(self.f)
p.write(bytes(range(10)))
self.assertEqual(self.f.tell(), p.tell())
self.f.close()
self.f = None
self.assertRaises(ReferenceError, getattr, p, 'tell')
def testSeekTell(self):
self.f.write(bytes(range(20)))
self.assertEqual(self.f.tell(), 20)
self.f.seek(0)
self.assertEqual(self.f.tell(), 0)
self.f.seek(10)
self.assertEqual(self.f.tell(), 10)
self.f.seek(5, 1)
self.assertEqual(self.f.tell(), 15)
self.f.seek(-5, 1)
self.assertEqual(self.f.tell(), 10)
self.f.seek(-5, 2)
self.assertEqual(self.f.tell(), 15)
def testAttributes(self):
# verify expected attributes exist
f = self.f
self.assertEqual(f.mode, "wb")
self.assertEqual(f.closed, False)
# verify the attributes are readonly
for attr in 'mode', 'closed':
self.assertRaises((AttributeError, TypeError),
setattr, f, attr, 'oops')
def testBlksize(self):
# test private _blksize attribute
blksize = io.DEFAULT_BUFFER_SIZE
# try to get preferred blksize from stat.st_blksize, if available
if hasattr(os, 'fstat'):
fst = os.fstat(self.f.fileno())
blksize = getattr(fst, 'st_blksize', blksize)
self.assertEqual(self.f._blksize, blksize)
# verify readinto
def testReadintoByteArray(self):
self.f.write(bytes([1, 2, 0, 255]))
self.f.close()
ba = bytearray(b'abcdefgh')
with self.FileIO(TESTFN, 'r') as f:
n = f.readinto(ba)
self.assertEqual(ba, b'\x01\x02\x00\xffefgh')
self.assertEqual(n, 4)
def _testReadintoMemoryview(self):
self.f.write(bytes([1, 2, 0, 255]))
self.f.close()
m = memoryview(bytearray(b'abcdefgh'))
with self.FileIO(TESTFN, 'r') as f:
n = f.readinto(m)
self.assertEqual(m, b'\x01\x02\x00\xffefgh')
self.assertEqual(n, 4)
m = memoryview(bytearray(b'abcdefgh')).cast('H', shape=[2, 2])
with self.FileIO(TESTFN, 'r') as f:
n = f.readinto(m)
self.assertEqual(bytes(m), b'\x01\x02\x00\xffefgh')
self.assertEqual(n, 4)
def _testReadintoArray(self):
self.f.write(bytes([1, 2, 0, 255]))
self.f.close()
a = array('B', b'abcdefgh')
with self.FileIO(TESTFN, 'r') as f:
n = f.readinto(a)
self.assertEqual(a, array('B', [1, 2, 0, 255, 101, 102, 103, 104]))
self.assertEqual(n, 4)
a = array('b', b'abcdefgh')
with self.FileIO(TESTFN, 'r') as f:
n = f.readinto(a)
self.assertEqual(a, array('b', [1, 2, 0, -1, 101, 102, 103, 104]))
self.assertEqual(n, 4)
a = array('I', b'abcdefgh')
with self.FileIO(TESTFN, 'r') as f:
n = f.readinto(a)
self.assertEqual(a, array('I', b'\x01\x02\x00\xffefgh'))
self.assertEqual(n, 4)
def testWritelinesList(self):
l = [b'123', b'456']
self.f.writelines(l)
self.f.close()
self.f = self.FileIO(TESTFN, 'rb')
buf = self.f.read()
self.assertEqual(buf, b'123456')
def testWritelinesUserList(self):
l = UserList([b'123', b'456'])
self.f.writelines(l)
self.f.close()
self.f = self.FileIO(TESTFN, 'rb')
buf = self.f.read()
self.assertEqual(buf, b'123456')
def testWritelinesError(self):
self.assertRaises(TypeError, self.f.writelines, [1, 2, 3])
self.assertRaises(TypeError, self.f.writelines, None)
self.assertRaises(TypeError, self.f.writelines, "abc")
def test_none_args(self):
self.f.write(b"hi\nbye\nabc")
self.f.close()
self.f = self.FileIO(TESTFN, 'r')
self.assertEqual(self.f.read(None), b"hi\nbye\nabc")
self.f.seek(0)
self.assertEqual(self.f.readline(None), b"hi\n")
self.assertEqual(self.f.readlines(None), [b"bye\n", b"abc"])
def test_reject(self):
self.assertRaises(TypeError, self.f.write, "Hello!")
def testRepr(self):
self.assertEqual(repr(self.f),
"<%s.FileIO name=%r mode=%r closefd=True>" %
(self.modulename, self.f.name, self.f.mode))
del self.f.name
self.assertEqual(repr(self.f),
"<%s.FileIO fd=%r mode=%r closefd=True>" %
(self.modulename, self.f.fileno(), self.f.mode))
self.f.close()
self.assertEqual(repr(self.f),
"<%s.FileIO [closed]>" % (self.modulename,))
def testReprNoCloseFD(self):
fd = os.open(TESTFN, os.O_RDONLY)
try:
with self.FileIO(fd, 'r', closefd=False) as f:
self.assertEqual(repr(f),
"<%s.FileIO name=%r mode=%r closefd=False>" %
(self.modulename, f.name, f.mode))
finally:
os.close(fd)
def testRecursiveRepr(self):
# Issue #25455
with swap_attr(self.f, 'name', self.f):
with self.assertRaises(RuntimeError):
repr(self.f) # Should not crash
def testErrors(self):
f = self.f
self.assertFalse(f.isatty())
self.assertFalse(f.closed)
#self.assertEqual(f.name, TESTFN)
self.assertRaises(ValueError, f.read, 10) # file is open for writing, not reading
f.close()
self.assertTrue(f.closed)
f = self.FileIO(TESTFN, 'r')
self.assertRaises(TypeError, f.readinto, "")
self.assertFalse(f.closed)
f.close()
self.assertTrue(f.closed)
def testMethods(self):
methods = ['fileno', 'isatty', 'seekable', 'readable', 'writable',
'read', 'readall', 'readline', 'readlines',
'tell', 'truncate', 'flush']
self.f.close()
self.assertTrue(self.f.closed)
for methodname in methods:
method = getattr(self.f, methodname)
# should raise on closed file
self.assertRaises(ValueError, method)
self.assertRaises(TypeError, self.f.readinto)
self.assertRaises(ValueError, self.f.readinto, bytearray(1))
self.assertRaises(TypeError, self.f.seek)
self.assertRaises(ValueError, self.f.seek, 0)
self.assertRaises(TypeError, self.f.write)
self.assertRaises(ValueError, self.f.write, b'')
self.assertRaises(TypeError, self.f.writelines)
self.assertRaises(ValueError, self.f.writelines, b'')
def testOpendir(self):
# Issue 3703: opening a directory should fill the errno
# Windows always returns "[Errno 13]: Permission denied"
# Unix uses fstat and returns "[Errno 21]: Is a directory"
try:
self.FileIO('.', 'r')
except OSError as e:
self.assertNotEqual(e.errno, 0)
self.assertEqual(e.filename, ".")
else:
self.fail("Should have raised OSError")
@unittest.skipIf(os.name == 'nt', "test only works on a POSIX-like system")
def testOpenDirFD(self):
fd = os.open('.', os.O_RDONLY)
with self.assertRaises(OSError) as cm:
self.FileIO(fd, 'r')
os.close(fd)
self.assertEqual(cm.exception.errno, errno.EISDIR)
#A set of functions testing that we get expected behaviour if someone has
#manually closed the internal file descriptor. First, a decorator:
def ClosedFD(func):
@wraps(func)
def wrapper(self):
#forcibly close the fd before invoking the problem function
f = self.f
os.close(f.fileno())
try:
func(self, f)
finally:
try:
self.f.close()
except OSError:
pass
return wrapper
def ClosedFDRaises(func):
@wraps(func)
def wrapper(self):
#forcibly close the fd before invoking the problem function
f = self.f
os.close(f.fileno())
try:
func(self, f)
except OSError as e:
self.assertEqual(e.errno, errno.EBADF)
else:
self.fail("Should have raised OSError")
finally:
try:
self.f.close()
except OSError:
pass
return wrapper
@ClosedFDRaises
def testErrnoOnClose(self, f):
f.close()
@ClosedFDRaises
def testErrnoOnClosedWrite(self, f):
f.write(b'a')
@ClosedFDRaises
def testErrnoOnClosedSeek(self, f):
f.seek(0)
@ClosedFDRaises
def testErrnoOnClosedTell(self, f):
f.tell()
@ClosedFDRaises
def testErrnoOnClosedTruncate(self, f):
f.truncate(0)
@ClosedFD
def testErrnoOnClosedSeekable(self, f):
f.seekable()
@ClosedFD
def testErrnoOnClosedReadable(self, f):
f.readable()
@ClosedFD
def testErrnoOnClosedWritable(self, f):
f.writable()
@ClosedFD
def testErrnoOnClosedFileno(self, f):
f.fileno()
@ClosedFD
def testErrnoOnClosedIsatty(self, f):
self.assertEqual(f.isatty(), False)
def ReopenForRead(self):
try:
self.f.close()
except OSError:
pass
self.f = self.FileIO(TESTFN, 'r')
os.close(self.f.fileno())
return self.f
@ClosedFDRaises
def testErrnoOnClosedRead(self, f):
f = self.ReopenForRead()
f.read(1)
@ClosedFDRaises
def testErrnoOnClosedReadall(self, f):
f = self.ReopenForRead()
f.readall()
@ClosedFDRaises
def testErrnoOnClosedReadinto(self, f):
f = self.ReopenForRead()
a = array('b', b'x'*10)
f.readinto(a)
class CAutoFileTests(AutoFileTests, unittest.TestCase):
FileIO = _io.FileIO
modulename = '_io'
class PyAutoFileTests(AutoFileTests, unittest.TestCase):
FileIO = _pyio.FileIO
modulename = '_pyio'
class OtherFileTests:
def testAbles(self):
try:
f = self.FileIO(TESTFN, "w")
self.assertEqual(f.readable(), False)
self.assertEqual(f.writable(), True)
self.assertEqual(f.seekable(), True)
f.close()
f = self.FileIO(TESTFN, "r")
self.assertEqual(f.readable(), True)
self.assertEqual(f.writable(), False)
self.assertEqual(f.seekable(), True)
f.close()
f = self.FileIO(TESTFN, "a+")
self.assertEqual(f.readable(), True)
self.assertEqual(f.writable(), True)
self.assertEqual(f.seekable(), True)
self.assertEqual(f.isatty(), False)
f.close()
if sys.platform != "win32":
try:
f = self.FileIO("/dev/tty", "a")
except OSError:
# When run in a cron job there just aren't any
# ttys, so skip the test. This also handles other
# OS'es that don't support /dev/tty.
pass
else:
self.assertEqual(f.readable(), False)
self.assertEqual(f.writable(), True)
if sys.platform != "darwin" and \
'bsd' not in sys.platform and \
not sys.platform.startswith(('sunos', 'aix')):
# Somehow /dev/tty appears seekable on some BSDs
self.assertEqual(f.seekable(), False)
self.assertEqual(f.isatty(), True)
f.close()
finally:
os.unlink(TESTFN)
def testInvalidModeStrings(self):
# check invalid mode strings
for mode in ("", "aU", "wU+", "rw", "rt"):
try:
f = self.FileIO(TESTFN, mode)
except ValueError:
pass
else:
f.close()
self.fail('%r is an invalid file mode' % mode)
def testModeStrings(self):
# test that the mode attribute is correct for various mode strings
# given as init args
try:
for modes in [('w', 'wb'), ('wb', 'wb'), ('wb+', 'rb+'),
('w+b', 'rb+'), ('a', 'ab'), ('ab', 'ab'),
('ab+', 'ab+'), ('a+b', 'ab+'), ('r', 'rb'),
('rb', 'rb'), ('rb+', 'rb+'), ('r+b', 'rb+')]:
# read modes are last so that TESTFN will exist first
with self.FileIO(TESTFN, modes[0]) as f:
self.assertEqual(f.mode, modes[1])
finally:
if os.path.exists(TESTFN):
os.unlink(TESTFN)
def testUnicodeOpen(self):
# verify repr works for unicode too
f = self.FileIO(str(TESTFN), "w")
f.close()
os.unlink(TESTFN)
def testBytesOpen(self):
# Opening a bytes filename
try:
fn = TESTFN.encode("ascii")
except UnicodeEncodeError:
self.skipTest('could not encode %r to ascii' % TESTFN)
f = self.FileIO(fn, "w")
try:
f.write(b"abc")
f.close()
with open(TESTFN, "rb") as f:
self.assertEqual(f.read(), b"abc")
finally:
os.unlink(TESTFN)
@unittest.skipIf(sys.getfilesystemencoding() != 'utf-8',
"test only works for utf-8 filesystems")
def testUtf8BytesOpen(self):
# Opening a UTF-8 bytes filename
try:
fn = TESTFN_UNICODE.encode("utf-8")
except UnicodeEncodeError:
self.skipTest('could not encode %r to utf-8' % TESTFN_UNICODE)
f = self.FileIO(fn, "w")
try:
f.write(b"abc")
f.close()
with open(TESTFN_UNICODE, "rb") as f:
self.assertEqual(f.read(), b"abc")
finally:
os.unlink(TESTFN_UNICODE)
def testConstructorHandlesNULChars(self):
fn_with_NUL = 'foo\0bar'
self.assertRaises(ValueError, self.FileIO, fn_with_NUL, 'w')
self.assertRaises(ValueError, self.FileIO, bytes(fn_with_NUL, 'ascii'), 'w')
def testInvalidFd(self):
self.assertRaises(ValueError, self.FileIO, -10)
self.assertRaises(OSError, self.FileIO, make_bad_fd())
if sys.platform == 'win32':
import msvcrt
self.assertRaises(OSError, msvcrt.get_osfhandle, make_bad_fd())
def testBadModeArgument(self):
# verify that we get a sensible error message for bad mode argument
bad_mode = "qwerty"
try:
f = self.FileIO(TESTFN, bad_mode)
except ValueError as msg:
if msg.args[0] != 0:
s = str(msg)
if TESTFN in s or bad_mode not in s:
self.fail("bad error message for invalid mode: %s" % s)
# if msg.args[0] == 0, we're probably on Windows where there may be
# no obvious way to discover why open() failed.
else:
f.close()
self.fail("no error for invalid mode: %s" % bad_mode)
def testTruncate(self):
f = self.FileIO(TESTFN, 'w')
f.write(bytes(bytearray(range(10))))
self.assertEqual(f.tell(), 10)
f.truncate(5)
self.assertEqual(f.tell(), 10)
self.assertEqual(f.seek(0, io.SEEK_END), 5)
f.truncate(15)
self.assertEqual(f.tell(), 5)
self.assertEqual(f.seek(0, io.SEEK_END), 15)
f.close()
def testTruncateOnWindows(self):
def bug801631():
# SF bug <http://www.python.org/sf/801631>
# "file.truncate fault on windows"
f = self.FileIO(TESTFN, 'w')
f.write(bytes(range(11)))
f.close()
f = self.FileIO(TESTFN,'r+')
data = f.read(5)
if data != bytes(range(5)):
self.fail("Read on file opened for update failed %r" % data)
if f.tell() != 5:
self.fail("File pos after read wrong %d" % f.tell())
f.truncate()
if f.tell() != 5:
self.fail("File pos after ftruncate wrong %d" % f.tell())
f.close()
size = os.path.getsize(TESTFN)
if size != 5:
self.fail("File size after ftruncate wrong %d" % size)
try:
bug801631()
finally:
os.unlink(TESTFN)
def testAppend(self):
try:
f = open(TESTFN, 'wb')
f.write(b'spam')
f.close()
f = open(TESTFN, 'ab')
f.write(b'eggs')
f.close()
f = open(TESTFN, 'rb')
d = f.read()
f.close()
self.assertEqual(d, b'spameggs')
finally:
try:
os.unlink(TESTFN)
except OSError:
pass
def testInvalidInit(self):
self.assertRaises(TypeError, self.FileIO, "1", 0, 0)
def testWarnings(self):
with check_warnings(quiet=True) as w:
self.assertEqual(w.warnings, [])
self.assertRaises(TypeError, self.FileIO, [])
self.assertEqual(w.warnings, [])
self.assertRaises(ValueError, self.FileIO, "/some/invalid/name", "rt")
self.assertEqual(w.warnings, [])
def testUnclosedFDOnException(self):
class MyException(Exception): pass
class MyFileIO(self.FileIO):
def __setattr__(self, name, value):
if name == "name":
raise MyException("blocked setting name")
return super(MyFileIO, self).__setattr__(name, value)
fd = os.open(__file__, os.O_RDONLY)
self.assertRaises(MyException, MyFileIO, fd)
os.close(fd) # should not raise OSError(EBADF)
class COtherFileTests(OtherFileTests, unittest.TestCase):
FileIO = _io.FileIO
modulename = '_io'
@cpython_only
def testInvalidFd_overflow(self):
# Issue 15989
import _testcapi
self.assertRaises(TypeError, self.FileIO, _testcapi.INT_MAX + 1)
self.assertRaises(TypeError, self.FileIO, _testcapi.INT_MIN - 1)
def test_open_code(self):
# Check that the default behaviour of open_code matches
# open("rb")
with self.FileIO(__file__, "rb") as f:
expected = f.read()
with _io.open_code(__file__) as f:
actual = f.read()
self.assertEqual(expected, actual)
class PyOtherFileTests(OtherFileTests, unittest.TestCase):
FileIO = _pyio.FileIO
modulename = '_pyio'
def test_open_code(self):
# Check that the default behaviour of open_code matches
# open("rb")
with self.FileIO(__file__, "rb") as f:
expected = f.read()
with check_warnings(quiet=True) as w:
# Always test _open_code_with_warning
with _pyio._open_code_with_warning(__file__) as f:
actual = f.read()
self.assertEqual(expected, actual)
self.assertNotEqual(w.warnings, [])
def test_main():
# Historically, these tests have been sloppy about removing TESTFN.
# So get rid of it no matter what.
try:
run_unittest(CAutoFileTests, PyAutoFileTests,
COtherFileTests, PyOtherFileTests)
finally:
if os.path.exists(TESTFN):
os.unlink(TESTFN)
if __name__ == '__main__':
test_main()
|
from typing import List
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
from torch.nn.modules.batchnorm import BatchNorm2d
from torch.nn.modules.instancenorm import InstanceNorm2d
from torchvision.ops import ConvNormActivation
from ..._internally_replaced_utils import load_state_dict_from_url
from ...utils import _log_api_usage_once
from ._utils import grid_sample, make_coords_grid, upsample_flow
__all__ = (
"RAFT",
"raft_large",
"raft_small",
)
_MODELS_URLS = {
"raft_large": "https://download.pytorch.org/models/raft_large_C_T_V2-1bb1363a.pth",
"raft_small": "https://download.pytorch.org/models/raft_small_C_T_V2-01064c6d.pth",
}
class ResidualBlock(nn.Module):
"""Slightly modified Residual block with extra relu and biases."""
def __init__(self, in_channels, out_channels, *, norm_layer, stride=1):
super().__init__()
# Note regarding bias=True:
# Usually we can pass bias=False in conv layers followed by a norm layer.
# But in the RAFT training reference, the BatchNorm2d layers are only activated for the first dataset,
# and frozen for the rest of the training process (i.e. set as eval()). The bias term is thus still useful
# for the rest of the datasets. Technically, we could remove the bias for other norm layers like Instance norm
# because these aren't frozen, but we don't bother (also, we wouldn't be able to load the original weights).
self.convnormrelu1 = ConvNormActivation(
in_channels, out_channels, norm_layer=norm_layer, kernel_size=3, stride=stride, bias=True
)
self.convnormrelu2 = ConvNormActivation(
out_channels, out_channels, norm_layer=norm_layer, kernel_size=3, bias=True
)
if stride == 1:
self.downsample = nn.Identity()
else:
self.downsample = ConvNormActivation(
in_channels,
out_channels,
norm_layer=norm_layer,
kernel_size=1,
stride=stride,
bias=True,
activation_layer=None,
)
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
y = x
y = self.convnormrelu1(y)
y = self.convnormrelu2(y)
x = self.downsample(x)
return self.relu(x + y)
class BottleneckBlock(nn.Module):
"""Slightly modified BottleNeck block (extra relu and biases)"""
def __init__(self, in_channels, out_channels, *, norm_layer, stride=1):
super().__init__()
# See note in ResidualBlock for the reason behind bias=True
self.convnormrelu1 = ConvNormActivation(
in_channels, out_channels // 4, norm_layer=norm_layer, kernel_size=1, bias=True
)
self.convnormrelu2 = ConvNormActivation(
out_channels // 4, out_channels // 4, norm_layer=norm_layer, kernel_size=3, stride=stride, bias=True
)
self.convnormrelu3 = ConvNormActivation(
out_channels // 4, out_channels, norm_layer=norm_layer, kernel_size=1, bias=True
)
self.relu = nn.ReLU(inplace=True)
if stride == 1:
self.downsample = nn.Identity()
else:
self.downsample = ConvNormActivation(
in_channels,
out_channels,
norm_layer=norm_layer,
kernel_size=1,
stride=stride,
bias=True,
activation_layer=None,
)
def forward(self, x):
y = x
y = self.convnormrelu1(y)
y = self.convnormrelu2(y)
y = self.convnormrelu3(y)
x = self.downsample(x)
return self.relu(x + y)
class FeatureEncoder(nn.Module):
"""The feature encoder, used both as the actual feature encoder, and as the context encoder.
It must downsample its input by 8.
"""
def __init__(self, *, block=ResidualBlock, layers=(64, 64, 96, 128, 256), norm_layer=nn.BatchNorm2d):
super().__init__()
assert len(layers) == 5
# See note in ResidualBlock for the reason behind bias=True
self.convnormrelu = ConvNormActivation(3, layers[0], norm_layer=norm_layer, kernel_size=7, stride=2, bias=True)
self.layer1 = self._make_2_blocks(block, layers[0], layers[1], norm_layer=norm_layer, first_stride=1)
self.layer2 = self._make_2_blocks(block, layers[1], layers[2], norm_layer=norm_layer, first_stride=2)
self.layer3 = self._make_2_blocks(block, layers[2], layers[3], norm_layer=norm_layer, first_stride=2)
self.conv = nn.Conv2d(layers[3], layers[4], kernel_size=1)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode="fan_out", nonlinearity="relu")
elif isinstance(m, (nn.BatchNorm2d, nn.InstanceNorm2d)):
if m.weight is not None:
nn.init.constant_(m.weight, 1)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
def _make_2_blocks(self, block, in_channels, out_channels, norm_layer, first_stride):
block1 = block(in_channels, out_channels, norm_layer=norm_layer, stride=first_stride)
block2 = block(out_channels, out_channels, norm_layer=norm_layer, stride=1)
return nn.Sequential(block1, block2)
def forward(self, x):
x = self.convnormrelu(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.conv(x)
return x
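# Shape sanity sketch (an editor's illustration, not part of torchvision): the
# stem convolution and the second and third block pairs each have stride 2, so
# H and W are divided by 8 overall, as the docstring requires.
def _feature_encoder_shape_demo():
    encoder = FeatureEncoder()  # default layers end with 256 channels
    out = encoder(torch.randn(1, 3, 64, 64))
    assert out.shape == (1, 256, 8, 8)  # spatial size downsampled by 8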
class MotionEncoder(nn.Module):
"""The motion encoder, part of the update block.
Takes the current predicted flow and the correlation features as input and returns an encoded version of these.
"""
def __init__(self, *, in_channels_corr, corr_layers=(256, 192), flow_layers=(128, 64), out_channels=128):
super().__init__()
assert len(flow_layers) == 2
assert len(corr_layers) in (1, 2)
self.convcorr1 = ConvNormActivation(in_channels_corr, corr_layers[0], norm_layer=None, kernel_size=1)
if len(corr_layers) == 2:
self.convcorr2 = ConvNormActivation(corr_layers[0], corr_layers[1], norm_layer=None, kernel_size=3)
else:
self.convcorr2 = nn.Identity()
self.convflow1 = ConvNormActivation(2, flow_layers[0], norm_layer=None, kernel_size=7)
self.convflow2 = ConvNormActivation(flow_layers[0], flow_layers[1], norm_layer=None, kernel_size=3)
# out_channels - 2 because we cat the flow (2 channels) at the end
self.conv = ConvNormActivation(
corr_layers[-1] + flow_layers[-1], out_channels - 2, norm_layer=None, kernel_size=3
)
self.out_channels = out_channels
def forward(self, flow, corr_features):
corr = self.convcorr1(corr_features)
corr = self.convcorr2(corr)
flow_orig = flow
flow = self.convflow1(flow)
flow = self.convflow2(flow)
corr_flow = torch.cat([corr, flow], dim=1)
corr_flow = self.conv(corr_flow)
return torch.cat([corr_flow, flow_orig], dim=1)
class ConvGRU(nn.Module):
"""Convolutional Gru unit."""
def __init__(self, *, input_size, hidden_size, kernel_size, padding):
super().__init__()
self.convz = nn.Conv2d(hidden_size + input_size, hidden_size, kernel_size=kernel_size, padding=padding)
self.convr = nn.Conv2d(hidden_size + input_size, hidden_size, kernel_size=kernel_size, padding=padding)
self.convq = nn.Conv2d(hidden_size + input_size, hidden_size, kernel_size=kernel_size, padding=padding)
def forward(self, h, x):
hx = torch.cat([h, x], dim=1)
z = torch.sigmoid(self.convz(hx))  # update gate
r = torch.sigmoid(self.convr(hx))  # reset gate
q = torch.tanh(self.convq(torch.cat([r * h, x], dim=1)))  # candidate hidden state
h = (1 - z) * h + z * q  # convex combination of previous and candidate states
return h
def _pass_through_h(h, _):
# Declared here for torchscript
return h
class RecurrentBlock(nn.Module):
"""Recurrent block, part of the update block.
Takes the current hidden state and the concatenation of (motion encoder output, context) as input.
Returns an updated hidden state.
"""
def __init__(self, *, input_size, hidden_size, kernel_size=((1, 5), (5, 1)), padding=((0, 2), (2, 0))):
super().__init__()
assert len(kernel_size) == len(padding)
assert len(kernel_size) in (1, 2)
self.convgru1 = ConvGRU(
input_size=input_size, hidden_size=hidden_size, kernel_size=kernel_size[0], padding=padding[0]
)
if len(kernel_size) == 2:
self.convgru2 = ConvGRU(
input_size=input_size, hidden_size=hidden_size, kernel_size=kernel_size[1], padding=padding[1]
)
else:
self.convgru2 = _pass_through_h
self.hidden_size = hidden_size
def forward(self, h, x):
h = self.convgru1(h, x)
h = self.convgru2(h, x)
return h
class FlowHead(nn.Module):
"""Flow head, part of the update block.
Takes the hidden state of the recurrent unit as input, and outputs the predicted "delta flow".
"""
def __init__(self, *, in_channels, hidden_size):
super().__init__()
self.conv1 = nn.Conv2d(in_channels, hidden_size, 3, padding=1)
self.conv2 = nn.Conv2d(hidden_size, 2, 3, padding=1)
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
return self.conv2(self.relu(self.conv1(x)))
class UpdateBlock(nn.Module):
"""The update block which contains the motion encoder, the recurrent block, and the flow head.
It must expose a ``hidden_state_size`` attribute which is the hidden state size of its recurrent block.
"""
def __init__(self, *, motion_encoder, recurrent_block, flow_head):
super().__init__()
self.motion_encoder = motion_encoder
self.recurrent_block = recurrent_block
self.flow_head = flow_head
self.hidden_state_size = recurrent_block.hidden_size
def forward(self, hidden_state, context, corr_features, flow):
motion_features = self.motion_encoder(flow, corr_features)
x = torch.cat([context, motion_features], dim=1)
hidden_state = self.recurrent_block(hidden_state, x)
delta_flow = self.flow_head(hidden_state)
return hidden_state, delta_flow
class MaskPredictor(nn.Module):
"""Mask predictor to be used when upsampling the predicted flow.
It takes the hidden state of the recurrent unit as input and outputs the mask.
This is not used in the raft-small model.
"""
def __init__(self, *, in_channels, hidden_size, multiplier=0.25):
super().__init__()
self.convrelu = ConvNormActivation(in_channels, hidden_size, norm_layer=None, kernel_size=3)
# 8 * 8 * 9 because the predicted flow is downsampled by 8, from the downsampling of the initial FeatureEncoder
# and we interpolate with all 9 surrounding neighbors. See paper and appendix B.
self.conv = nn.Conv2d(hidden_size, 8 * 8 * 9, 1, padding=0)
# In the original code, they use a factor of 0.25 to "downweight the gradients" of that branch.
# See e.g. https://github.com/princeton-vl/RAFT/issues/119#issuecomment-953950419
# or https://github.com/princeton-vl/RAFT/issues/24.
# It doesn't seem to affect epe significantly and can likely be set to 1.
self.multiplier = multiplier
def forward(self, x):
x = self.convrelu(x)
x = self.conv(x)
return self.multiplier * x
class CorrBlock(nn.Module):
"""The correlation block.
Creates a correlation pyramid with ``num_levels`` levels from the outputs of the feature encoder,
and then indexes from this pyramid to create correlation features.
The "indexing" of a given centroid pixel x' is done by concatenating its surrounding neighbors that
are within a ``radius``, according to the infinity norm (see paper section 3.2).
Note: typo in the paper, it should be infinity norm, not 1-norm.
"""
def __init__(self, *, num_levels: int = 4, radius: int = 4):
super().__init__()
self.num_levels = num_levels
self.radius = radius
self.corr_pyramid: List[Tensor] = [torch.tensor(0)] # useless, but torchscript is otherwise confused :')
# The neighborhood of a centroid pixel x' is {x' + delta, ||delta||_inf <= radius}
# so it's a square surrounding x', and its sides have a length of 2 * radius + 1
# The paper claims that it's ||.||_1 instead of ||.||_inf but it's a typo:
# https://github.com/princeton-vl/RAFT/issues/122
self.out_channels = num_levels * (2 * radius + 1) ** 2
def build_pyramid(self, fmap1, fmap2):
"""Build the correlation pyramid from two feature maps.
The correlation volume is first computed as the dot product of each pair (pixel_in_fmap1, pixel_in_fmap2)
The last 2 dimensions of the correlation volume are then pooled num_levels times at different resolutions
to build the correlation pyramid.
"""
torch._assert(fmap1.shape == fmap2.shape, "Input feature maps should have the same shapes")
corr_volume = self._compute_corr_volume(fmap1, fmap2)
batch_size, h, w, num_channels, _, _ = corr_volume.shape # _, _ = h, w
corr_volume = corr_volume.reshape(batch_size * h * w, num_channels, h, w)
self.corr_pyramid = [corr_volume]
for _ in range(self.num_levels - 1):
corr_volume = F.avg_pool2d(corr_volume, kernel_size=2, stride=2)
self.corr_pyramid.append(corr_volume)
def index_pyramid(self, centroids_coords):
"""Return correlation features by indexing from the pyramid."""
neighborhood_side_len = 2 * self.radius + 1 # see note in __init__ about out_channels
di = torch.linspace(-self.radius, self.radius, neighborhood_side_len)
dj = torch.linspace(-self.radius, self.radius, neighborhood_side_len)
delta = torch.stack(torch.meshgrid(di, dj, indexing="ij"), dim=-1).to(centroids_coords.device)
delta = delta.view(1, neighborhood_side_len, neighborhood_side_len, 2)
batch_size, _, h, w = centroids_coords.shape # _ = 2
centroids_coords = centroids_coords.permute(0, 2, 3, 1).reshape(batch_size * h * w, 1, 1, 2)
indexed_pyramid = []
for corr_volume in self.corr_pyramid:
sampling_coords = centroids_coords + delta # end shape is (batch_size * h * w, side_len, side_len, 2)
indexed_corr_volume = grid_sample(corr_volume, sampling_coords, align_corners=True, mode="bilinear").view(
batch_size, h, w, -1
)
indexed_pyramid.append(indexed_corr_volume)
centroids_coords = centroids_coords / 2
corr_features = torch.cat(indexed_pyramid, dim=-1).permute(0, 3, 1, 2).contiguous()
expected_output_shape = (batch_size, self.out_channels, h, w)
torch._assert(
corr_features.shape == expected_output_shape,
f"Output shape of index pyramid is incorrect. Should be {expected_output_shape}, got {corr_features.shape}",
)
return corr_features
def _compute_corr_volume(self, fmap1, fmap2):
batch_size, num_channels, h, w = fmap1.shape
fmap1 = fmap1.view(batch_size, num_channels, h * w)
fmap2 = fmap2.view(batch_size, num_channels, h * w)
corr = torch.matmul(fmap1.transpose(1, 2), fmap2)
corr = corr.view(batch_size, h, w, 1, h, w)
return corr / torch.sqrt(torch.tensor(num_channels))
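# Shape sketch for CorrBlock (an editor's illustration, not part of
# torchvision). With radius=4 the neighborhood side length is 2 * 4 + 1 = 9,
# so the indexed features have num_levels * 9**2 = 324 channels.
def _corr_block_shape_demo():
    corr_block = CorrBlock(num_levels=4, radius=4)
    fmap = torch.randn(2, 256, 32, 32)  # e.g. 256x256 inputs downsampled by 8
    corr_block.build_pyramid(fmap, fmap)
    # pyramid level shapes: (2048, 1, 32, 32), (2048, 1, 16, 16), ...
    centroids = make_coords_grid(2, 32, 32)
    feats = corr_block.index_pyramid(centroids)
    assert feats.shape == (2, 324, 32, 32)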
class RAFT(nn.Module):
def __init__(self, *, feature_encoder, context_encoder, corr_block, update_block, mask_predictor=None):
"""RAFT model from
`RAFT: Recurrent All Pairs Field Transforms for Optical Flow <https://arxiv.org/abs/2003.12039>`_.
args:
feature_encoder (nn.Module): The feature encoder. It must downsample the input by 8.
Its input is the concatenation of ``image1`` and ``image2``.
context_encoder (nn.Module): The context encoder. It must downsample the input by 8.
Its input is ``image1``. As in the original implementation, its output will be split into 2 parts:
- one part will be used as the actual "context", passed to the recurrent unit of the ``update_block``
- one part will be used to initialize the hidden state of the recurrent unit of
the ``update_block``
These 2 parts are split according to the ``hidden_state_size`` of the ``update_block``, so the number
of output channels of the ``context_encoder`` must be strictly greater than ``hidden_state_size``.
corr_block (nn.Module): The correlation block, which creates a correlation pyramid from the output of the
``feature_encoder``, and then indexes from this pyramid to create correlation features. It must expose
2 methods:
- a ``build_pyramid`` method that takes ``feature_map_1`` and ``feature_map_2`` as input (these are the
output of the ``feature_encoder``).
- a ``index_pyramid`` method that takes the coordinates of the centroid pixels as input, and returns
the correlation features. See paper section 3.2.
It must expose an ``out_channels`` attribute.
update_block (nn.Module): The update block, which contains the motion encoder, the recurrent unit, and the
flow head. It takes as input the hidden state of its recurrent unit, the context, the correlation
features, and the current predicted flow. It outputs an updated hidden state, and the ``delta_flow``
prediction (see paper appendix A). It must expose a ``hidden_state_size`` attribute.
mask_predictor (nn.Module, optional): Predicts the mask that will be used to upsample the predicted flow.
The number of output channels must be 8 * 8 * 9 - see paper section 3.3, and Appendix B.
If ``None`` (default), the flow is upsampled using interpolation.
"""
super().__init__()
_log_api_usage_once(self)
self.feature_encoder = feature_encoder
self.context_encoder = context_encoder
self.corr_block = corr_block
self.update_block = update_block
self.mask_predictor = mask_predictor
if not hasattr(self.update_block, "hidden_state_size"):
raise ValueError("The update_block parameter should expose a 'hidden_state_size' attribute.")
def forward(self, image1, image2, num_flow_updates: int = 12):
batch_size, _, h, w = image1.shape
torch._assert((h, w) == image2.shape[-2:], "input images should have the same shape")
torch._assert((h % 8 == 0) and (w % 8 == 0), "input image H and W should be divisible by 8")
fmaps = self.feature_encoder(torch.cat([image1, image2], dim=0))
fmap1, fmap2 = torch.chunk(fmaps, chunks=2, dim=0)
torch._assert(fmap1.shape[-2:] == (h // 8, w // 8), "The feature encoder should downsample H and W by 8")
self.corr_block.build_pyramid(fmap1, fmap2)
context_out = self.context_encoder(image1)
torch._assert(context_out.shape[-2:] == (h // 8, w // 8), "The context encoder should downsample H and W by 8")
# As in the original paper, the actual output of the context encoder is split in 2 parts:
# - one part is used to initialize the hidden state of the recurrent units of the update block
# - the rest is the "actual" context.
hidden_state_size = self.update_block.hidden_state_size
out_channels_context = context_out.shape[1] - hidden_state_size
torch._assert(
out_channels_context > 0,
f"The context encoder outputs {context_out.shape[1]} channels, but it should have at strictly more than"
f"hidden_state={hidden_state_size} channels",
)
hidden_state, context = torch.split(context_out, [hidden_state_size, out_channels_context], dim=1)
hidden_state = torch.tanh(hidden_state)
context = F.relu(context)
coords0 = make_coords_grid(batch_size, h // 8, w // 8).to(fmap1.device)
coords1 = make_coords_grid(batch_size, h // 8, w // 8).to(fmap1.device)
flow_predictions = []
for _ in range(num_flow_updates):
coords1 = coords1.detach() # Don't backpropagate gradients through this branch, see paper
corr_features = self.corr_block.index_pyramid(centroids_coords=coords1)
flow = coords1 - coords0
hidden_state, delta_flow = self.update_block(hidden_state, context, corr_features, flow)
coords1 = coords1 + delta_flow
up_mask = None if self.mask_predictor is None else self.mask_predictor(hidden_state)
upsampled_flow = upsample_flow(flow=(coords1 - coords0), up_mask=up_mask)
flow_predictions.append(upsampled_flow)
return flow_predictions
def _raft(
*,
arch=None,
pretrained=False,
progress=False,
# Feature encoder
feature_encoder_layers,
feature_encoder_block,
feature_encoder_norm_layer,
# Context encoder
context_encoder_layers,
context_encoder_block,
context_encoder_norm_layer,
# Correlation block
corr_block_num_levels,
corr_block_radius,
# Motion encoder
motion_encoder_corr_layers,
motion_encoder_flow_layers,
motion_encoder_out_channels,
# Recurrent block
recurrent_block_hidden_state_size,
recurrent_block_kernel_size,
recurrent_block_padding,
# Flow Head
flow_head_hidden_size,
# Mask predictor
use_mask_predictor,
**kwargs,
):
feature_encoder = kwargs.pop("feature_encoder", None) or FeatureEncoder(
block=feature_encoder_block, layers=feature_encoder_layers, norm_layer=feature_encoder_norm_layer
)
context_encoder = kwargs.pop("context_encoder", None) or FeatureEncoder(
block=context_encoder_block, layers=context_encoder_layers, norm_layer=context_encoder_norm_layer
)
corr_block = kwargs.pop("corr_block", None) or CorrBlock(num_levels=corr_block_num_levels, radius=corr_block_radius)
update_block = kwargs.pop("update_block", None)
if update_block is None:
motion_encoder = MotionEncoder(
in_channels_corr=corr_block.out_channels,
corr_layers=motion_encoder_corr_layers,
flow_layers=motion_encoder_flow_layers,
out_channels=motion_encoder_out_channels,
)
# See comments in forward pass of RAFT class about why we split the output of the context encoder
out_channels_context = context_encoder_layers[-1] - recurrent_block_hidden_state_size
recurrent_block = RecurrentBlock(
input_size=motion_encoder.out_channels + out_channels_context,
hidden_size=recurrent_block_hidden_state_size,
kernel_size=recurrent_block_kernel_size,
padding=recurrent_block_padding,
)
flow_head = FlowHead(in_channels=recurrent_block_hidden_state_size, hidden_size=flow_head_hidden_size)
update_block = UpdateBlock(motion_encoder=motion_encoder, recurrent_block=recurrent_block, flow_head=flow_head)
mask_predictor = kwargs.pop("mask_predictor", None)
if mask_predictor is None and use_mask_predictor:
mask_predictor = MaskPredictor(
in_channels=recurrent_block_hidden_state_size,
hidden_size=256,
multiplier=0.25, # See comment in MaskPredictor about this
)
model = RAFT(
feature_encoder=feature_encoder,
context_encoder=context_encoder,
corr_block=corr_block,
update_block=update_block,
mask_predictor=mask_predictor,
**kwargs, # not really needed, all params should be consumed by now
)
if pretrained:
state_dict = load_state_dict_from_url(_MODELS_URLS[arch], progress=progress)
model.load_state_dict(state_dict)
return model
def raft_large(*, pretrained=False, progress=True, **kwargs):
"""RAFT model from
`RAFT: Recurrent All Pairs Field Transforms for Optical Flow <https://arxiv.org/abs/2003.12039>`_.
Args:
pretrained (bool): Whether to use pretrained weights.
progress (bool): If True, displays a progress bar of the download to stderr
kwargs (dict): Parameters that will be passed to the :class:`~torchvision.models.optical_flow.RAFT` class
to override any default.
Returns:
nn.Module: The model.
"""
return _raft(
arch="raft_large",
pretrained=pretrained,
progress=progress,
# Feature encoder
feature_encoder_layers=(64, 64, 96, 128, 256),
feature_encoder_block=ResidualBlock,
feature_encoder_norm_layer=InstanceNorm2d,
# Context encoder
context_encoder_layers=(64, 64, 96, 128, 256),
context_encoder_block=ResidualBlock,
context_encoder_norm_layer=BatchNorm2d,
# Correlation block
corr_block_num_levels=4,
corr_block_radius=4,
# Motion encoder
motion_encoder_corr_layers=(256, 192),
motion_encoder_flow_layers=(128, 64),
motion_encoder_out_channels=128,
# Recurrent block
recurrent_block_hidden_state_size=128,
recurrent_block_kernel_size=((1, 5), (5, 1)),
recurrent_block_padding=((0, 2), (2, 0)),
# Flow head
flow_head_hidden_size=256,
# Mask predictor
use_mask_predictor=True,
**kwargs,
)
def raft_small(*, pretrained=False, progress=True, **kwargs):
"""RAFT "small" model from
`RAFT: Recurrent All Pairs Field Transforms for Optical Flow <https://arxiv.org/abs/2003.12039>`_.
Args:
pretrained (bool): Whether to use pretrained weights.
progress (bool): If True, displays a progress bar of the download to stderr
kwargs (dict): Parameters that will be passed to the :class:`~torchvision.models.optical_flow.RAFT` class
to override any default.
Returns:
nn.Module: The model.
"""
return _raft(
arch="raft_small",
pretrained=pretrained,
progress=progress,
# Feature encoder
feature_encoder_layers=(32, 32, 64, 96, 128),
feature_encoder_block=BottleneckBlock,
feature_encoder_norm_layer=InstanceNorm2d,
# Context encoder
context_encoder_layers=(32, 32, 64, 96, 160),
context_encoder_block=BottleneckBlock,
context_encoder_norm_layer=None,
# Correlation block
corr_block_num_levels=4,
corr_block_radius=3,
# Motion encoder
motion_encoder_corr_layers=(96,),
motion_encoder_flow_layers=(64, 32),
motion_encoder_out_channels=82,
# Recurrent block
recurrent_block_hidden_state_size=96,
recurrent_block_kernel_size=(3,),
recurrent_block_padding=(1,),
# Flow head
flow_head_hidden_size=128,
# Mask predictor
use_mask_predictor=False,
**kwargs,
)
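# End-to-end usage sketch (an editor's illustration; pretrained=False avoids a
# network download, and the random inputs stand in for real frames):
if __name__ == "__main__":
    model = raft_small(pretrained=False).eval()
    img1 = torch.randn(1, 3, 64, 64)  # H and W must be divisible by 8
    img2 = torch.randn(1, 3, 64, 64)
    with torch.no_grad():
        flow_predictions = model(img1, img2, num_flow_updates=12)
    # One flow map per update, each upsampled to input resolution: (1, 2, 64, 64)
    print(len(flow_predictions), flow_predictions[-1].shape)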
|
import os

import PIL.Image
import pygments
from pygments.formatters import ImageFormatter
from pygments.lexers import Python3Lexer

from userbot import CMD_HELP, bot
from userbot import bot as borg
from userbot.utils import admin_cmd

# ASCII characters used to build the output text, from darkest to lightest
@bot.on(admin_cmd(pattern="ascii ?(.*)", outgoing=True))
# DO NOT KANG by @THE_B_LACK_HAT & Sh1vam
async def __(event):
gltg=event.text
n=int(gltg[7:])
path = "jvs"
await event.delete()
reply = await event.get_reply_message()
down = await borg.download_media(reply.media, path)
ASCII_CHARS = ["@", "#", "S", "%", "?", "*", "+", ";", ":", ",", "."]
# resize image according to a new width
def resize_image(image, new_width=n):
width, height = image.size
#reference_length = max(width, height)
#ratio = 512 / reference_length
ratio = height/width
new_height = int(new_width * ratio)
resized_image = image.resize((new_width,new_height))
return(resized_image)
# convert each pixel to grayscale
def grayify(image):
grayscale_image = image.convert("L")
return(grayscale_image)
# convert pixels to a string of ascii characters
def pixels_to_ascii(image):
pixels = image.getdata()
characters = "".join([ASCII_CHARS[pixel//25] for pixel in pixels])
return(characters)
def main(new_width=n):
# attempt to open image from user-input
d = down
try:
image = PIL.Image.open(d)
except Exception:
return  # the reply media could not be opened as an image; abort
# convert image to ascii
new_image_data = pixels_to_ascii(grayify(resize_image(image)))
# format
pixel_count = len(new_image_data)
ascii_image = "\n".join([new_image_data[index:(index+new_width)] for index in range(0, pixel_count, new_width)])
# save result to "ascii_image.txt"
with open("ascii_image.txt", "w") as f:
f.write(ascii_image)
s = open("ascii_image.txt", 'r')
c = s.read()
s.close()
pygments.highlight(f"{c}", Python3Lexer(), ImageFormatter(font_name="DejaVu Sans Mono", line_numbers=False), "ascii.png")
imgs="ascii.png"
shvm=PIL.Image.open(imgs)
sh1,vam = image.size
img=shvm.resize((int(sh1),int(vam)))
img.save("asci.png", format="PNG", optimize=True)
main()
await event.client.send_file(event.chat_id, "asci.png", force_document=False, reply_to=event.reply_to_msg_id)
os.remove('ascii.png')
os.remove('asci.png')
os.remove('ascii_image.txt')
os.remove(down)
|
import copy
from typing import List, Dict, Any, Optional # noqa
from chalice.config import Config # noqa
from chalice import constants
from chalice import __version__ as chalice_version
class InvalidCodeBuildPythonVersion(Exception):
def __init__(self, version):
# type: (str) -> None
super(InvalidCodeBuildPythonVersion, self).__init__(
'CodeBuild does not yet support python version %s.' % version
)
class PipelineParameters(object):
def __init__(self, app_name, lambda_python_version,
codebuild_image=None, code_source='codecommit',
chalice_version_range=None):
# type: (str, str, Optional[str], str, Optional[str]) -> None
self.app_name = app_name
self.lambda_python_version = lambda_python_version
self.codebuild_image = codebuild_image
self.code_source = code_source
if chalice_version_range is None:
chalice_version_range = self._lock_to_minor_version()
self.chalice_version_range = chalice_version_range
def _lock_to_minor_version(self):
# type: () -> str
parts = [int(p) for p in chalice_version.rsplit(' ', 1)[-1].split('.')]
min_version = '%s.%s.%s' % (parts[0], parts[1], 0)
max_version = '%s.%s.%s' % (parts[0], parts[1] + 1, 0)
return '>=%s,<%s' % (min_version, max_version)
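# Worked example (illustrative): with chalice_version == '1.12.0', parts is
# [1, 12, 0], so the default range pins the installed minor version:
# '>=1.12.0,<1.13.0'.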
class CreatePipelineTemplate(object):
_CODEBUILD_IMAGE = {
'python2.7': 'python:2.7.12',
'python3.6': 'python:3.6.7',
'python3.7': 'python:3.7.1',
}
_BASE_TEMPLATE = {
"AWSTemplateFormatVersion": "2010-09-09",
"Parameters": {
"ApplicationName": {
"Default": "ChaliceApp",
"Type": "String",
"Description": "Enter the name of your application"
},
"CodeBuildImage": {
"Default": "aws/codebuild/python:3.7.1",
"Type": "String",
"Description": "Name of codebuild image to use."
}
},
"Resources": {},
"Outputs": {},
}
def create_template(self, pipeline_params):
# type: (PipelineParameters) -> Dict[str, Any]
t = copy.deepcopy(self._BASE_TEMPLATE) # type: Dict[str, Any]
params = t['Parameters']
params['ApplicationName']['Default'] = pipeline_params.app_name
params['CodeBuildImage']['Default'] = self._get_codebuild_image(
pipeline_params)
resources = [] # type: List[BaseResource]
if pipeline_params.code_source == 'github':
resources.append(GithubSource())
else:
resources.append(CodeCommitSourceRepository())
resources.extend([CodeBuild(), CodePipeline()])
for resource in resources:
resource.add_to_template(t, pipeline_params)
return t
def _get_codebuild_image(self, params):
# type: (PipelineParameters) -> str
if params.codebuild_image is not None:
return params.codebuild_image
try:
image_suffix = self._CODEBUILD_IMAGE[params.lambda_python_version]
return 'aws/codebuild/%s' % image_suffix
except KeyError as e:
raise InvalidCodeBuildPythonVersion(str(e))
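# Minimal usage sketch (an editor's illustration; 'myapp' is a placeholder):
#
#     import json
#
#     params = PipelineParameters(app_name='myapp', lambda_python_version='python3.7')
#     template = CreatePipelineTemplate().create_template(params)
#     print(json.dumps(template, indent=2))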
class BaseResource(object):
def add_to_template(self, template, pipeline_params):
# type: (Dict[str, Any], PipelineParameters) -> None
raise NotImplementedError("add_to_template")
class CodeCommitSourceRepository(BaseResource):
def add_to_template(self, template, pipeline_params):
# type: (Dict[str, Any], PipelineParameters) -> None
resources = template.setdefault('Resources', {})
resources['SourceRepository'] = {
"Type": "AWS::CodeCommit::Repository",
"Properties": {
"RepositoryName": {
"Ref": "ApplicationName"
},
"RepositoryDescription": {
"Fn::Sub": "Source code for ${ApplicationName}"
}
}
}
template.setdefault('Outputs', {})['SourceRepoURL'] = {
"Value": {
"Fn::GetAtt": "SourceRepository.CloneUrlHttp"
}
}
class GithubSource(BaseResource):
def add_to_template(self, template, pipeline_params):
# type: (Dict[str, Any], PipelineParameters) -> None
# For the github source, we don't create a github repo,
# we just wire it up in the code pipeline. The
# only thing we add to the template are parameters
# we reference in other resources later.
p = template.setdefault('Parameters', {})
p['GithubOwner'] = {
'Type': 'String',
'Description': 'The github owner or org name of the repository.',
}
p['GithubRepoName'] = {
'Type': 'String',
'Description': 'The name of the github repository.',
}
p['GithubPersonalToken'] = {
'Type': 'String',
'Description': 'Personal access token for the github repo.',
'NoEcho': True,
}
class CodeBuild(BaseResource):
def add_to_template(self, template, pipeline_params):
# type: (Dict[str, Any], PipelineParameters) -> None
resources = template.setdefault('Resources', {})
outputs = template.setdefault('Outputs', {})
# Used to store the application source when the SAM
# template is packaged.
self._add_s3_bucket(resources, outputs)
self._add_codebuild_role(resources, outputs)
self._add_codebuild_policy(resources)
self._add_package_build(resources, pipeline_params)
def _add_package_build(self, resources, pipeline_params):
# type: (Dict[str, Any], PipelineParameters) -> None
resources['AppPackageBuild'] = {
"Type": "AWS::CodeBuild::Project",
"Properties": {
"Artifacts": {
"Type": "CODEPIPELINE"
},
"Environment": {
"ComputeType": "BUILD_GENERAL1_SMALL",
"Image": {
"Ref": "CodeBuildImage"
},
"Type": "LINUX_CONTAINER",
"EnvironmentVariables": [
{
"Name": "APP_S3_BUCKET",
"Value": {
"Ref": "ApplicationBucket"
}
}
]
},
"Name": {
"Fn::Sub": "${ApplicationName}Build"
},
"ServiceRole": {
"Fn::GetAtt": "CodeBuildRole.Arn"
},
"Source": {
"Type": "CODEPIPELINE",
"BuildSpec": self._get_default_buildspec(pipeline_params),
}
}
}
def _get_default_buildspec(self, pipeline_params):
# type: (PipelineParameters) -> str
return (
"version: 0.1\n"
"phases:\n"
" install:\n"
" commands:\n"
" - sudo pip install --upgrade awscli\n"
" - aws --version\n"
" - sudo pip install 'chalice%s'\n"
" - sudo pip install -r requirements.txt\n"
" - chalice package /tmp/packaged\n"
" - aws cloudformation package"
" --template-file /tmp/packaged/sam.json"
" --s3-bucket ${APP_S3_BUCKET}"
" --output-template-file transformed.yaml\n"
"artifacts:\n"
" type: zip\n"
" files:\n"
" - transformed.yaml\n"
) % pipeline_params.chalice_version_range
def _add_s3_bucket(self, resources, outputs):
# type: (Dict[str, Any], Dict[str, Any]) -> None
resources['ApplicationBucket'] = {'Type': 'AWS::S3::Bucket'}
outputs['S3ApplicationBucket'] = {
'Value': {'Ref': 'ApplicationBucket'}
}
def _add_codebuild_role(self, resources, outputs):
# type: (Dict[str, Any], Dict[str, Any]) -> None
resources['CodeBuildRole'] = {
"Type": "AWS::IAM::Role",
"Properties": {
"AssumeRolePolicyDocument": {
"Version": "2012-10-17",
"Statement": [
{
"Action": [
"sts:AssumeRole"
],
"Effect": "Allow",
"Principal": {
"Service": [
"codebuild.amazonaws.com"
]
}
}
]
}
}
}
outputs['CodeBuildRoleArn'] = {
"Value": {
"Fn::GetAtt": "CodeBuildRole.Arn"
}
}
def _add_codebuild_policy(self, resources):
# type: (Dict[str, Any]) -> None
resources['CodeBuildPolicy'] = {
"Type": "AWS::IAM::Policy",
"Properties": {
"PolicyName": "CodeBuildPolicy",
"PolicyDocument": constants.CODEBUILD_POLICY,
"Roles": [
{
"Ref": "CodeBuildRole"
}
]
}
}
class CodePipeline(BaseResource):
def add_to_template(self, template, pipeline_params):
# type: (Dict[str, Any], PipelineParameters) -> None
resources = template.setdefault('Resources', {})
outputs = template.setdefault('Outputs', {})
self._add_pipeline(resources, pipeline_params)
self._add_bucket_store(resources, outputs)
self._add_codepipeline_role(resources, outputs)
self._add_cfn_deploy_role(resources, outputs)
def _add_cfn_deploy_role(self, resources, outputs):
# type: (Dict[str, Any], Dict[str, Any]) -> None
outputs['CFNDeployRoleArn'] = {
'Value': {'Fn::GetAtt': 'CFNDeployRole.Arn'}
}
resources['CFNDeployRole'] = {
'Type': 'AWS::IAM::Role',
'Properties': {
"Policies": [
{
"PolicyName": "DeployAccess",
"PolicyDocument": {
"Version": "2012-10-17",
"Statement": [
{
"Action": "*",
"Resource": "*",
"Effect": "Allow"
}
]
}
}
],
"AssumeRolePolicyDocument": {
"Version": "2012-10-17",
"Statement": [
{
"Action": [
"sts:AssumeRole"
],
"Effect": "Allow",
"Principal": {
"Service": [
"cloudformation.amazonaws.com"
]
}
}
]
}
}
}
def _add_pipeline(self, resources, pipeline_params):
# type: (Dict[str, Any], PipelineParameters) -> None
properties = {
'Name': {
'Fn::Sub': '${ApplicationName}Pipeline'
},
'ArtifactStore': {
'Type': 'S3',
'Location': {'Ref': 'ArtifactBucketStore'},
},
'RoleArn': {
'Fn::GetAtt': 'CodePipelineRole.Arn',
},
'Stages': self._create_pipeline_stages(pipeline_params),
}
resources['AppPipeline'] = {
'Type': 'AWS::CodePipeline::Pipeline',
'Properties': properties
}
def _create_pipeline_stages(self, pipeline_params):
# type: (PipelineParameters) -> List[Dict[str, Any]]
# The goal is to eventually allow a user to configure
# the various stages they want created. For now, there's
# a fixed list.
stages = []
source = self._create_source_stage(pipeline_params)
if source:
stages.append(source)
stages.extend([self._create_build_stage(), self._create_beta_stage()])
return stages
def _code_commit_source(self):
# type: () -> Dict[str, Any]
return {
"Name": "Source",
"Actions": [
{
"ActionTypeId": {
"Category": "Source",
"Owner": "AWS",
"Version": 1,
"Provider": "CodeCommit"
},
"Configuration": {
"BranchName": "master",
"RepositoryName": {
"Fn::GetAtt": "SourceRepository.Name"
}
},
"OutputArtifacts": [
{
"Name": "SourceRepo"
}
],
"RunOrder": 1,
"Name": "Source"
}
]
}
def _create_source_stage(self, pipeline_params):
# type: (PipelineParameters) -> Dict[str, Any]
if pipeline_params.code_source == 'codecommit':
return self._code_commit_source()
return self._github_source()
def _github_source(self):
# type: () -> Dict[str, Any]
return {
'Name': 'Source',
'Actions': [{
"ActionTypeId": {
"Category": "Source",
"Owner": "ThirdParty",
"Version": 1,
"Provider": "GitHub"
},
'RunOrder': 1,
                'OutputArtifacts': [{
                    'Name': 'SourceRepo',
                }],
'Configuration': {
'Owner': {'Ref': 'GithubOwner'},
'Repo': {'Ref': 'GithubRepoName'},
'OAuthToken': {'Ref': 'GithubPersonalToken'},
'Branch': 'master',
'PollForSourceChanges': True,
}
}],
}
def _create_build_stage(self):
# type: () -> Dict[str, Any]
return {
"Name": "Build",
"Actions": [
{
"InputArtifacts": [
{
"Name": "SourceRepo"
}
],
"Name": "CodeBuild",
"ActionTypeId": {
"Category": "Build",
"Owner": "AWS",
"Version": 1,
"Provider": "CodeBuild"
},
"OutputArtifacts": [
{
"Name": "CompiledCFNTemplate"
}
],
"Configuration": {
"ProjectName": {
"Ref": "AppPackageBuild"
}
},
"RunOrder": 1
}
]
}
def _create_beta_stage(self):
# type: () -> Dict[str, Any]
return {
"Name": "Beta",
"Actions": [
{
"ActionTypeId": {
"Category": "Deploy",
"Owner": "AWS",
"Version": 1,
"Provider": "CloudFormation"
},
"InputArtifacts": [
{
"Name": "CompiledCFNTemplate"
}
],
"Name": "CreateBetaChangeSet",
"Configuration": {
"ActionMode": "CHANGE_SET_REPLACE",
"ChangeSetName": {
"Fn::Sub": "${ApplicationName}ChangeSet"
},
"RoleArn": {
"Fn::GetAtt": "CFNDeployRole.Arn"
},
"Capabilities": "CAPABILITY_IAM",
"StackName": {
"Fn::Sub": "${ApplicationName}BetaStack"
},
"TemplatePath": "CompiledCFNTemplate::transformed.yaml"
},
"RunOrder": 1
},
{
"RunOrder": 2,
"ActionTypeId": {
"Category": "Deploy",
"Owner": "AWS",
"Version": 1,
"Provider": "CloudFormation"
},
"Configuration": {
"StackName": {
"Fn::Sub": "${ApplicationName}BetaStack"
},
"ActionMode": "CHANGE_SET_EXECUTE",
"ChangeSetName": {
"Fn::Sub": "${ApplicationName}ChangeSet"
},
"OutputFileName": "StackOutputs.json"
},
"Name": "ExecuteChangeSet",
"OutputArtifacts": [
{
"Name": "AppDeploymentValues"
}
]
}
]
}
def _add_bucket_store(self, resources, outputs):
# type: (Dict[str, Any], Dict[str, Any]) -> None
resources['ArtifactBucketStore'] = {
'Type': 'AWS::S3::Bucket',
'Properties': {
'VersioningConfiguration': {
'Status': 'Enabled'
}
}
}
outputs['S3PipelineBucket'] = {
'Value': {'Ref': 'ArtifactBucketStore'}
}
def _add_codepipeline_role(self, resources, outputs):
# type: (Dict[str, Any], Dict[str, Any]) -> None
outputs['CodePipelineRoleArn'] = {
'Value': {'Fn::GetAtt': 'CodePipelineRole.Arn'}
}
resources['CodePipelineRole'] = {
"Type": "AWS::IAM::Role",
"Properties": {
"Policies": [
{
"PolicyName": "DefaultPolicy",
"PolicyDocument": constants.CODEPIPELINE_POLICY,
}
],
"AssumeRolePolicyDocument": {
"Version": "2012-10-17",
"Statement": [
{
"Action": [
"sts:AssumeRole"
],
"Effect": "Allow",
"Principal": {
"Service": [
"codepipeline.amazonaws.com"
]
}
}
]
}
}
}
class BuildSpecExtractor(object):
def extract_buildspec(self, template):
# type: (Dict[str, Any]) -> str
source = template['Resources']['AppPackageBuild'][
'Properties']['Source']
buildspec = source.pop('BuildSpec')
return buildspec
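# A minimal composition sketch (an illustration, not part of the original
# module): each resource class above mutates a shared template dict, so a
# complete pipeline template can be assembled by chaining add_to_template()
# calls. <params> is assumed to be a PipelineParameters instance providing
# the attributes read above (e.g. chalice_version_range, code_source).
def build_example_template(params):
    # type: (PipelineParameters) -> Dict[str, Any]
    template = {}  # type: Dict[str, Any]
    for resource in (CodeBuild(), CodePipeline()):
        resource.add_to_template(template, params)
    return template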
|
# -*- coding: utf-8 -*-
"""
grades/gradetable.py - last updated 2021-06-02
Access grade data, read and build grade tables.
==============================
Copyright 2021 Michael Towers
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
#TODO: Reimplement table handling based on "straight" (dict) data
# and the new JSON structure.
### Messages
_INVALID_INFO_KEY = "Ungültiges INFO-Feld ({key}) in Notentabelle:\n  {fpath}"
_TABLE_CLASS_MISMATCH = "Falsche Klasse/Gruppe in Notentabelle:\n {fpath}"
_TABLE_TERM_MISMATCH = "Falscher \"Anlass\" in Notentabelle:\n {fpath}"
_TABLE_YEAR_MISMATCH = "Falsches Schuljahr in Notentabelle:\n {fpath}"
_PIDS_NOT_IN_GROUP = "Schüler nicht in Gruppe {group}: {pids}"
_WARN_EXTRA_PUPIL = "Unerwarteter Schüler ({name}) in" \
" Notentabelle:\n {tfile}"
_WARN_EXTRA_SUBJECT = "Unerwartetes Fach ({sid}) in" \
" Notentabelle:\n {tfile}"
_ERROR_OVERWRITE2 = "Neue Note für {name} im Fach {sid} mehrmals" \
" vorhanden:\n {tfile1}\n {tfile2}"
_WARN_OVERWRITE = "Geänderte Note für {name} im Fach {sid}:\n {tfile}"
_NEW_GRADE_EMPTY = "Bug: leeres Notenfeld für {name} im Fach {sid}:\n {tfile}"
_BAD_GRADE = "Ungültige Note im Fach {sid}: {g}"
_NO_DATE = "Kein Notendatum angegeben"
#_DATE_EXISTS = "Ausgabedatum existiert schon"
_TITLE2 = "Tabelle erstellt am {time}"
import sys, os
if __name__ == '__main__':
# Enable package import if running as module
this = sys.path[0]
sys.path[0] = os.path.dirname(this)
import datetime
from fractions import Fraction
from collections import namedtuple
from core.base import Dates
from core.pupils import PUPILS
from core.courses import SUBJECTS
from tables.spreadsheet import Spreadsheet, TableError, make_db_table
from tables.matrix import KlassMatrix
from tables.datapack import get_pack, save_pack
from local.base_config import DECIMAL_SEP, USE_XLSX, year_path, NO_DATE
from local.grade_config import GradeBase, UNCHOSEN, NO_GRADE, \
GRADE_INFO_FIELDS, GradeConfigError
from local.abitur_config import AbiCalc
class GradeTableError(Exception):
pass
class FailedSave(Exception):
pass
### +++++
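# <Frac> is used by the averaging methods below but is not defined in this
# module. A minimal sketch of what it could look like (an assumption, not
# the original implementation): a Fraction subclass whose <round> method
# returns the value as a string with a fixed number of decimal places,
# using the locale's decimal separator.
class Frac(Fraction):
    """Fraction with string output, rounded to <decimal_places>."""
    def round(self, decimal_places = 0):
        # Format as a decimal string, then localize the separator.
        v = '{:.{dp}f}'.format(float(self), dp = decimal_places)
        return v.replace('.', DECIMAL_SEP)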
class GradeTable(dict):
"""Manage the grade data for a term (etc.) and group.
The data is a mapping (see documentation) which is stored in JSON.
It contains header information, for the whole table, and the grades
(etc.) for each pupil in the group. It is available as <self.data>.
Methods provide further information connected to the grades:
- <subjects()>: {sid -> subject-name} (just "real" sids)
-
... ?
<group>: school-class/group, as specified in
<GradeBase.REPORT_GROUPS>
<term>: a string representing a valid "term" (school-term, etc.)
<subselect>: depending on <term>, may be empty, 'STUDENT' or
'TAG'.
<schoolyear>: school-year
<issue_d>: date of issue
<grades_d>: date of grade finalization
<sid2subject_data>: {sid -> subject_data} also for "special" sids
<subjects>: {sid -> subject-name} (just "real" sids)
<composites>: {sid -> subject-name} ("composite" sids)
<components>: set of "component" sids
<extra>: {sid/tag -> text name} ("extra" data, treated as grade)
<name>: {pid -> (short) name}
"""
#
def __init__(self, schoolyear, group, term = None):
self.schoolyear = schoolyear
        self.group = group
        self.name = {}          # {pid -> (short) name}
# Split group name (e.g. '11.G' into class ('11') and group tag
# ('G')).
# If there is no '.' in the group name, this is assumed to be
# the class, the group is then ''.
try:
self.klass, self.grouptag = group.split('.')
except ValueError:
            self.klass, self.grouptag = group, ''
self.term = term
if term:
# Try to load existing table
table_path = year_path(schoolyear,
GradeBase.table_path(group, term))
            try:
                gdata = get_pack(table_path)
                #? Check SCHOOLYEAR = schoolyear,
                # GROUP = group, TERM = term)
            except FileNotFoundError:
                # File doesn't exist: start with a new, empty table
                self.new_group_table()
                return
            issue_d = gdata.get('ISSUE_D') or NO_DATE
            if issue_d == NO_DATE or issue_d >= Dates.today():
                # The data is not yet "closed".
                self.new_group_table(grade_data = gdata)
                return
self.issue_d = issue_d
self.grades_d = gdata.get('GRADES_D') or NO_DATE
for row in gdata.get('__PUPILS__'):
pid = row['PID']
gmap = row['__DATA__']
self.name[pid] = row['NAME']
grades = _Grades(group, row.get('STREAM'), term)
grades.init_grades(self._include_grades(grades, gmap))
self[pid] = grades
for comp in self.composites:
grades.composite_calc(self.sid2subject_data[comp])
self.data = None
#
#
    def new_group_table(self, grade_data = None, pids = None):
        """Initialize an empty table for <self.group> and <self.term>.
        If <grade_data> is supplied, it should be a grade-table. Its
        contents are added to the new table, in so far as there are
        corresponding slots.
        If <pids> is supplied, it should be a list of pupil ids: only
        these pupils will be included in the table.
{'HEADER':
{'SCHOOLYEAR': '2021',
'GROUP': '12.G',
'TERM': '2',
'ISSUE_D': '2021-07-21',
'GRADES_D': '2021-07-10'
},
'MEMBERS':
[{'PID': '001234', 'NAME': 'Hans Müller', 'LEVEL': 'Gym',
'__GRADES__': {'De': '08', 'En': '11', ...},
'__EXTRA__': {..., '+Q': '13', '+Z': 'Zeugnis', ...}
},
...
],
...
}
"""
# First get pupils and subjects
pupils = PUPILS(self.schoolyear)
subjects = SUBJECTS(self.schoolyear)
self.sid2subject_data = subjects.grade_subjects(self.klass,
self.grouptag)
# <sid2subject_data> contains composite subjects, but not other
# calculated or extra tags.
#TODO: get (other) calcs and extras
#+++++++++++++++++++++
# Actually, can't I store the composite and tids fields in already processed
# form? No, rather keep it all in one place ...
#TODO: This bit should probably be separate ... or I add the possibility
# of a "table" for a single pupil – needed for custom reports.
self.real_subjects = {} # name-mapping just for "real" subjects
self.composites = {} # name-mapping for composite sids
### Collect weighted components for composites, etc.
self.comp2weights = {}
self.depends = {} # which sids depend on an entry
for sid, sdata in self.sid2subject_data.items():
if sdata['TIDS']:
# "real" (taught) subject
self.real_subjects[sid] = subjects.subject_name(sid)
else:
# "composite" subject
self.composites[sid] = subjects.subject_name(sid)
# Note that it is thus impossible to have "special subjects" other than
# composites under this scheme. That is, no other intermediate values
# can be registered here. Maybe that is no problem, though.
# Composites are a bit special in that they produce grades which appear
# in the reports.
compos = sdata['COMPOSITE']
if compos:
_compos = []
self.depends[sid] = _compos
for c in compos.split():
try:
ctag, w = c.split(':')
except ValueError:
ctag, w = c, 1
else:
w = int(w)
_compos.append(ctag)
self.comp2weights.setdefault(ctag, {})[sid] = w
#TODO: cache the minion file or the processed data?
# Allow term to play a role in configuration?
_extras = MINION(os.path.join(DATA, 'GRADE_EXTRAS'))
self.extras = {}
self.extra2handler = {}
try:
extras = _extras[self.group]
        except KeyError:
pass
else:
for e in extras:
self.extras[e[0]] = e[1]
self.extra2handler[e[0]] = e[2:]
#TODO: If the calculated fields are not included in the stored version
# (which is the intended behaviour), it will be necessary to be able to
# distinguish them from the extra fields which will be saved!
# The simplest might be to insist that calculated fields start with '$'.
#---------------------
pupils = pupils.class_pupils(self.klass, self.grouptag)
# Build MEMBERS list:
self.members = []
self.pid2members = {}
for pdata in pupils:
pid = pdata['PID']
            mdata = {'PID': pid, 'NAME': pupils.name(pdata),
#TODO:
                    'LEVEL': GradeBase.get_level(self.klass, self.grouptag),
                    '__GRADES__': {},   # to be filled with {sid -> grade}
                    '__EXTRA__': {}     # to be filled with {tag -> value}
            }
            self.members.append(mdata)
            self.pid2members[pid] = mdata
#TODO
if grade_data:
            self.set_header('ISSUE_D',
                    grade_data.get('ISSUE_D') or NO_DATE)
            self.set_header('GRADES_D',
                    grade_data.get('GRADES_D') or NO_DATE)
            self.grades_d = grade_data.get('GRADES_D') or NO_DATE
grade_maps = {}
for pdata in grade_data['__PUPILS__']:
grade_maps[pdata['PID']] = pdata['__DATA__']
else:
grade_maps = None
## Initialize the dates (issue at end of term, or end of year)
if self.term in ('S', 'T'):
# ... unless it is a special table
date = NO_DATE
else:
calendar = Dates.get_calendar(self.schoolyear)
try:
date = calendar['TERM_%d' % (int(self.term) + 1)]
            except (KeyError, ValueError):
date = calendar['LAST_DAY']
else:
# Previous day, ensure that it is a weekday
td = datetime.timedelta(days = 1)
d = datetime.date.fromisoformat(date)
while True:
d -= td
if d.weekday() < 5:
date = d.isoformat()
break
self.issue_d = date
self.grades_d = NO_DATE
## Pupil information
# Pupil data, select pupils
pupils = PUPILS(self.schoolyear)
pidset = set(pids) if pids else None
for pdata in pupils.group2pupils(self.group, date = date):
pid = pdata['PID']
if pids:
try:
pidset.remove(pid)
except KeyError:
continue
self.name[pid] = pupils.name(pdata)
# Set grades
try:
gmap = grade_maps[pid]
            except (KeyError, TypeError):
gmap = {}
grades = _Grades(self.group, pdata['STREAM'], self.term)
grades.init_grades(self._include_grades(grades, gmap))
self[pid] = grades
for comp in self.composites:
grades.composite_calc(self.sid2subject_data[comp])
if pidset:
raise GradeTableError(_PIDS_NOT_IN_GROUP.format(
group = self.group, pids = ', '.join(pidset)))
#
def _set_group_term(self, group, term, tag):
"""Set the subjects and extra pupil-data fields for the given
group and term (and tag, in the case of TAG-subselects).
"""
self.group = group
self.term = term
self.subselect = tag
# Get subjects
        subjects = SUBJECTS(self.schoolyear)
# {sid -> subject_data}:
self.sid2subject_data = subjects.grade_subjects(group)
self.subjects = {} # name-mapping just for "real" subjects
self.composites = {} # name-mapping for composite sids
comp2weights = {} # collect weighted components for composites, etc.
for sid, sdata in self.sid2subject_data.items():
tids = sdata['TIDS']
if tids:
# "real" (taught) subject
self.subjects[sid] = subjects.subject_name(sid)
sdata['TIDS'] = tids.split()
else:
# "composite" subject
self.composites[sid] = subjects.subject_name(sid)
compos = sdata['COMPOSITE']
_compos = []
sdata['COMPOSITE'] = _compos
if compos:
for c in compos.split():
try:
ctag, w = c.split(':')
except ValueError:
ctag, w = c, 1
else:
w = int(w)
_compos.append(ctag)
comp2weights.setdefault(ctag, {})[sid] = w
# Add components to composites ...
self.extra_components = {}
for sid, s2w in comp2weights.items():
if sid in self.composites:
sdata = self.sid2subject_data[sid]
sdata['COMPONENTS'] = s2w
else:
self.extra_components.setdefault(sid, {})['COMPONENTS'] = s2w
#TODO?
self.components = set() # set of "component" sids
# The "real components" are not directly identifiable. They can
# be found by looking for a "real composite" in the COMPOSITE field.
# But actually, why should they be identified specifically?
# Perhaps to mark/separate them in the table? There is perhaps
# a better way of handling that? E.g. on the basis of their
# SGROUP?
for sid in self.subjects:
sdata = self.sid2subject_data[sid]
for c in sdata['COMPOSITE']:
if c in self.composites:
self.components.add(sid)
#TODO?
# data for "extra" sid-fields:
self.extras = _Grades.xgradefields(group, term)
# data for additional info fields, whose values are calculated
# from the other fields:
self.calcs = _Grades.calc_fields(group, term)
if term == 'Abitur':
# Modify for Abitur
AbiCalc.subjects(self)
#
def _include_grades(self, grades, gmap):
"""Return a grade mapping.
Include grades for all subjects and extra entries.
Initial values are taken from the mapping <gmap>: {sid -> grade}.
The expected entries are set previously in method <_set_group_term>.
"""
sid2grade = {}
for sid in self.subjects:
sid2grade[sid] = gmap.get(sid) or ''
for comp in self.composites:
sid2grade[comp] = gmap.get(comp) or ''
for xsid in self.extras:
# Where appropriate use default values
sid2grade[xsid] = grades.extras_default(xsid, gmap.get(xsid))
return sid2grade
#
def make_grade_table(self):
"""Build a basic pupil/subject table for grade input.
The field names (and TERM value) will be localized.
It will contain the existing grades. To get an empty table,
initialize the <GradeTable> instance using method <new_group_table>.
"""
### Get template file
template = GradeBase.group_info(self.group, 'NotentabelleVorlage')
template_path = os.path.join(RESOURCES, 'templates',
*template.split('/'))
table = KlassMatrix(template_path)
### Set title line
# table.setTitle("???")
table.setTitle2(Dates.timestamp())
### Translate and enter general info
info = (
(GRADE_INFO_FIELDS['SCHOOLYEAR'], str(self.schoolyear)),
(GRADE_INFO_FIELDS['GROUP'], self.group),
(GRADE_INFO_FIELDS['TERM'], self.term),
(GRADE_INFO_FIELDS['GRADES_D'], self.grades_d),
(GRADE_INFO_FIELDS['ISSUE_D'], self.issue_d)
)
table.setInfo(info)
### Go through the template columns and check if they are needed:
sidcol = []
col = 0
rowix = table.row0() # index of header row
for sid, sname in self.subjects.items():
# Add subject
col = table.nextcol()
sidcol.append((sid, col))
table.write(rowix, col, sid)
table.write(rowix + 1, col, sname)
# Enforce minimum number of columns
while col < 18:
col = table.nextcol()
table.write(rowix, col, None)
# Delete excess columns
table.delEndCols(col + 1)
### Add pupils
for pid, gmap in self.items():
row = table.nextrow()
table.write(row, 0, pid)
table.write(row, 1, self.name[pid])
table.write(row, 2, gmap.stream)
for sid, col in sidcol:
g = gmap.get(sid)
if g:
table.write(row, col, g)
# Delete excess rows
row = table.nextrow()
table.delEndRows(row)
### Save file
table.protectSheet()
return table.save()
#
def save(self, tag = None):
"""Save the data to the "database".
"""
fields = []
for sid in self.subjects:
fields.append(sid)
for xsid in self.extras:
fields.append(xsid)
# The calculated fields are not saved.
# Get line data
dlist = []
for pid, grades in self.items():
gmap = {}
dmap = {'PID': pid, 'NAME': self.name[pid],
'STREAM': grades.stream, '__DATA__': gmap}
for sid in fields:
v = grades.get(sid)
if v:
gmap[sid] = v
dlist.append(dmap)
# Get file path and write file
table_path = year_path(self.schoolyear,
GradeBase.table_path(self.group, self.term, tag))
data = {
'SCHOOLYEAR': self.schoolyear,
'GROUP': self.group,
'TERM': self.term,
'GRADES_D': self.grades_d,
'ISSUE_D': self.issue_d,
'__PUPILS__': dlist,
'__MODIFIED__': Dates.timestamp()
}
#TODO: Title?
return save_pack(table_path, **data)
#
def recalc(self, pid):
"""Calculate the values for the "Calc" fields.
Return a list: [(sid, val), ... ]
"""
svlist = []
if self.term == 'Abitur':
_ac = AbiCalc(self, pid).calculate()
for sid in self.calcs:
if sid == '.D':
svlist.append((sid, self.average(pid)))
elif sid == '.Dx':
svlist.append((sid, self.average_dem(pid)))
elif sid == '.Q':
svlist.append((sid, _ac['REPORT_TYPE']))
elif sid == '.N':
svlist.append((sid, _ac['Note']))
return svlist
#
def average(self, pid):
"""Calculate the average of all grades, including composites,
but ignoring components and non-numerical grades.
"""
asum = 0
ai = 0
grades = self[pid]
for sid in self.subjects:
            if self.sid2subject_data[sid]['COMPOSITE']:
# A component
continue
gi = grades.i_grade[sid]
if gi >= 0:
asum += gi
ai += 1
for sid in self.composites:
gi = grades.i_grade[sid]
if gi >= 0:
asum += gi
ai += 1
if ai:
return Frac(asum, ai).round(2)
else:
return '–––'
#
def average_dem(self, pid):
"""Special average for "Realschulabschluss": De-En_Ma only.
"""
asum = 0
ai = 0
grades = self[pid]
for sid in ('De', 'En', 'Ma'):
gi = grades.i_grade[sid]
if gi >= 0:
asum += gi
ai += 1
if ai:
return Frac(asum, ai).round(2)
else:
return '–––'
###
class GradeTableFile(_GradeTable):
def __init__(self, schoolyear, filepath):
"""Read the header info and pupils' grades from the given table file.
The "spreadsheet" module is used as backend so .ods, .xlsx and .tsv
formats are possible. The filename may be passed without extension –
<Spreadsheet> then looks for a file with a suitable extension.
<Spreadsheet> also supports in-memory binary streams (io.BytesIO)
with attribute 'filename' (so that the type-extension can be read).
The <info> mapping of the table should contain the keys:
'SCHOOLYEAR', 'GROUP', 'TERM', 'ISSUE_D', 'GRADES_D'
Only the non-empty cells from the source table will be included.
"""
super().__init__(schoolyear)
ss = Spreadsheet(filepath)
self.filepath = ss.filepath
dbt = ss.dbTable()
info = {}
# Translate info field names using reversed mapping.
rfields = {v: k for k, v in GRADE_INFO_FIELDS.items()}
for row in dbt.info:
if row[0]:
key, val = row[:2]
try:
key = rfields[key]
except KeyError:
# Also accept unlocalized field names
if key not in GRADE_INFO_FIELDS:
raise GradeTableError(_INVALID_INFO_KEY.format(
key = key, fpath = self.filepath))
info[key] = val
self.issue_d = info.get('ISSUE_D') or NO_DATE
self.grades_d = info.get('GRADES_D') or NO_DATE
        term = (info.get('TERM') or '').replace(' ', '_')
subsel = GradeBase.term_info(term, 'subselect')
self._set_group_term(info.get('GROUP'), term,
self.grades_d if subsel == 'TAG' else None)
year = info.get('SCHOOLYEAR')
if year != str(self.schoolyear):
raise GradeTableError(_TABLE_YEAR_MISMATCH.format(
fpath = filepath))
sid2col = []
col = 0
for f in dbt.fieldnames():
if col > 2:
if f[0] != '$':
# This should be a subject tag
if f in self.subjects or f in self.extras:
sid2col.append((f, col))
else:
REPORT('WARN', _WARN_EXTRA_SUBJECT.format(sid = f,
tfile = self.filepath))
col += 1
# Only include non-empty cells from the source table
for row in dbt:
pid = row[0]
if pid and pid != '$':
gmap = {}
for sid, col in sid2col:
val = row[col]
if val:
gmap[sid] = val
self.name[pid] = row[1]
grades = _Grades(self.group, row[2], self.term)
#grades.init_grades(self._include_grades(grades, gmap))
grades.init_grades(gmap)
self[pid] = grades
###
class NewGradeTable(_GradeTable):
"""An empty grade table.
"""
def __init__(self, schoolyear, group, term, pids = None):
"""If <pids> is supplied it should be a list of pupil ids: only
these pupils will be included in the new table.
"""
super().__init__(schoolyear)
self._set_group_term(group, term, None)
self._new_group_table(pids)
###
class oldGradeTable(_GradeTable):
def __init__(self, schoolyear, group, term, tag = None, ok_new = False):
"""If <ok_new> is true, a new table may be created, otherwise
the table must already exist.
<tag> is for "term"-types with "subselect=TAG" only.
If the field 'ISSUE_D' is after the "current" date, or not yet
set, the table should be created as a new one – if there is an
existing table, its grade data will be imported.
"""
super().__init__(schoolyear)
self._set_group_term(group, term, tag)
# Get file path
table_path = year_path(schoolyear,
GradeBase.table_path(group, term, tag))
try:
# Read the "internal" table for this group/term(/tag)
gdata = get_pack(table_path, SCHOOLYEAR = schoolyear,
GROUP = group, TERM = term)
except FileNotFoundError:
# File doesn't exist
if not ok_new:
raise
self._new_group_table()
return
issue_d = gdata.get('ISSUE_D') or NO_DATE
if issue_d == NO_DATE or issue_d >= Dates.today():
# The data is not yet "closed".
self._new_group_table(grade_data = gdata)
return
self.issue_d = issue_d
self.grades_d = gdata.get('GRADES_D') or NO_DATE
for row in gdata.get('__PUPILS__'):
pid = row['PID']
gmap = row['__DATA__']
self.name[pid] = row['NAME']
grades = _Grades(group, row.get('STREAM'), term)
grades.init_grades(self._include_grades(grades, gmap))
self[pid] = grades
for comp in self.composites:
grades.composite_calc(self.sid2subject_data[comp])
#
def check_group_term(self, gtable):
"""Check that group and term in <gtable> match those of
the current instance.
"""
# if gtable.schoolyear != self.schoolyear:
# raise GradeTableError(_TABLE_YEAR_MISMATCH.format(
# fpath = gtable.filepath))
if gtable.group != self.group:
raise GradeTableError(_TABLE_CLASS_MISMATCH.format(
fpath = gtable.filepath))
if gtable.term != self.term:
raise GradeTableError(_TABLE_TERM_MISMATCH.format(
fpath = gtable.filepath))
#
def integrate_partial_data(self, *gtables):
"""Include the data from the given (partial) tables.
- Only non-empty source table fields will be used for updating.
- Only allow an entry to be supplied in one source table.
- Updates to non-empty fields will issue a warning.
The current grade table is updated but not saved.
Return the number of overwritten non-empty entries. This can be
used to decide whether the changes should be saved.
"""
tfiles = {} # {pid:sid -> table file} (keep track of sources)
overwrite = 0
for gtable in gtables:
for pid, grades in gtable.items():
try:
pgrades = self[pid]
except KeyError:
REPORT('WARN', _WARN_EXTRA_PUPIL.format(
name = gtable.name[pid],
tfile = gtable.filepath))
continue
for sid, g in grades.items():
if not g:
# This should not occur!
REPORT('ERROR', _NEW_GRADE_EMPTY.format(
sid = sid,
name = gtable.name[pid],
tfile = gtable.filepath))
continue # don't update
g0 = pgrades[sid]
key = '%s:%s' % (pid, sid)
tfile1 = tfiles.get(key)
tfile2 = gtable.filepath
tfiles[key] = tfile2
if g != g0:
if tfile1:
REPORT('ERROR', _ERROR_OVERWRITE2.format(
sid = sid,
name = gtable.name[pid],
tfile1 = tfile1,
tfile2 = tfile2))
continue # don't update
if g0:
overwrite += 1
REPORT('WARN', _WARN_OVERWRITE.format(
sid = sid,
name = gtable.name[pid],
tfile = tfile2))
pgrades[sid] = g
# A "recalc" should not be necessary if the grade file is
# reloaded after saving – which is the expected usage.
# Otherwise the calculations should probably be redone:
#for pid in self:
# self.recalc(pid)
return overwrite
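###
# A usage sketch for the merge logic above (an illustration based on the
# pre-rewrite classes, not original code): read one or more partial grade
# tables, check that they match the stored table's group and term, merge
# them in and save the result.
def _example_integrate(schoolyear, group, term, *filepaths):
    gtable = oldGradeTable(schoolyear, group, term, ok_new = True)
    inputs = [GradeTableFile(schoolyear, f) for f in filepaths]
    for t in inputs:
        gtable.check_group_term(t)
    overwrites = gtable.integrate_partial_data(*inputs)
    gtable.save()
    return overwrites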
#--#--#--#--#--#--#--#--#--#--#--#--#--#--#--#--#--#--#--#--#--#--#--#
if __name__ == '__main__':
from core.base import init
init()
_schoolyear = '2016'
if True:
# if False:
_filepath = os.path.join(DATA, 'testing', 'Noten', 'NOTEN_A',
'Noten_13_A')
# _filepath = os.path.join(DATA, 'testing', 'Noten', 'NOTEN_1_11.G',
# 'Noten_11.G_1-AB')
_gtable = GradeTableFile(_schoolyear, _filepath)
print("SUBJECTS:", _gtable.subjects)
print("GROUP:", _gtable.group)
print("TERM:", _gtable.term)
print("YEAR:", _gtable.schoolyear)
print("ISSUE_D:", _gtable.issue_d)
print("GRADES_D:", _gtable.grades_d)
print("NAMES:", _gtable.name)
print("COMPOSITES:", _gtable.composites)
print("COMPONENTS:", _gtable.components)
print("EXTRAS:", _gtable.extras)
print("CALCS:", _gtable.calcs)
for _pid, _gdata in _gtable.items():
print("???", _pid, _gdata.stream, _gdata)
quit(0)
#TODO...
if True:
# if False:
_group = '12.G'
_term = '2'
print("\n\nGRADE TABLE for %s, term %s" % (_group, _term))
_gtable = GradeTable(_schoolyear, _group, _term, ok_new = True)
print("SUBJECTS:", _gtable.subjects)
print("GROUP:", _gtable.group)
print("TERM:", _gtable.term)
print("YEAR:", _gtable.schoolyear)
print("ISSUE_D:", _gtable.issue_d)
print("GRADES_D:", _gtable.grades_d)
print("NAMES:", _gtable.name)
for _pid, _gdata in _gtable.items():
print("???", _pid, _gdata.stream, _gdata)
if True:
# if False:
_group = '11.G'
_term = 'S2016-03-01'
print("\n\nGRADE TABLE for %s, term %s" % (_group, _term))
_gtable = GradeTable(_schoolyear, _group, _term, ok_new = True)
print("SUBJECTS:", _gtable.subjects)
print("GROUP:", _gtable.group)
print("TERM:", _gtable.term)
print("YEAR:", _gtable.schoolyear)
print("ISSUE_D:", _gtable.issue_d)
print("GRADES_D:", _gtable.grades_d)
print("NAMES:", _gtable.name)
for _pid, _gdata in _gtable.items():
print("???", _pid, _gdata.stream, _gdata)
print("INTERNAL: -->", _gtable.save())
quit(0)
if True:
# Read all existing test tables into the internal form
# if False:
odir = os.path.join(DATA, 'testing', 'tmp')
os.makedirs(odir, exist_ok = True)
from glob import glob
_filepath = os.path.join(DATA, 'testing', 'Noten', 'NOTEN_*', 'Noten_*')
for f in sorted(glob(_filepath)):
_gtable = GradeTableFile(_schoolyear, f)
print("READ", f)
fname = os.path.basename(f)
xlsx_bytes = _gtable.make_grade_table()
tfile = os.path.join(odir, fname.rsplit('.', 1)[0] + '.xlsx')
with open(tfile, 'wb') as fh:
fh.write(xlsx_bytes)
print("OUT:", tfile)
print("INTERNAL: -->", _gtable.save())
|
"""
Setup module for the jupyterlab_github proxy extension
"""
import setuptools
from setupbase import (
create_cmdclass, ensure_python, find_packages
)
data_files_spec = [
('etc/jupyter/jupyter_notebook_config.d',
'jupyter-config/jupyter_notebook_config.d', 'jupyterlab_github.json'),
]
cmdclass = create_cmdclass(data_files_spec=data_files_spec)
setup_dict = dict(
name='jupyterlab_github',
    description='A Jupyter Notebook server extension which acts as a proxy for the GitHub API.',
packages=find_packages(),
cmdclass=cmdclass,
author = 'Jupyter Development Team',
author_email = 'jupyter@googlegroups.com',
url = 'http://jupyter.org',
license = 'BSD',
platforms = "Linux, Mac OS X, Windows",
keywords = ['Jupyter', 'JupyterLab', 'GitHub'],
python_requires = '>=3.5',
classifiers = [
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
],
install_requires=[
'notebook'
]
)
try:
ensure_python(setup_dict["python_requires"].split(','))
except ValueError as e:
raise ValueError("{:s}, to use {} you must use python {} ".format(
e,
setup_dict["name"],
setup_dict["python_requires"])
)
from jupyterlab_github import __version__
setuptools.setup(
version=__version__,
**setup_dict
)
|
from django.db import models
from django.contrib.auth.models import User
from django.core.validators import MaxValueValidator, MinValueValidator
class Movie(models.Model):
title = models.CharField(max_length=32)
description = models.TextField(max_length=360)
def no_of_ratings(self):
ratings = Rating.objects.filter(movie=self)
return len(ratings)
    def avg_rating(self):
        # Avoid shadowing the builtin sum()
        total = 0
        ratings = Rating.objects.filter(movie=self)
        for rating in ratings:
            total += rating.stars
        if len(ratings) > 0:
            return total / len(ratings)
        else:
            return 0
class Rating(models.Model):
movie = models.ForeignKey(Movie, on_delete=models.CASCADE)
user = models.ForeignKey(User, on_delete=models.CASCADE)
stars = models.IntegerField(
validators=[MinValueValidator(1),
MaxValueValidator(5)])
class Meta:
unique_together = (('user', 'movie'),)
index_together = (('user', 'movie'),)
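# A hedged alternative sketch (not part of the original models): the same
# average can be computed database-side with an aggregate, using the default
# reverse accessor 'rating_set' (no related_name is set above).
def avg_rating_db(movie):
    from django.db.models import Avg
    return movie.rating_set.aggregate(avg=Avg('stars'))['avg'] or 0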
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#****************************************************************************************************************************************************
# Copyright 2017 NXP
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the NXP. nor the names of
# its contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#****************************************************************************************************************************************************
from typing import Optional
from FslBuildGen import IOUtil
from FslBuildGen.Log import Log
from FslBuildGen.Exceptions import UsageErrorException
from FslBuildGen.BuildExternal.PackageExperimentalRecipe import PackageExperimentalRecipe
from FslBuildGen.BuildExternal.RecipeBuilderSetup import RecipeBuilderSetup
from FslBuildGen.Vars.VariableProcessor import VariableProcessor
from FslBuildGen.Xml.XmlExperimentalRecipe import XmlExperimentalRecipe
class RecipePathBuilder(object):
def __init__(self, log: Log, variableProcessor: VariableProcessor, recipeBuilderSetup: Optional[RecipeBuilderSetup], platformName: str, compilerGeneratorName: str) -> None:
super(RecipePathBuilder, self).__init__()
self.__Log = log # type: Log
self.__VariableProcessor = variableProcessor # type: VariableProcessor
self.IsEnabled = recipeBuilderSetup is not None # type: bool
        self.TargetPath = None  # type: Optional[str]
        self.DownloadCacheRootPath = None  # type: Optional[str]
        self.__TempRootPath = None  # type: Optional[str]
        self.__TempPipelineRootPath = None  # type: Optional[str]
        self.InstallRootPath = None  # type: Optional[str]
        self.ReadonlyCache_DownloadCacheRootPath = None  # type: Optional[str]
if self.IsEnabled and recipeBuilderSetup is not None:
targetPath = recipeBuilderSetup.TargetPath
readonlyCachePath = recipeBuilderSetup.ReadonlyCachePath
if not IOUtil.IsAbsolutePath(targetPath):
raise Exception("Install area path is not absolute: '{0}'".format(targetPath))
            if readonlyCachePath is not None and not IOUtil.IsAbsolutePath(readonlyCachePath):
raise Exception("Install area readonly cache path is not absolute: '{0}'".format(readonlyCachePath))
self.TargetPath = targetPath
self.DownloadCacheRootPath = IOUtil.Join(targetPath, ".DownloadCache")
self.__TempRootPath = IOUtil.Join(targetPath, ".Temp")
baseTempDirectory = IOUtil.Join(self.__TempRootPath, "pipeline")
baseTempDirectory = IOUtil.Join(baseTempDirectory, platformName)
self.__TempPipelineRootPath = IOUtil.Join(baseTempDirectory, compilerGeneratorName)
baseInstallDirectory = IOUtil.Join(targetPath, platformName)
self.InstallRootPath = IOUtil.Join(baseInstallDirectory, compilerGeneratorName)
self.ReadonlyCache_DownloadCacheRootPath = None if readonlyCachePath is None else IOUtil.Join(readonlyCachePath, ".DownloadCache")
def GetBuildPath(self, sourceRecipe: PackageExperimentalRecipe) -> str:
if not self.IsEnabled or self.__TempPipelineRootPath is None:
raise Exception("Can not GetBuildPath since the builder functionality has been disabled")
return IOUtil.Join(self.__TempPipelineRootPath, sourceRecipe.Name)
def TryGetInstallPath(self, xmlSourceRecipe: XmlExperimentalRecipe) -> Optional[str]:
if xmlSourceRecipe is None:
return None
        elif xmlSourceRecipe.ExternalInstallDirectory is not None:
if not xmlSourceRecipe.Pipeline is None:
self.__Log.DoPrintWarning("SourceRecipe ExternalInstallDirectory overrides Pipeline '{0}'".format(xmlSourceRecipe.Name))
return self.__VariableProcessor.ResolveAbsolutePathWithLeadingEnvironmentVariablePathAsDir(xmlSourceRecipe.ExternalInstallDirectory)
if not self.IsEnabled or self.InstallRootPath is None:
raise Exception("Can not TryGetInstallPath since the builder functionality has been disabled, please enable the builder functionality for this project")
return None if xmlSourceRecipe.Pipeline is None else IOUtil.Join(self.InstallRootPath, xmlSourceRecipe.Name)
|
import olutils.collection as lib
def test_FlatStr():
assert repr(lib.FlatStr("Hello")) == "Hello"
|
import numpy as np
def open_pdb(file_location):
"""
Open and read coordinates from a pdb file.
    The pdb file must specify the atom elements in the last column and
    follow the conventions outlined in the pdb format specification.
Parameters
----------
file_location : string
The location of the pdb file to read.
Returns
-------
symbols : list
The atomic symbols of the pdb file.
coords : np.ndarray
The atomic coordinates of the pdb file.
"""
with open(file_location) as f:
data = f.readlines()
coordinates = []
symbols = []
for line in data:
if 'ATOM' in line[0:6] or 'HETATM' in line[0:6]:
            # The element symbol occupies PDB columns 77-78 (0-based 76:78);
            # slicing up to 79 would also pick up the first charge character.
            symbols.append(line[76:78].strip())
atom_coords = [float(x) for x in line[30:55].split()]
coordinates.append(atom_coords)
coords = np.array(coordinates)
return symbols, coords
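# A minimal usage sketch (assumption: a file named "water.pdb" exists in the
# working directory); not part of the original module.
if __name__ == '__main__':
    symbols, coords = open_pdb('water.pdb')
    print("Read {} atoms".format(len(symbols)))
    print(symbols)
    print(coords)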
|
from flask import redirect, session
class Device_XML():
endpoints = ["/device.xml"]
endpoint_name = "file_device_xml"
def __init__(self, fhdhr):
self.fhdhr = fhdhr
def __call__(self, *args):
return self.get(*args)
def get(self, *args):
        if self.fhdhr.config.dict["rmg"]["enabled"] and session.get("is_plexmediaserver"):
return redirect("/rmg/device.xml")
else:
return redirect("/hdhr/device.xml")
|
from .semseg_loss import filter_valid_label, SemSegLoss
from .cross_entropy import CrossEntropyLoss
from .focal_loss import FocalLoss
from .smooth_L1 import SmoothL1Loss
__all__ = [
'filter_valid_label', 'SemSegLoss', 'CrossEntropyLoss', 'FocalLoss',
'SmoothL1Loss'
]
|
from easygraphics.turtle import *
create_world(800, 600)
set_speed(10)
lt(45)
fd(100)
lt(90)
move_arc(100, 90)
lt(90)
fd(100)
lt(90)
fd(100)
rt(90)
move_arc(-100, 90)
rt(90)
fd(100)
rt(90)
bk(100)
rt(90)
move_arc(100, -90)
rt(90)
bk(100)
rt(90)
bk(100)
lt(90)
move_arc(-100, -90)
lt(90)
bk(100)
lt(90)
pause()
close_world()
|
import time
import pytest
from ray import serve
from ray.serve.deployment_state import (
SLOW_STARTUP_WARNING_S,
SLOW_STARTUP_WARNING_PERIOD_S,
)
def test_slow_allocation_warning(serve_instance, capsys):
# this deployment can never be scheduled
@serve.deployment(ray_actor_options={"num_cpus": 99999})
class D:
def __init__(self):
pass
num_replicas = 2
D.options(num_replicas=num_replicas).deploy(_blocking=False)
expected_warning = (
f"Deployment '{D.name}' has "
f"{num_replicas} replicas that have taken "
f"more than {SLOW_STARTUP_WARNING_S}s "
f"to be scheduled."
)
# wait long enough for the warning to be printed
# with a small grace period
time.sleep(SLOW_STARTUP_WARNING_PERIOD_S * 1.5)
captured = capsys.readouterr()
print(captured.err)
assert expected_warning in captured.err
# make sure that exactly one warning was printed
# for this deployment
assert captured.err.count(expected_warning) == 1
def test_slow_initialization_warning(serve_instance, capsys):
# this deployment will take a while to allocate
@serve.deployment
class D:
def __init__(self):
time.sleep(99999)
num_replicas = 4
D.options(num_replicas=num_replicas).deploy(_blocking=False)
expected_warning = (
f"Deployment '{D.name}' has "
f"{num_replicas} replicas that have taken "
f"more than {SLOW_STARTUP_WARNING_S}s "
f"to initialize."
)
# wait long enough for the warning to be printed
# with a small grace period
time.sleep(SLOW_STARTUP_WARNING_PERIOD_S * 1.5)
captured = capsys.readouterr()
assert expected_warning in captured.err
# make sure that exactly one warning was printed
# for this deployment
assert captured.err.count(expected_warning) == 1
def test_deployment_init_error_logging(serve_instance, capsys):
@serve.deployment
class D:
def __init__(self):
0 / 0
with pytest.raises(RuntimeError):
D.deploy()
captured = capsys.readouterr()
assert "Exception in deployment 'D'" in captured.err
assert "ZeroDivisionError" in captured.err
|
from tfbspline.BSplineTF import BSpline, get_spline
from .util import interpolate
__version__ = '1.0.0'
|
# -*- coding:utf-8 -*-
#
# Copyright (C) 2012, Maximilian Köhl <linuxmaxi@googlemail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import subprocess
from libqtile.widget import base
class Pacman(base.ThreadedPollText):
"""Shows number of available updates
Needs the pacman package manager installed. So will only work in Arch Linux
installation.
"""
orientations = base.ORIENTATION_HORIZONTAL
defaults = [
('unavailable', 'ffffff', 'Unavailable Color - no updates.'),
('execute', None, 'Command to execute on click'),
('update_interval', 60, "The update interval."),
]
def __init__(self, **config):
base.deprecated("Pacman is deprecated, please use CheckUpdates")
base.ThreadedPollText.__init__(self, **config)
self.add_defaults(Pacman.defaults)
def draw(self):
if self.text == '0':
self.layout.colour = self.unavailable
else:
self.layout.colour = self.foreground
base.ThreadedPollText.draw(self)
def poll(self):
pacman = self.call_process(['checkupdates'])
return str(len(pacman.splitlines()))
def button_press(self, x, y, button):
base.ThreadedPollText.button_press(self, x, y, button)
if button == 1 and self.execute is not None:
subprocess.Popen([self.execute], shell=True)
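# A minimal configuration sketch (an illustration, not part of the widget):
# how Pacman might be instantiated inside a qtile bar; the terminal command
# passed to <execute> is an assumption.
def example_pacman_widget():
    return Pacman(execute='xterm -e sudo pacman -Syu',
                  update_interval=600)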
|
import codecs
import copy
from decimal import Decimal
from django.apps.registry import Apps
from django.db.backends.base.schema import BaseDatabaseSchemaEditor
from django.utils import six
import _sqlite3
class DatabaseSchemaEditor(BaseDatabaseSchemaEditor):
sql_delete_table = "DROP TABLE %(table)s"
sql_create_inline_fk = "REFERENCES %(to_table)s (%(to_column)s)"
def quote_value(self, value):
try:
value = _sqlite3.adapt(value)
except _sqlite3.ProgrammingError:
pass
# Manual emulation of SQLite parameter quoting
if isinstance(value, type(True)):
return str(int(value))
elif isinstance(value, (Decimal, float)):
return str(value)
elif isinstance(value, six.integer_types):
return str(value)
elif isinstance(value, six.string_types):
return "'%s'" % six.text_type(value).replace("\'", "\'\'")
elif value is None:
return "NULL"
elif isinstance(value, (bytes, bytearray, six.memoryview)):
# Bytes are only allowed for BLOB fields, encoded as string
# literals containing hexadecimal data and preceded by a single "X"
# character:
# value = b'\x01\x02' => value_hex = b'0102' => return X'0102'
value = bytes(value)
hex_encoder = codecs.getencoder('hex_codec')
value_hex, _length = hex_encoder(value)
# Use 'ascii' encoding for b'01' => '01', no need to use force_text here.
return "X'%s'" % value_hex.decode('ascii')
else:
raise ValueError("Cannot quote parameter value %r of type %s" % (value, type(value)))
    def _remake_table(self, model, create_fields=(), delete_fields=(), alter_fields=(), override_uniques=None,
                      override_indexes=None):
"""
Shortcut to transform a model from old_model into new_model
"""
# Work out the new fields dict / mapping
body = {f.name: f for f in model._meta.local_fields}
# Since mapping might mix column names and default values,
# its values must be already quoted.
mapping = {f.column: self.quote_name(f.column) for f in model._meta.local_fields}
# This maps field names (not columns) for things like unique_together
rename_mapping = {}
# If any of the new or altered fields is introducing a new PK,
# remove the old one
restore_pk_field = None
if any(f.primary_key for f in create_fields) or any(n.primary_key for o, n in alter_fields):
for name, field in list(body.items()):
if field.primary_key:
field.primary_key = False
restore_pk_field = field
if field.auto_created:
del body[name]
del mapping[field.column]
# Add in any created fields
for field in create_fields:
body[field.name] = field
# Choose a default and insert it into the copy map
if not field.many_to_many:
mapping[field.column] = self.quote_value(
self.effective_default(field)
)
# Add in any altered fields
for (old_field, new_field) in alter_fields:
body.pop(old_field.name, None)
mapping.pop(old_field.column, None)
body[new_field.name] = new_field
if old_field.null and not new_field.null:
case_sql = "coalesce(%(col)s, %(default)s)" % {
'col': self.quote_name(old_field.column),
'default': self.quote_value(self.effective_default(new_field))
}
mapping[new_field.column] = case_sql
else:
mapping[new_field.column] = self.quote_name(old_field.column)
rename_mapping[old_field.name] = new_field.name
# Remove any deleted fields
for field in delete_fields:
del body[field.name]
del mapping[field.column]
# Remove any implicit M2M tables
if field.many_to_many and field.rel.through._meta.auto_created:
return self.delete_model(field.rel.through)
# Work inside a new app registry
apps = Apps()
# Provide isolated instances of the fields to the new model body
# Instantiating the new model with an alternate db_table will alter
# the internal references of some of the provided fields.
body = copy.deepcopy(body)
# Work out the new value of unique_together, taking renames into
# account
if override_uniques is None:
override_uniques = [
[rename_mapping.get(n, n) for n in unique]
for unique in model._meta.unique_together
]
# Work out the new value for index_together, taking renames into
# account
if override_indexes is None:
override_indexes = [
[rename_mapping.get(n, n) for n in index]
for index in model._meta.index_together
]
# Construct a new model for the new state
meta_contents = {
'app_label': model._meta.app_label,
'db_table': model._meta.db_table + "__new",
'unique_together': override_uniques,
'index_together': override_indexes,
'apps': apps,
}
meta = type("Meta", tuple(), meta_contents)
body['Meta'] = meta
body['__module__'] = model.__module__
temp_model = type(model._meta.object_name, model.__bases__, body)
# Create a new table with that format. We remove things from the
# deferred SQL that match our table name, too
self.deferred_sql = [x for x in self.deferred_sql if model._meta.db_table not in x]
self.create_model(temp_model)
# Copy data from the old table
field_maps = list(mapping.items())
self.execute("INSERT INTO %s (%s) SELECT %s FROM %s" % (
self.quote_name(temp_model._meta.db_table),
', '.join(self.quote_name(x) for x, y in field_maps),
', '.join(y for x, y in field_maps),
self.quote_name(model._meta.db_table),
))
# Delete the old table
self.delete_model(model, handle_autom2m=False)
# Rename the new to the old
self.alter_db_table(temp_model, temp_model._meta.db_table, model._meta.db_table)
# Run deferred SQL on correct table
for sql in self.deferred_sql:
self.execute(sql.replace(temp_model._meta.db_table, model._meta.db_table))
self.deferred_sql = []
# Fix any PK-removed field
if restore_pk_field:
restore_pk_field.primary_key = True
def delete_model(self, model, handle_autom2m=True):
if handle_autom2m:
super(DatabaseSchemaEditor, self).delete_model(model)
else:
# Delete the table (and only that)
self.execute(self.sql_delete_table % {
"table": self.quote_name(model._meta.db_table),
})
def add_field(self, model, field):
"""
Creates a field on a model.
Usually involves adding a column, but may involve adding a
table instead (for M2M fields)
"""
# Special-case implicit M2M tables
if field.many_to_many and field.rel.through._meta.auto_created:
return self.create_model(field.rel.through)
self._remake_table(model, create_fields=[field])
def remove_field(self, model, field):
"""
Removes a field from a model. Usually involves deleting a column,
but for M2Ms may involve deleting a table.
"""
# M2M fields are a special case
if field.many_to_many:
# For implicit M2M tables, delete the auto-created table
if field.rel.through._meta.auto_created:
self.delete_model(field.rel.through)
# For explicit "through" M2M fields, do nothing
# For everything else, remake.
else:
# It might not actually have a column behind it
if field.db_parameters(connection=self.connection)['type'] is None:
return
self._remake_table(model, delete_fields=[field])
def _alter_field(self, model, old_field, new_field, old_type, new_type,
old_db_params, new_db_params, strict=False):
"""Actually perform a "physical" (non-ManyToMany) field update."""
# Alter by remaking table
self._remake_table(model, alter_fields=[(old_field, new_field)])
def alter_index_together(self, model, old_index_together, new_index_together):
"""
Deals with a model changing its index_together.
Note: The input index_togethers must be doubly-nested, not the single-
nested ["foo", "bar"] format.
"""
self._remake_table(model, override_indexes=new_index_together)
def alter_unique_together(self, model, old_unique_together, new_unique_together):
"""
Deals with a model changing its unique_together.
Note: The input unique_togethers must be doubly-nested, not the single-
nested ["foo", "bar"] format.
"""
self._remake_table(model, override_uniques=new_unique_together)
def _alter_many_to_many(self, model, old_field, new_field, strict):
"""
Alters M2Ms to repoint their to= endpoints.
"""
if old_field.rel.through._meta.db_table == new_field.rel.through._meta.db_table:
# The field name didn't change, but some options did; we have to propagate this altering.
self._remake_table(
old_field.rel.through,
alter_fields=[(
# We need the field that points to the target model, so we can tell alter_field to change it -
# this is m2m_reverse_field_name() (as opposed to m2m_field_name, which points to our model)
old_field.rel.through._meta.get_field(old_field.m2m_reverse_field_name()),
new_field.rel.through._meta.get_field(new_field.m2m_reverse_field_name()),
)],
override_uniques=(new_field.m2m_field_name(), new_field.m2m_reverse_field_name()),
)
return
# Make a new through table
self.create_model(new_field.rel.through)
# Copy the data across
self.execute("INSERT INTO %s (%s) SELECT %s FROM %s" % (
self.quote_name(new_field.rel.through._meta.db_table),
', '.join([
"id",
new_field.m2m_column_name(),
new_field.m2m_reverse_name(),
]),
', '.join([
"id",
old_field.m2m_column_name(),
old_field.m2m_reverse_name(),
]),
self.quote_name(old_field.rel.through._meta.db_table),
))
# Delete the old through table
self.delete_model(old_field.rel.through)
|
import os
import shutil
from pathlib import Path
def get_obj_name(files):
for file in files:
if (str(file).endswith(".obj")):
return (str(file).split('.')[0], True)
return (None, False)
def filter_specify_file(root, files, name, extensions, verbose=False):
    # <extensions> renamed from <type> to avoid shadowing the builtin
    output = []
    for file in files:
        if file.startswith(name) and os.path.splitext(file)[-1] in extensions:
            output.append(os.path.join(root, file))
    if verbose:
        if len(output) != 2:
            # Fail loudly instead of hanging in a busy loop
            raise RuntimeError("Filter ERROR: expected 2 matching files, got {0}".format(output))
        print(output)
    return output
def inverse_find(base, verbose=False):
output = []
for root, dirs, files in os.walk(base):
name, ans = get_obj_name(files)
if (ans is True):
tmp = filter_specify_file(root, files, name, ['.obj', '.bmp'])
if (len(tmp) > 0):
output.append(tmp)
return output
def copy(output, target_dir, verbose=False):
for file_pair in output:
for file in file_pair:
source = file
            target_file = os.path.join(target_dir, os.path.basename(source))
if verbose:
print("Source = ", file)
print("target = ", target_file)
if Path(target_file).exists():
print("Skipped : {0}".format(source))
else:
shutil.copy(source, target_dir)
                if Path(target_file).exists():
                    print("Copied successfully : {0}".format(source))
                else:
                    # Fail loudly instead of hanging in a busy loop
                    raise RuntimeError("Copy failed : {0}".format(source))
if __name__ == '__main__':
base_path = "/home/li_gang/TestFile/dataBase"
target_dir = '/home/li_gang/TestFile/LargeInput1'
out = inverse_find(base_path)
copy(out, target_dir)
|
from typing import Any, Dict, Optional, Sequence
from ..argument_utility import (
ActionScalerArg,
EncoderArg,
QFuncArg,
ScalerArg,
UseGPUArg,
check_encoder,
check_q_func,
check_use_gpu,
)
from ..constants import IMPL_NOT_INITIALIZED_ERROR, ActionSpace
from ..dataset import TransitionMiniBatch
from ..gpu import Device
from ..models.encoders import EncoderFactory
from ..models.optimizers import AdamFactory, OptimizerFactory
from ..models.q_functions import QFunctionFactory
from .base import AlgoBase
from .torch.plas_impl import PLASImpl, PLASWithPerturbationImpl
class PLAS(AlgoBase):
r"""Policy in Latent Action Space algorithm.
PLAS is an offline deep reinforcement learning algorithm whose policy
function is trained in latent space of Conditional VAE.
Unlike other algorithms, PLAS can achieve good performance by using
its less constrained policy function.
.. math::
a \sim p_\beta (a|s, z=\pi_\phi(s))
where :math:`\beta` is a parameter of the decoder in Conditional VAE.
References:
* `Zhou et al., PLAS: latent action space for offline reinforcement
learning. <https://arxiv.org/abs/2011.07213>`_
Args:
actor_learning_rate (float): learning rate for policy function.
critic_learning_rate (float): learning rate for Q functions.
imitator_learning_rate (float): learning rate for Conditional VAE.
actor_optim_factory (d3rlpy.models.optimizers.OptimizerFactory):
optimizer factory for the actor.
critic_optim_factory (d3rlpy.models.optimizers.OptimizerFactory):
optimizer factory for the critic.
imitator_optim_factory (d3rlpy.models.optimizers.OptimizerFactory):
optimizer factory for the conditional VAE.
actor_encoder_factory (d3rlpy.models.encoders.EncoderFactory or str):
encoder factory for the actor.
critic_encoder_factory (d3rlpy.models.encoders.EncoderFactory or str):
encoder factory for the critic.
imitator_encoder_factory (d3rlpy.models.encoders.EncoderFactory or str):
encoder factory for the conditional VAE.
q_func_factory (d3rlpy.models.q_functions.QFunctionFactory or str):
Q function factory.
batch_size (int): mini-batch size.
n_frames (int): the number of frames to stack for image observation.
n_steps (int): N-step TD calculation.
gamma (float): discount factor.
        tau (float): target network synchronization coefficient.
n_critics (int): the number of Q functions for ensemble.
target_reduction_type (str): ensemble reduction method at target value
estimation. The available options are
``['min', 'max', 'mean', 'mix', 'none']``.
update_actor_interval (int): interval to update policy function.
lam (float): weight factor for critic ensemble.
warmup_steps (int): the number of steps to warmup the VAE.
        beta (float): KL regularization term for Conditional VAE.
use_gpu (bool, int or d3rlpy.gpu.Device):
flag to use GPU, device ID or device.
scaler (d3rlpy.preprocessing.Scaler or str): preprocessor.
The available options are `['pixel', 'min_max', 'standard']`.
action_scaler (d3rlpy.preprocessing.ActionScaler or str):
action preprocessor. The available options are ``['min_max']``.
        impl (d3rlpy.algos.torch.plas_impl.PLASImpl): algorithm implementation.
"""
_actor_learning_rate: float
_critic_learning_rate: float
_imitator_learning_rate: float
_actor_optim_factory: OptimizerFactory
_critic_optim_factory: OptimizerFactory
_imitator_optim_factory: OptimizerFactory
_actor_encoder_factory: EncoderFactory
_critic_encoder_factory: EncoderFactory
_imitator_encoder_factory: EncoderFactory
_q_func_factory: QFunctionFactory
_tau: float
_n_critics: int
_target_reduction_type: str
_update_actor_interval: int
_lam: float
_warmup_steps: int
_beta: float
_use_gpu: Optional[Device]
_impl: Optional[PLASImpl]
def __init__(
self,
*,
actor_learning_rate: float = 1e-4,
critic_learning_rate: float = 1e-3,
imitator_learning_rate: float = 1e-4,
actor_optim_factory: OptimizerFactory = AdamFactory(),
critic_optim_factory: OptimizerFactory = AdamFactory(),
imitator_optim_factory: OptimizerFactory = AdamFactory(),
actor_encoder_factory: EncoderArg = "default",
critic_encoder_factory: EncoderArg = "default",
imitator_encoder_factory: EncoderArg = "default",
q_func_factory: QFuncArg = "mean",
batch_size: int = 100,
n_frames: int = 1,
n_steps: int = 1,
gamma: float = 0.99,
tau: float = 0.005,
n_critics: int = 2,
target_reduction_type: str = "mix",
update_actor_interval: int = 1,
lam: float = 0.75,
warmup_steps: int = 500000,
beta: float = 0.5,
use_gpu: UseGPUArg = False,
scaler: ScalerArg = None,
action_scaler: ActionScalerArg = None,
impl: Optional[PLASImpl] = None,
**kwargs: Any
):
super().__init__(
batch_size=batch_size,
n_frames=n_frames,
n_steps=n_steps,
gamma=gamma,
scaler=scaler,
action_scaler=action_scaler,
kwargs=kwargs,
)
self._actor_learning_rate = actor_learning_rate
self._critic_learning_rate = critic_learning_rate
self._imitator_learning_rate = imitator_learning_rate
self._actor_optim_factory = actor_optim_factory
self._critic_optim_factory = critic_optim_factory
self._imitator_optim_factory = imitator_optim_factory
self._actor_encoder_factory = check_encoder(actor_encoder_factory)
self._critic_encoder_factory = check_encoder(critic_encoder_factory)
self._imitator_encoder_factory = check_encoder(imitator_encoder_factory)
self._q_func_factory = check_q_func(q_func_factory)
self._tau = tau
self._n_critics = n_critics
self._target_reduction_type = target_reduction_type
self._update_actor_interval = update_actor_interval
self._lam = lam
self._warmup_steps = warmup_steps
self._beta = beta
self._use_gpu = check_use_gpu(use_gpu)
self._impl = impl
def _create_impl(
self, observation_shape: Sequence[int], action_size: int
) -> None:
self._impl = PLASImpl(
observation_shape=observation_shape,
action_size=action_size,
actor_learning_rate=self._actor_learning_rate,
critic_learning_rate=self._critic_learning_rate,
imitator_learning_rate=self._imitator_learning_rate,
actor_optim_factory=self._actor_optim_factory,
critic_optim_factory=self._critic_optim_factory,
imitator_optim_factory=self._imitator_optim_factory,
actor_encoder_factory=self._actor_encoder_factory,
critic_encoder_factory=self._critic_encoder_factory,
imitator_encoder_factory=self._imitator_encoder_factory,
q_func_factory=self._q_func_factory,
gamma=self._gamma,
tau=self._tau,
n_critics=self._n_critics,
target_reduction_type=self._target_reduction_type,
lam=self._lam,
beta=self._beta,
use_gpu=self._use_gpu,
scaler=self._scaler,
action_scaler=self._action_scaler,
)
self._impl.build()
def _update(self, batch: TransitionMiniBatch) -> Dict[str, float]:
assert self._impl is not None, IMPL_NOT_INITIALIZED_ERROR
metrics = {}
if self._grad_step < self._warmup_steps:
imitator_loss = self._impl.update_imitator(batch)
metrics.update({"imitator_loss": imitator_loss})
else:
critic_loss = self._impl.update_critic(batch)
metrics.update({"critic_loss": critic_loss})
if self._grad_step % self._update_actor_interval == 0:
actor_loss = self._impl.update_actor(batch)
metrics.update({"actor_loss": actor_loss})
self._impl.update_actor_target()
self._impl.update_critic_target()
return metrics
def get_action_type(self) -> ActionSpace:
return ActionSpace.CONTINUOUS
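# A minimal usage sketch (illustrative only; assumes a d3rlpy MDPDataset
# named `dataset` and follows d3rlpy's generic Algo training API):
#
#   plas = PLAS(use_gpu=False)
#   plas.fit(dataset, n_epochs=10)
#   actions = plas.predict(observations)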
class PLASWithPerturbation(PLAS):
r"""Policy in Latent Action Space algorithm with perturbation layer.
PLAS with perturbation layer enables PLAS to output out-of-distribution
action.
References:
* `Zhou et al., PLAS: latent action space for offline reinforcement
learning. <https://arxiv.org/abs/2011.07213>`_
Args:
actor_learning_rate (float): learning rate for policy function.
critic_learning_rate (float): learning rate for Q functions.
imitator_learning_rate (float): learning rate for Conditional VAE.
actor_optim_factory (d3rlpy.models.optimizers.OptimizerFactory):
optimizer factory for the actor.
critic_optim_factory (d3rlpy.models.optimizers.OptimizerFactory):
optimizer factory for the critic.
imitator_optim_factory (d3rlpy.models.optimizers.OptimizerFactory):
optimizer factory for the conditional VAE.
actor_encoder_factory (d3rlpy.models.encoders.EncoderFactory or str):
encoder factory for the actor.
critic_encoder_factory (d3rlpy.models.encoders.EncoderFactory or str):
encoder factory for the critic.
imitator_encoder_factory (d3rlpy.models.encoders.EncoderFactory or str):
encoder factory for the conditional VAE.
q_func_factory (d3rlpy.models.q_functions.QFunctionFactory or str):
Q function factory.
batch_size (int): mini-batch size.
n_frames (int): the number of frames to stack for image observation.
n_steps (int): N-step TD calculation.
gamma (float): discount factor.
        tau (float): target network synchronization coefficient.
n_critics (int): the number of Q functions for ensemble.
target_reduction_type (str): ensemble reduction method at target value
estimation. The available options are
``['min', 'max', 'mean', 'mix', 'none']``.
update_actor_interval (int): interval to update policy function.
lam (float): weight factor for critic ensemble.
action_flexibility (float): output scale of perturbation layer.
warmup_steps (int): the number of steps to warmup the VAE.
        beta (float): KL regularization term for Conditional VAE.
use_gpu (bool, int or d3rlpy.gpu.Device):
flag to use GPU, device ID or device.
scaler (d3rlpy.preprocessing.Scaler or str): preprocessor.
The available options are `['pixel', 'min_max', 'standard']`.
action_scaler (d3rlpy.preprocessing.ActionScaler or str):
action preprocessor. The available options are ``['min_max']``.
        impl (d3rlpy.algos.torch.plas_impl.PLASWithPerturbationImpl):
            algorithm implementation.
"""
_action_flexibility: float
_impl: Optional[PLASWithPerturbationImpl]
def __init__(
self,
*,
actor_learning_rate: float = 1e-4,
critic_learning_rate: float = 1e-3,
imitator_learning_rate: float = 1e-4,
actor_optim_factory: OptimizerFactory = AdamFactory(),
critic_optim_factory: OptimizerFactory = AdamFactory(),
imitator_optim_factory: OptimizerFactory = AdamFactory(),
actor_encoder_factory: EncoderArg = "default",
critic_encoder_factory: EncoderArg = "default",
imitator_encoder_factory: EncoderArg = "default",
q_func_factory: QFuncArg = "mean",
batch_size: int = 100,
n_frames: int = 1,
n_steps: int = 1,
gamma: float = 0.99,
tau: float = 0.005,
n_critics: int = 2,
target_reduction_type: str = "mix",
update_actor_interval: int = 1,
lam: float = 0.75,
action_flexibility: float = 0.05,
warmup_steps: int = 500000,
beta: float = 0.5,
use_gpu: UseGPUArg = False,
scaler: ScalerArg = None,
action_scaler: ActionScalerArg = None,
impl: Optional[PLASWithPerturbationImpl] = None,
**kwargs: Any
):
super().__init__(
actor_learning_rate=actor_learning_rate,
critic_learning_rate=critic_learning_rate,
imitator_learning_rate=imitator_learning_rate,
actor_optim_factory=actor_optim_factory,
critic_optim_factory=critic_optim_factory,
imitator_optim_factory=imitator_optim_factory,
actor_encoder_factory=actor_encoder_factory,
critic_encoder_factory=critic_encoder_factory,
imitator_encoder_factory=imitator_encoder_factory,
q_func_factory=q_func_factory,
batch_size=batch_size,
n_frames=n_frames,
n_steps=n_steps,
gamma=gamma,
tau=tau,
n_critics=n_critics,
target_reduction_type=target_reduction_type,
update_actor_interval=update_actor_interval,
lam=lam,
warmup_steps=warmup_steps,
beta=beta,
use_gpu=use_gpu,
scaler=scaler,
action_scaler=action_scaler,
impl=impl,
**kwargs,
)
self._action_flexibility = action_flexibility
def _create_impl(
self, observation_shape: Sequence[int], action_size: int
) -> None:
self._impl = PLASWithPerturbationImpl(
observation_shape=observation_shape,
action_size=action_size,
actor_learning_rate=self._actor_learning_rate,
critic_learning_rate=self._critic_learning_rate,
imitator_learning_rate=self._imitator_learning_rate,
actor_optim_factory=self._actor_optim_factory,
critic_optim_factory=self._critic_optim_factory,
imitator_optim_factory=self._imitator_optim_factory,
actor_encoder_factory=self._actor_encoder_factory,
critic_encoder_factory=self._critic_encoder_factory,
imitator_encoder_factory=self._imitator_encoder_factory,
q_func_factory=self._q_func_factory,
gamma=self._gamma,
tau=self._tau,
n_critics=self._n_critics,
target_reduction_type=self._target_reduction_type,
lam=self._lam,
beta=self._beta,
action_flexibility=self._action_flexibility,
use_gpu=self._use_gpu,
scaler=self._scaler,
action_scaler=self._action_scaler,
)
self._impl.build()
|
#!/usr/bin/python3
import os
BOSON_BUS = 10
BOSON_I2C_ADDR = 0x6C
CMD_REG = 0
def send_i2c(bus, address, reg, val):
os.system(f'sudo i2cset -f -y {bus} {hex(address)} {hex(reg)} {hex(val)}')
def read_i2c(bus, address, reg):
    # os.system() only returns the command's exit status; use os.popen() so
    # the register value printed by i2cget is actually returned to the caller.
    return os.popen(f'sudo i2cget -f -y {bus} {hex(address)} {hex(reg)}').read().strip()
def send_packet(values):
for val in values:
send_i2c(BOSON_BUS, BOSON_I2C_ADDR, CMD_REG, val)
def send_ffc():
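    # The byte sequence below looks like a pre-built FLIR Boson serial frame
    # (0x8E start byte, channel/sequence/command fields, CRC bytes, 0xAE end
    # byte) that requests a flat-field correction (FFC). The per-byte
    # interpretation is an assumption, not documented in the original script.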
values = [0x8E, 0x00, 0x12, 0xC0, 0xFF, 0xEE, 0x00, 0x05, 0x00, 0x07,
0xFF, 0xFF, 0xFF, 0xFF, 0x6C, 0x5E, 0xAE]
send_packet(values)
if __name__ == '__main__':
send_ffc()
|
from Web import app as application
|
# flake8: noqa
from mot.trackers.single_object_trackers.gauss_sum_tracker import GaussSumTracker
from mot.trackers.single_object_trackers.nearest_neighbour_tracker import (
NearestNeighbourTracker,
)
from mot.trackers.single_object_trackers.probabilistic_data_association_tracker import (
ProbabilisticDataAssociationTracker,
)
|
import os, re, sys
import platform
import subprocess
from distutils.version import LooseVersion
from setuptools import setup, find_packages, Extension
from setuptools.command.build_ext import build_ext
# Project structure and CMake build steps adapted from
# https://www.benjack.io/2018/02/02/python-cpp-revisited.html
CMAKE_COMMAND = os.environ.get("CMAKE_COMMAND", "cmake")
class CMakeExtension(Extension):
def __init__(self, name, sourcedir=""):
Extension.__init__(self, name, sources=[])
self.sourcedir = os.path.abspath(sourcedir)
class CMakeBuild(build_ext):
def run(self):
try:
out = subprocess.check_output([CMAKE_COMMAND, "--version"])
except OSError:
raise RuntimeError(
"CMake must be installed to build the following extensions: "
+ ", ".join(e.name for e in self.extensions)
)
if platform.system() == "Windows":
cmake_version = LooseVersion(
re.search(r"version\s*([\d.]+)", out.decode()).group(1)
)
if cmake_version < "3.1.0":
raise RuntimeError("CMake >= 3.1.0 is required on Windows")
for ext in self.extensions:
self.build_extension(ext)
def build_extension(self, ext):
extdir = os.path.abspath(os.path.dirname(self.get_ext_fullpath(ext.name)))
cmake_args = [
"-DCMAKE_LIBRARY_OUTPUT_DIRECTORY=" + extdir,
"-DPYTHON_EXECUTABLE=" + sys.executable,
]
cfg = "Debug" if self.debug else "Release"
build_args = ["--config", cfg]
if platform.system() == "Windows":
cmake_args += [
"-DCMAKE_LIBRARY_OUTPUT_DIRECTORY_{}={}".format(cfg.upper(), extdir)
]
if sys.maxsize > 2 ** 32:
cmake_args += ["-A", "x64"]
build_args += ["--", "/m"]
else:
cmake_args += ["-DCMAKE_BUILD_TYPE=" + cfg]
build_args += ["--", "-j2"]
env = os.environ.copy()
env["CXXFLAGS"] = '{} -DVERSION_INFO=\\"{}\\"'.format(
env.get("CXXFLAGS", ""), self.distribution.get_version()
)
if not os.path.exists(self.build_temp):
os.makedirs(self.build_temp)
subprocess.check_call(
[CMAKE_COMMAND, ext.sourcedir] + cmake_args, cwd=self.build_temp, env=env
)
subprocess.check_call(
[CMAKE_COMMAND, "--build", "."] + build_args, cwd=self.build_temp
)
print() # Add an empty line for cleaner output
def file_find_or_append(original_path, match_pattern, append, sep=" "):
# Adapted from SeqLib python package.
with open(original_path) as original_file:
original = original_file.readlines()
with open(original_path, "w") as replaced_file:
for line in original:
match = re.match(match_pattern, line)
if match:
print(line.rstrip(), *[flag for flag in append if flag not in match[0]], sep=sep, file=replaced_file)
else:
replaced_file.write(line)
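# Illustrative call (hypothetical path): file_find_or_append("Makefile",
# r"^CFLAGS\s*=.*$", ["-fPIC"]) appends -fPIC to the CFLAGS line unless it
# is already present; see SeqLibCMakeBuild.run below for the actual usage.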
class SeqLibCMakeBuild(CMakeBuild):
def run(self):
# To link into a shared library we need to add the -fPIC and other flags to SeqLib dependencies
# before building. Adapted from SeqLib python package.
bwa_makefile_path = os.path.join(
os.path.dirname(os.path.realpath(__file__)), "lib/seqlib", "bwa", "Makefile"
)
file_find_or_append(bwa_makefile_path, r"^CFLAGS\s*=.*$", ["-fPIC","-Wno-unused-result"])
super().run()
with open("README.md") as f:
readme = f.read()
with open("LICENSE") as f:
license = f.read()
setup(
name="npsv",
version="0.1.0",
description="Non-parametric genotyper for structural variants",
long_description=readme,
author="Michael Linderman",
author_email="mlinderman@middlebury.edu",
license=license,
url="https://github.com/mlinderm/npsv",
scripts=[
"scripts/synthBAM",
"scripts/vcf2bed",
"scripts/vcf2samplot",
"scripts/svviz22vcf",
"scripts/padAlleles",
"scripts/fixSVVCF",
],
entry_points="""
[console_scripts]
npsv=npsv.main:main
npsvg=npsv.npsvg:main
""",
packages=find_packages("src"),
package_dir={"": "src"},
ext_modules=[CMakeExtension("npsv/npsv")],
cmdclass=dict(build_ext=SeqLibCMakeBuild),
zip_safe=False,
test_suite="tests",
include_package_data=True,
data_files=[
(
"etc",
[
"etc/human_g1k_v37.genome",
"etc/human_g1k_v37.gaps.bed.gz",
"etc/human_g1k_v37.gaps.bed.gz.tbi",
],
)
],
classifiers=[
"Intended Audience :: Science/Research",
"Topic :: Scientific/Engineering :: Bio-Informatics",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python :: 3.6",
],
)
|
#!U:\PROG\Reknamorcen\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip'
__requires__ = 'pip==10.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==10.0.1', 'console_scripts', 'pip')()
)
|
def section1():
    import json
    import dtlpy as dl
    if dl.token_expired():
        dl.login()
    organization = dl.organizations.get(organization_name='my-org')
with open(r"C:\gcsfile.json", 'r') as f:
gcs_json = json.load(f)
gcs_to_string = json.dumps(gcs_json)
organization.integrations.create(name='gcsintegration',
integrations_type=dl.ExternalStorage.GCS,
options={'key': '',
'secret': '',
'content': gcs_to_string})
def section2():
import dtlpy as dl
if dl.token_expired():
dl.login()
organization = dl.organizations.get(organization_name='my-org')
organization.integrations.create(name='S3integration', integrations_type=dl.ExternalStorage.S3,
options={'key': "my_key", 'secret': "my_secret"})
def section3():
import dtlpy as dl
if dl.token_expired():
dl.login()
organization = dl.organizations.get(organization_name='my-org')
organization.integrations.create(name='azureintegration',
integrations_type=dl.ExternalStorage.AZUREBLOB,
options={'key': 'my_key',
'secret': 'my_secret',
'clientId': 'my_clientId',
'tenantId': 'my_tenantId'})
def section4():
# param name: the driver name
# param driver_type: ExternalStorage.S3, ExternalStorage.GCS , ExternalStorage.AZUREBLOB
# param integration_id: the integration id
# param bucket_name: the external bucket name
# param project_id:
# param allow_external_delete:
# param region: relevant only for s3 - the bucket region
# param storage_class: relevant only for s3
# param path: Optional. By default, path is the root folder. Path is case sensitive.
# return: driver object
import dtlpy as dl
    project = dl.projects.get('project_name')
driver = project.drivers.create(name='driver_name',
driver_type=dl.ExternalStorage.S3,
integration_id='integration_id',
bucket_name='bucket_name',
allow_external_delete=True,
region='eu-west-1',
storage_class="",
path="")
def section5():
# create a dataset from a driver name, you can also create by the driver ID
import dtlpy as dl
    project: dl.Project
    dataset = project.datasets.create(dataset_name='dataset_name',
                                      driver='driver_name')
dataset.sync()
|
# coding:utf-8
#
# The MIT License (MIT)
#
# Copyright (c) 2016-2018 yutiansut/QUANTAXIS
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
QA fetch module
@yutiansut
QAFetch is Under [QAStandard#0.0.2@10x] Protocol
"""
from QUANTAXIS.QAData.QADataStruct import (QA_DataStruct_Future_day,
QA_DataStruct_Future_min,
QA_DataStruct_Future_realtime,
QA_DataStruct_Stock_day,
QA_DataStruct_Stock_min,
QA_DataStruct_Stock_realtime,
QA_DataStruct_Index_day,
QA_DataStruct_Index_min)
from QUANTAXIS.QAFetch import QAEastMoney as QAEM
from QUANTAXIS.QAFetch import QAQuery
from QUANTAXIS.QAFetch import QAQuery_Advance as QAQueryAdv
from QUANTAXIS.QAFetch import QAQuery_Async as QAQueryAsync
from QUANTAXIS.QAFetch import QATdx as QATdx
from QUANTAXIS.QAFetch import QAThs as QAThs
from QUANTAXIS.QAFetch import QATushare as QATushare
from QUANTAXIS.QAFetch import QAWind as QAWind
from QUANTAXIS.QAUtil.QAParameter import (DATABASE_TABLE, DATASOURCE,
FREQUENCE, MARKET_TYPE,
OUTPUT_FORMAT)
from QUANTAXIS.QAUtil.QASql import QA_util_sql_mongo_setting
class QA_Fetcher():
def __init__(self, uri='mongodb://192.168.4.248:27017/quantaxis', username='', password=''):
"""
初始化的时候 会初始化
"""
self.database = QA_util_sql_mongo_setting(uri).quantaxis
self.history = {}
self.best_ip = QATdx.select_best_ip()
def change_ip(self, uri):
self.database = QA_util_sql_mongo_setting(uri).quantaxis
return self
def get_quotation(self, code=None, start=None, end=None, frequence=None, market=None, source=None, output=None):
"""
Arguments:
            code {str/list} -- security/stock code
            start {str} -- start date
            end {str} -- end date
            frequence {enum} -- bar frequency, QA.FREQUENCE
            market {enum} -- market type, QA.MARKET_TYPE
            source {enum} -- data source, QA.DATASOURCE
            output {enum} -- output format, QA.OUTPUT_FORMAT
"""
pass
def get_info(self, code, frequence, market, source, output):
if source is DATASOURCE.TDX:
res = QATdx.QA_fetch_get_stock_info(code, self.best_ip)
return res
elif source is DATASOURCE.MONGO:
res = QAQuery.QA_fetch_stock_info(
code, format=output, collections=self.database.stock_info)
return res
    # TODO 🛠: the output parameter is unused; a QA_DataStruct is returned by default
def QA_get_tick(code, start, end, market):
"""
    Unified interface for fetching futures/stock tick data.
"""
res = None
if market == MARKET_TYPE.STOCK_CN:
res = QATdx.QA_fetch_get_stock_transaction(code, start, end)
elif market == MARKET_TYPE.FUTURE_CN:
res = QATdx.QA_fetch_get_future_transaction(code, start, end)
return res
def QA_get_realtime(code, market):
"""
    Unified interface for fetching futures/stock real-time quotes.
"""
res = None
if market == MARKET_TYPE.STOCK_CN:
res = QATdx.QA_fetch_get_stock_realtime(code)
elif market == MARKET_TYPE.FUTURE_CN:
res = QATdx.QA_fetch_get_future_realtime(code)
return res
def QA_quotation(code, start, end, frequence, market, source=DATASOURCE.TDX, output=OUTPUT_FORMAT.DATAFRAME):
"""一个统一的获取k线的方法
如果使用mongo,从本地数据库获取,失败则在线获取
Arguments:
code {str/list} -- 期货/股票的代码
start {str} -- 开始日期
end {str} -- 结束日期
frequence {enum} -- 频率 QA.FREQUENCE
market {enum} -- 市场 QA.MARKET_TYPE
source {enum} -- 来源 QA.DATASOURCE
output {enum} -- 输出类型 QA.OUTPUT_FORMAT
"""
res = None
if market == MARKET_TYPE.STOCK_CN:
if frequence == FREQUENCE.DAY:
if source == DATASOURCE.MONGO:
try:
res = QAQueryAdv.QA_fetch_stock_day_adv(code, start, end)
                except Exception:
                    res = None
            if source == DATASOURCE.TDX or res is None:
res = QATdx.QA_fetch_get_stock_day(code, start, end, '00')
res = QA_DataStruct_Stock_day(res.set_index(['date', 'code']))
elif source == DATASOURCE.TUSHARE:
res = QATushare.QA_fetch_get_stock_day(code, start, end, '00')
elif frequence in [FREQUENCE.ONE_MIN, FREQUENCE.FIVE_MIN, FREQUENCE.FIFTEEN_MIN, FREQUENCE.THIRTY_MIN, FREQUENCE.SIXTY_MIN]:
if source == DATASOURCE.MONGO:
try:
res = QAQueryAdv.QA_fetch_stock_min_adv(
code, start, end, frequence=frequence)
                except Exception:
                    res = None
            if source == DATASOURCE.TDX or res is None:
res = QATdx.QA_fetch_get_stock_min(
code, start, end, frequence=frequence)
res = QA_DataStruct_Stock_min(
res.set_index(['datetime', 'code']))
elif market == MARKET_TYPE.FUTURE_CN:
if frequence == FREQUENCE.DAY:
if source == DATASOURCE.MONGO:
try:
res = QAQueryAdv.QA_fetch_future_day_adv(code, start, end)
                except Exception:
                    res = None
            if source == DATASOURCE.TDX or res is None:
res = QATdx.QA_fetch_get_future_day(code, start, end)
res = QA_DataStruct_Future_day(res.set_index(['date', 'code']))
elif frequence in [FREQUENCE.ONE_MIN, FREQUENCE.FIVE_MIN, FREQUENCE.FIFTEEN_MIN, FREQUENCE.THIRTY_MIN, FREQUENCE.SIXTY_MIN]:
if source == DATASOURCE.MONGO:
try:
res = QAQueryAdv.QA_fetch_future_min_adv(
code, start, end, frequence=frequence)
                except Exception:
                    res = None
            if source == DATASOURCE.TDX or res is None:
res = QATdx.QA_fetch_get_future_min(
code, start, end, frequence=frequence)
res = QA_DataStruct_Future_min(
res.set_index(['datetime', 'code']))
elif market == MARKET_TYPE.INDEX_CN:
if frequence == FREQUENCE.DAY:
if source == DATASOURCE.MONGO:
try:
res = QAQueryAdv.QA_fetch_index_day_adv(code, start, end)
                except Exception:
                    res = None
            if source == DATASOURCE.TDX or res is None:
res = QATdx.QA_fetch_get_index_day(code, start, end)
res = QA_DataStruct_Index_day(res.set_index(['date', 'code']))
elif frequence in [FREQUENCE.ONE_MIN, FREQUENCE.FIVE_MIN, FREQUENCE.FIFTEEN_MIN, FREQUENCE.THIRTY_MIN, FREQUENCE.SIXTY_MIN]:
if source == DATASOURCE.MONGO:
try:
res = QAQueryAdv.QA_fetch_index_min_adv(
code, start, end, frequence=frequence)
                except Exception:
                    res = None
            if source == DATASOURCE.TDX or res is None:
res = QATdx.QA_fetch_get_index_min(
code, start, end, frequence=frequence)
res = QA_DataStruct_Index_min(
res.set_index(['datetime', 'code']))
elif market == MARKET_TYPE.OPTION_CN:
if source == DATASOURCE.MONGO:
#res = QAQueryAdv.QA_fetch_option_day_adv(code, start, end)
            raise NotImplementedError('option data fetching is not implemented yet')
# print(type(res))
if output is OUTPUT_FORMAT.DATAFRAME:
return res.data
elif output is OUTPUT_FORMAT.DATASTRUCT:
return res
elif output is OUTPUT_FORMAT.NDARRAY:
return res.to_numpy()
elif output is OUTPUT_FORMAT.JSON:
return res.to_json()
elif output is OUTPUT_FORMAT.LIST:
return res.to_list()
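# A minimal usage sketch (illustrative; assumes a local MongoDB populated by
# QUANTAXIS, with an automatic fallback to online TDX data when the local
# query fails):
#
#   df = QA_quotation('000001', '2017-01-01', '2017-01-31',
#                     frequence=FREQUENCE.DAY, market=MARKET_TYPE.STOCK_CN,
#                     source=DATASOURCE.MONGO, output=OUTPUT_FORMAT.DATAFRAME)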
class AsyncFetcher():
def __init__(self):
pass
    async def get_quotation(self, code=None, start=None, end=None, frequence=None, market=MARKET_TYPE.STOCK_CN, source=None, output=None):
        res = None  # avoid UnboundLocalError when no branch below matches
        if market is MARKET_TYPE.STOCK_CN:
if frequence is FREQUENCE.DAY:
if source is DATASOURCE.MONGO:
res = await QAQueryAsync.QA_fetch_stock_day(code, start, end)
elif source is DATASOURCE.TDX:
res = QATdx.QA_fetch_get_stock_day(
code, start, end, frequence=frequence)
elif frequence in [FREQUENCE.ONE_MIN, FREQUENCE.FIVE_MIN, FREQUENCE.FIFTEEN_MIN, FREQUENCE.THIRTY_MIN, FREQUENCE.SIXTY_MIN]:
if source is DATASOURCE.MONGO:
res = await QAQueryAsync.QA_fetch_stock_min(code, start, end, frequence=frequence)
elif source is DATASOURCE.TDX:
res = QATdx.QA_fetch_get_stock_min(
code, start, end, frequence=frequence)
return res
if __name__ == '__main__':
import asyncio
# print(QA_quotation('000001', '2017-01-01', '2017-01-31', frequence=FREQUENCE.DAY,
# market=MARKET_TYPE.STOCK_CN, source=DATASOURCE.TDX, output=OUTPUT_FORMAT.DATAFRAME))
Fetcher = AsyncFetcher()
loop = asyncio.get_event_loop()
res = loop.run_until_complete(asyncio.gather(
        # These calls hit MongoDB and run asynchronously
Fetcher.get_quotation('000001', '2018-07-01', '2018-07-15',
FREQUENCE.DAY, MARKET_TYPE.STOCK_CN, DATASOURCE.MONGO),
Fetcher.get_quotation('000001', '2018-07-12', '2018-07-15',
FREQUENCE.FIFTEEN_MIN, MARKET_TYPE.STOCK_CN, DATASOURCE.MONGO),
        # This call hits TDX and runs synchronously
Fetcher.get_quotation('000001', '2018-07-12', '2018-07-15',
FREQUENCE.FIFTEEN_MIN, MARKET_TYPE.STOCK_CN, DATASOURCE.TDX),
))
print(res)
|
# coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for utils.py."""
from __future__ import absolute_import
from __future__ import unicode_literals
import base64
import copy
import datetime
import os
from core import feconf
from core import python_utils
from core import utils
from core.constants import constants
from core.tests import test_utils
from typing import Any, Dict, List
class UtilsTests(test_utils.GenericTestBase):
"""Test the core utility methods."""
def test_get_comma_sep_string_from_list(self) -> None:
"""Test get_comma_sep_string_from_list method."""
alist = ['a', 'b', 'c', 'd']
results = ['', 'a', 'a and b', 'a, b and c', 'a, b, c and d']
for i in range(len(alist) + 1):
comma_sep_string = utils.get_comma_sep_string_from_list(alist[:i])
self.assertEqual(comma_sep_string, results[i])
def test_to_ascii(self) -> None:
"""Test to_ascii method."""
parsed_str = utils.to_ascii('abc')
self.assertEqual(parsed_str, 'abc')
parsed_str = utils.to_ascii('¡Hola!')
self.assertEqual(parsed_str, 'Hola!')
parsed_str = utils.to_ascii(
u'Klüft skräms inför på fédéral électoral große')
self.assertEqual(
parsed_str, 'Kluft skrams infor pa federal electoral groe')
parsed_str = utils.to_ascii('')
self.assertEqual(parsed_str, '')
def test_yaml_dict_conversion(self) -> None:
"""Test yaml_from_dict and dict_from_yaml methods."""
test_dicts = [{}, {'a': 'b'}, {'a': 2}, {'a': ['b', 2, {'c': 3.5}]}]
for adict in test_dicts:
yaml_str = python_utils.yaml_from_dict(adict) # type: ignore[no-untyped-call]
yaml_dict = utils.dict_from_yaml(yaml_str)
self.assertEqual(adict, yaml_dict)
with self.assertRaisesRegexp( # type: ignore[no-untyped-call]
utils.InvalidInputException,
'while parsing a flow node\n'
'expected the node content, but found \'<stream end>\'\n'):
yaml_str = utils.dict_from_yaml('{')
def test_recursively_remove_key_for_empty_dict(self) -> None:
"""Test recursively_remove_key method for an empty dict."""
d: Dict[str, Any] = {}
utils.recursively_remove_key(d, 'a')
self.assertEqual(d, {})
def test_recursively_remove_key_for_single_key_dict(self) -> None:
"""Test recursively_remove_key method for single key dict."""
d = {'a': 'b'}
utils.recursively_remove_key(d, 'a')
self.assertEqual(d, {})
def test_recursively_remove_key_for_multi_key_dict(self) -> None:
"""Test recursively_remove_key method for multi key dict."""
d = {'a': 'b', 'c': 'd'}
utils.recursively_remove_key(d, 'a')
self.assertEqual(d, {'c': 'd'})
def test_recursively_remove_key_for_dict_with_value_dict(self) -> None:
"""Test recursively_remove_key method for dict with a value dict."""
d = {'a': 'b', 'c': {'a': 'b'}}
utils.recursively_remove_key(d, 'a')
self.assertEqual(d, {'c': {}})
def test_recursively_remove_key_for_list(self) -> None:
"""Test recursively_remove_key method for list."""
l = ['a', 'b', {'c': 'd'}]
utils.recursively_remove_key(l, 'c')
self.assertEqual(l, ['a', 'b', {}])
def test_camelcase_to_hyphenated(self) -> None:
"""Test camelcase_to_hyphenated method."""
test_cases = [
('AbcDef', 'abc-def'),
('Abc', 'abc'),
('abc_def', 'abc_def'),
('Abc012Def345', 'abc012-def345'),
('abcDef', 'abc-def'),
]
for test_case in test_cases:
self.assertEqual(
utils.camelcase_to_hyphenated(test_case[0]), test_case[1])
def test_camelcase_to_snakecase(self) -> None:
"""Test camelcase_to_hyphenated method."""
test_cases = [
('AbcDef', 'abc_def'),
('Abc', 'abc'),
('abc_def', 'abc_def'),
('Abc012Def345', 'abc012_def345'),
('abcDef', 'abc_def'),
('abc-def', 'abc-def'),
]
for test_case in test_cases:
self.assertEqual(
utils.camelcase_to_snakecase(test_case[0]), test_case[1])
def test_set_url_query_parameter(self) -> None:
"""Test set_url_query_parameter method."""
self.assertEqual(
utils.set_url_query_parameter('http://www.test.com', 'a', 'b'),
'http://www.test.com?a=b'
)
self.assertEqual(
utils.set_url_query_parameter('http://www.test.com?a=b', 'c', 'd'),
'http://www.test.com?a=b&c=d'
)
self.assertEqual(
utils.set_url_query_parameter(
'http://test.com?a=b', 'redirectUrl', 'http://redirect.com'),
'http://test.com?a=b&redirectUrl=http%3A%2F%2Fredirect.com'
)
with self.assertRaisesRegexp( # type: ignore[no-untyped-call]
Exception, 'URL query parameter name must be a string'
):
utils.set_url_query_parameter('http://test.com?a=b', None, 'value') # type: ignore[arg-type]
def test_convert_to_hash(self) -> None:
"""Test convert_to_hash() method."""
orig_string = 'name_to_convert'
full_hash = utils.convert_to_hash(orig_string, 28)
abbreviated_hash = utils.convert_to_hash(orig_string, 5)
self.assertEqual(len(full_hash), 28)
self.assertEqual(len(abbreviated_hash), 5)
self.assertEqual(full_hash[:5], abbreviated_hash)
self.assertTrue(full_hash.isalnum())
def test_vfs_construct_path(self) -> None:
"""Test vfs_construct_path method."""
p = utils.vfs_construct_path('a', 'b', 'c')
self.assertEqual(p, 'a/b/c')
p = utils.vfs_construct_path('a/', '/b', 'c')
self.assertEqual(p, '/b/c')
p = utils.vfs_construct_path('a/', 'b', 'c')
self.assertEqual(p, 'a/b/c')
p = utils.vfs_construct_path('a', '/b', 'c')
self.assertEqual(p, '/b/c')
p = utils.vfs_construct_path('/a', 'b/')
self.assertEqual(p, '/a/b/')
def test_vfs_normpath(self) -> None:
p = utils.vfs_normpath('/foo/../bar')
self.assertEqual(p, '/bar')
p = utils.vfs_normpath('foo//bar')
self.assertEqual(p, 'foo/bar')
p = utils.vfs_normpath('foo/bar/..')
self.assertEqual(p, 'foo')
p = utils.vfs_normpath('/foo//bar//baz//')
self.assertEqual(p, '/foo/bar/baz')
p = utils.vfs_normpath('')
self.assertEqual(p, '.')
p = utils.vfs_normpath('//foo//bar//baz//')
self.assertEqual(p, '//foo/bar/baz')
def test_capitalize_string(self) -> None:
test_data: List[List[str]] = [
['', ''],
['a', 'A'],
['A', 'A'],
['1', '1'],
['lowercase', 'Lowercase'],
['UPPERCASE', 'UPPERCASE'],
['Partially', 'Partially'],
['miDdle', 'MiDdle'],
['2be', '2be'],
]
for datum in test_data:
self.assertEqual(utils.capitalize_string(datum[0]), datum[1])
def test_generate_random_string(self) -> None:
# Generate a random string of length 12.
random_string = utils.generate_random_string(12)
self.assertTrue(isinstance(random_string, python_utils.BASESTRING))
self.assertEqual(len(random_string), 12)
def test_convert_png_data_url_to_binary_with_incorrect_prefix(self) -> None:
with self.assertRaisesRegexp( # type: ignore[no-untyped-call]
Exception, 'The given string does not represent a PNG data URL'
):
utils.convert_png_data_url_to_binary('data:image/jpg;base64,')
def test_get_thumbnail_icon_url_for_category(self) -> None:
self.assertEqual(
utils.get_thumbnail_icon_url_for_category('Architecture'),
'/subjects/Architecture.svg')
self.assertEqual(
utils.get_thumbnail_icon_url_for_category('Graph Theory'),
'/subjects/GraphTheory.svg')
self.assertEqual(
utils.get_thumbnail_icon_url_for_category('Nonexistent'),
'/subjects/Lightbulb.svg')
def test_are_datetimes_close(self) -> None:
initial_time = datetime.datetime(2016, 12, 1, 0, 0, 0)
with self.swap(feconf, 'PROXIMAL_TIMEDELTA_SECS', 2):
self.assertTrue(utils.are_datetimes_close(
datetime.datetime(2016, 12, 1, 0, 0, 1),
initial_time))
self.assertFalse(utils.are_datetimes_close(
datetime.datetime(2016, 12, 1, 0, 0, 3),
initial_time))
def test_conversion_between_string_and_naive_datetime_object(self) -> None:
"""Tests to make sure converting a naive datetime object to a string and
back doesn't alter the naive datetime object data.
"""
now = datetime.datetime.utcnow()
self.assertEqual(
utils.convert_string_to_naive_datetime_object(
utils.convert_naive_datetime_to_string(now)),
now)
def test_datetime_conversion_to_string_returns_correct_format(self) -> None:
initial_time = datetime.datetime(2016, 12, 1, 1, 2, 3)
self.assertEqual(
utils.convert_naive_datetime_to_string(initial_time),
'12/01/2016, 01:02:03:000000')
def test_string_to_datetime_conversion_returns_correct_datetime(
self
) -> None:
time_string = '12/01/2016, 01:02:03:000000'
initial_time = datetime.datetime(2016, 12, 1, 1, 2, 3)
self.assertEqual(
utils.convert_string_to_naive_datetime_object(time_string),
initial_time)
def test_create_string_from_largest_unit_in_timedelta_raises_for_zero_diff(
self
) -> None:
timedelta_object = datetime.timedelta(days=0)
with self.assertRaisesRegexp( # type: ignore[no-untyped-call]
Exception, 'Expected a positive timedelta, received: %s.' % (
timedelta_object.total_seconds())):
utils.create_string_from_largest_unit_in_timedelta(timedelta_object)
def test_create_string_from_largest_unit_in_timedelta_raises_for_neg_diff(
self
) -> None:
timedelta_object = datetime.timedelta(days=-40)
with self.assertRaisesRegexp( # type: ignore[no-untyped-call]
Exception, 'Expected a positive timedelta, received: %s.' % (
timedelta_object.total_seconds())):
utils.create_string_from_largest_unit_in_timedelta(timedelta_object)
def test_create_string_from_largest_unit_in_timedelta_returns_days(
self
) -> None:
timedelta_object = datetime.timedelta(
days=4, hours=1, minutes=1, seconds=1)
time_string = (
utils.create_string_from_largest_unit_in_timedelta(timedelta_object)
)
self.assertEqual(time_string, '4 days')
def test_create_string_from_largest_unit_in_timedelta_returns_a_day(
self
) -> None:
timedelta_object = datetime.timedelta(
days=1, hours=1, minutes=1, seconds=1)
time_string = (
utils.create_string_from_largest_unit_in_timedelta(timedelta_object)
)
self.assertEqual(time_string, '1 day')
def test_create_string_from_largest_unit_in_timedelta_returns_hours(
self
) -> None:
timedelta_object = datetime.timedelta(
days=0, hours=2, minutes=1, seconds=1)
time_string = (
utils.create_string_from_largest_unit_in_timedelta(timedelta_object)
)
self.assertEqual(time_string, '2 hours')
def test_create_string_from_largest_unit_in_timedelta_returns_an_hour(
self
) -> None:
timedelta_object = datetime.timedelta(
days=0, hours=1, minutes=1, seconds=1)
time_string = (
utils.create_string_from_largest_unit_in_timedelta(timedelta_object)
)
self.assertEqual(time_string, '1 hour')
def test_create_string_from_largest_unit_in_timedelta_returns_minutes(
self
) -> None:
timedelta_object = datetime.timedelta(
days=0, hours=0, minutes=4, seconds=1)
time_string = (
utils.create_string_from_largest_unit_in_timedelta(timedelta_object)
)
self.assertEqual(time_string, '4 minutes')
def test_create_string_from_largest_unit_in_timedelta_returns_a_minute(
self
) -> None:
timedelta_object = datetime.timedelta(
days=0, hours=0, minutes=1, seconds=12)
time_string = (
utils.create_string_from_largest_unit_in_timedelta(timedelta_object)
)
self.assertEqual(time_string, '1 minute')
def test_create_string_from_largest_unit_in_timedelta_returns_a_min_for_min(
self
) -> None:
timedelta_object = datetime.timedelta(
days=0, hours=0, minutes=1, seconds=0)
time_string = (
utils.create_string_from_largest_unit_in_timedelta(timedelta_object)
)
self.assertEqual(time_string, '1 minute')
def test_create_string_from_largest_unit_in_timedelta_returns_minute_if_sec(
self
) -> None:
timedelta_object = datetime.timedelta(
days=0, hours=0, minutes=0, seconds=1)
time_string = (
utils.create_string_from_largest_unit_in_timedelta(timedelta_object)
)
self.assertEqual(time_string, '1 minute')
def test_create_string_from_largest_unit_in_timedelta_returns_a_min_if_msec(
self
) -> None:
timedelta_object = datetime.timedelta(
days=0, hours=0, minutes=0, seconds=0, milliseconds=1)
time_string = (
utils.create_string_from_largest_unit_in_timedelta(timedelta_object)
)
self.assertEqual(time_string, '1 minute')
def test_get_hashable_value(self) -> None:
json1 = ['foo', 'bar', {'baz': 3}]
json2 = ['fee', {'fie': ['foe', 'fum']}]
json1_deepcopy = copy.deepcopy(json1)
json2_deepcopy = copy.deepcopy(json2)
test_set = {utils.get_hashable_value(json1)}
self.assertIn(utils.get_hashable_value(json1_deepcopy), test_set)
test_set.add(utils.get_hashable_value(json2))
self.assertEqual(
test_set, {
utils.get_hashable_value(json1_deepcopy),
utils.get_hashable_value(json2_deepcopy),
})
def test_is_supported_audio_language_code(self) -> None:
self.assertTrue(utils.is_supported_audio_language_code('hi-en'))
self.assertFalse(utils.is_supported_audio_language_code('unknown'))
def test_is_valid_language_code(self) -> None:
self.assertTrue(utils.is_valid_language_code('en'))
self.assertFalse(utils.is_valid_language_code('unknown'))
def test_require_valid_name(self) -> None:
name = 'name'
utils.require_valid_name(name, 'name_type')
invalid_name = 0
with self.assertRaisesRegexp(Exception, '0 must be a string.'): # type: ignore[no-untyped-call]
# Type ignore is used below because we are providing integer
# argument instead of string for invalid_name for testing purposes.
utils.require_valid_name(invalid_name, 'name_type') # type: ignore[arg-type]
def test_require_valid_meta_tag_content(self) -> None:
meta_tag_content = 'name'
utils.require_valid_meta_tag_content(meta_tag_content)
non_string_meta_tag_content = 0
invalid_type_error = (
'Expected meta tag content to be a string, received 0')
with self.assertRaisesRegexp(Exception, invalid_type_error): # type: ignore[no-untyped-call]
utils.require_valid_meta_tag_content(non_string_meta_tag_content) # type: ignore[arg-type]
lengthy_meta_tag_content = 'a' * 200
max_length_error = (
'Meta tag content should not be longer than %s characters.'
% constants.MAX_CHARS_IN_META_TAG_CONTENT)
with self.assertRaisesRegexp(Exception, max_length_error): # type: ignore[no-untyped-call]
utils.require_valid_meta_tag_content(lengthy_meta_tag_content)
def test_require_valid_page_title_fragment_for_web(self) -> None:
page_title_fragment_for_web = 'name'
utils.require_valid_page_title_fragment_for_web(
page_title_fragment_for_web)
non_string_page_title_fragment_for_web = 0
invalid_type_error = (
'Expected page title fragment to be a string, received 0')
with self.assertRaisesRegexp(Exception, invalid_type_error): # type: ignore[no-untyped-call]
utils.require_valid_page_title_fragment_for_web(
non_string_page_title_fragment_for_web) # type: ignore[arg-type]
lengthy_page_title_fragment_for_web = 'a' * 60
max_length_error = (
'Page title fragment should not be longer than %s characters.'
% constants.MAX_CHARS_IN_PAGE_TITLE_FRAGMENT_FOR_WEB)
with self.assertRaisesRegexp(Exception, max_length_error): # type: ignore[no-untyped-call]
utils.require_valid_page_title_fragment_for_web(
lengthy_page_title_fragment_for_web)
def test_require_valid_url_fragment(self) -> None:
name = 'name'
utils.require_valid_url_fragment(name, 'name-type', 20)
name_with_spaces = 'name with spaces'
name_with_spaces_expected_error = (
'name-type field contains invalid characters. Only '
'lowercase words separated by hyphens are allowed. '
'Received name with spaces.')
with self.assertRaisesRegexp( # type: ignore[no-untyped-call]
Exception, name_with_spaces_expected_error):
utils.require_valid_url_fragment(
name_with_spaces, 'name-type', 20)
name_in_caps = 'NAME'
name_in_caps_expected_error = (
'name-type field contains invalid characters. Only '
'lowercase words separated by hyphens are allowed. Received NAME.')
with self.assertRaisesRegexp(Exception, name_in_caps_expected_error): # type: ignore[no-untyped-call]
utils.require_valid_url_fragment(
name_in_caps, 'name-type', 20)
name_with_numbers = 'nam3'
name_with_numbers_expected_error = (
'name-type field contains invalid characters. Only '
'lowercase words separated by hyphens are allowed. Received nam3.')
with self.assertRaisesRegexp( # type: ignore[no-untyped-call]
Exception, name_with_numbers_expected_error):
utils.require_valid_url_fragment(
name_with_numbers, 'name-type', 20)
long_name = 'a-really-really-really-lengthy-name'
long_name_expected_error = (
'name-type field should not exceed 10 characters, '
'received %s' % long_name)
with self.assertRaisesRegexp(Exception, long_name_expected_error): # type: ignore[no-untyped-call]
utils.require_valid_url_fragment(
long_name, 'name-type', 10)
empty_name = ''
empty_name_expected_error = 'name-type field should not be empty.'
with self.assertRaisesRegexp(Exception, empty_name_expected_error): # type: ignore[no-untyped-call]
utils.require_valid_url_fragment(empty_name, 'name-type', 20)
non_string_name = 0
non_string_name_expected_error = (
'name-type field must be a string. Received 0.')
with self.assertRaisesRegexp(Exception, non_string_name_expected_error): # type: ignore[no-untyped-call]
utils.require_valid_url_fragment(non_string_name, 'name-type', 20) # type: ignore[arg-type]
def test_validate_convert_to_hash(self) -> None:
with self.assertRaisesRegexp( # type: ignore[no-untyped-call]
Exception, 'Expected string, received 1 of type %s' % type(1)):
utils.convert_to_hash(1, 10) # type: ignore[arg-type]
def test_convert_png_to_data_url_with_non_png_image_raises_error(
self
) -> None:
favicon_filepath = os.path.join(
self.get_static_asset_filepath(), 'assets', 'favicon.ico') # type: ignore[no-untyped-call]
with self.assertRaisesRegexp( # type: ignore[no-untyped-call]
Exception, 'The given string does not represent a PNG image.'):
utils.convert_png_to_data_url(favicon_filepath)
def test_get_exploration_components_from_dir_with_invalid_path_raises_error(
self
) -> None:
with self.assertRaisesRegexp( # type: ignore[no-untyped-call]
Exception,
'Found invalid non-asset file .+'
'There should only be a single non-asset file, and it should have '
'a .yaml suffix.'
):
utils.get_exploration_components_from_dir('core/tests/load_tests')
with self.assertRaisesRegexp( # type: ignore[no-untyped-call]
Exception, 'The only directory in . should be assets/'):
utils.get_exploration_components_from_dir('.')
def test_get_exploration_components_from_dir_with_multiple_yaml_files(
self
) -> None:
with self.assertRaisesRegexp( # type: ignore[no-untyped-call]
Exception,
'More than one non-asset file specified for '
'core/tests/data/dummy_assets/assets'):
utils.get_exploration_components_from_dir(
'core/tests/data/dummy_assets/assets/')
def test_get_exploration_components_from_dir_with_no_yaml_file(
self
) -> None:
with self.assertRaisesRegexp( # type: ignore[no-untyped-call]
Exception,
'No yaml file specifed for core/tests/data/dummy_assets'):
utils.get_exploration_components_from_dir(
'core/tests/data/dummy_assets/')
def test_get_asset_dir_prefix_with_prod_mode(self) -> None:
with self.swap(constants, 'DEV_MODE', False):
self.assertEqual(utils.get_asset_dir_prefix(), '/build')
def test_base64_from_int(self) -> None:
base64_number = utils.base64_from_int(108)
self.assertEqual(base64.b64decode(base64_number), b'[108]')
def test_get_supported_audio_language_description_with_invalid_code(
self
) -> None:
valid_language_code = 'en'
expected_language_description = 'English'
self.assertEqual(
utils.get_supported_audio_language_description(valid_language_code),
expected_language_description)
invalid_language_code = 'invalid_code'
with self.assertRaisesRegexp( # type: ignore[no-untyped-call]
Exception, 'Unsupported audio language code: invalid_code'):
utils.get_supported_audio_language_description(
invalid_language_code)
def test_is_user_id_valid(self) -> None:
self.assertTrue(
utils.is_user_id_valid(
feconf.SYSTEM_COMMITTER_ID, allow_system_user_id=True))
self.assertTrue(
utils.is_user_id_valid(
feconf.MIGRATION_BOT_USER_ID, allow_system_user_id=True))
self.assertTrue(
utils.is_user_id_valid(
feconf.SUGGESTION_BOT_USER_ID, allow_system_user_id=True))
self.assertTrue(
utils.is_user_id_valid(
'pid_%s' % ('a' * 32), allow_pseudonymous_id=True))
self.assertTrue(
utils.is_user_id_valid('uid_%s' % ('a' * 32)))
self.assertFalse(
utils.is_user_id_valid('pid_%s' % ('a' * 32)))
self.assertFalse(
utils.is_user_id_valid('uid_%s%s' % ('a' * 31, 'A')))
self.assertFalse(
utils.is_user_id_valid('uid_%s' % ('a' * 31)))
self.assertFalse(utils.is_user_id_valid('a' * 36))
def test_is_pseudonymous_id(self) -> None:
self.assertTrue(utils.is_pseudonymous_id('pid_' + 'a' * 32))
self.assertFalse(utils.is_pseudonymous_id('uid_' + 'a' * 32))
self.assertFalse(utils.is_pseudonymous_id('uid_' + 'a' * 31 + 'A'))
self.assertFalse(utils.is_pseudonymous_id('uid_' + 'a' * 31))
self.assertFalse(utils.is_pseudonymous_id('a' * 36))
def test_snake_case_to_camel_case(self) -> None:
camel_case_str1 = utils.snake_case_to_camel_case('user_id_number')
camel_case_str2 = utils.snake_case_to_camel_case('hello_world')
camel_case_str3 = utils.snake_case_to_camel_case('test1')
self.assertEqual(camel_case_str1, 'userIdNumber')
self.assertEqual(camel_case_str2, 'helloWorld')
self.assertEqual(camel_case_str3, 'test1')
def _assert_valid_thumbnail_filename(
self,
expected_error_substring: str,
thumbnail_filename: str
) -> None:
"""Helper method for test_require_valid_thumbnail_filename."""
with self.assertRaisesRegexp( # type: ignore[no-untyped-call]
utils.ValidationError, expected_error_substring):
utils.require_valid_thumbnail_filename(
thumbnail_filename)
def test_require_valid_thumbnail_filename(self) -> None:
"""Test thumbnail filename validation."""
self._assert_valid_thumbnail_filename(
'Expected thumbnail filename to be a string, received 10', 10) # type: ignore[arg-type]
self._assert_valid_thumbnail_filename(
'Thumbnail filename should not start with a dot.', '.name')
self._assert_valid_thumbnail_filename(
'Thumbnail filename should not include slashes or '
'consecutive dot characters.', 'file/name')
self._assert_valid_thumbnail_filename(
'Thumbnail filename should not include slashes or '
'consecutive dot characters.', 'file..name')
self._assert_valid_thumbnail_filename(
'Thumbnail filename should include an extension.', 'name')
self._assert_valid_thumbnail_filename(
'Expected a filename ending in svg, received name.jpg', 'name.jpg')
filename = 'filename.svg'
utils.require_valid_thumbnail_filename(filename)
def _assert_valid_image_filename(
self, expected_error_substring: str, image_filename: str
) -> None:
"""Helper method for test_require_valid_image_filename."""
with self.assertRaisesRegexp( # type: ignore[no-untyped-call]
utils.ValidationError, expected_error_substring):
utils.require_valid_image_filename(
image_filename)
def test_require_valid_image_filename(self) -> None:
"""Test image filename validation."""
self._assert_valid_image_filename(
'Expected image filename to be a string, received 10', 10) # type: ignore[arg-type]
self._assert_valid_image_filename(
'Image filename should not start with a dot.', '.name')
self._assert_valid_image_filename(
'Image filename should not include slashes or '
'consecutive dot characters.', 'file/name')
self._assert_valid_image_filename(
'Image filename should not include slashes or '
'consecutive dot characters.', 'file..name')
self._assert_valid_image_filename(
'Image filename should include an extension.', 'name')
filename = 'filename.svg'
utils.require_valid_image_filename(filename)
def test_get_time_in_millisecs(self) -> None:
dt = datetime.datetime(2020, 6, 15)
msecs = utils.get_time_in_millisecs(dt)
self.assertEqual(
dt,
datetime.datetime.fromtimestamp(
python_utils.divide(msecs, 1000.0))) # type: ignore[no-untyped-call]
def test_get_time_in_millisecs_with_complicated_time(self) -> None:
dt = datetime.datetime(2020, 6, 15, 5, 18, 23, microsecond=123456)
msecs = utils.get_time_in_millisecs(dt)
self.assertEqual(
dt,
datetime.datetime.fromtimestamp(
python_utils.divide(msecs, 1000.0))) # type: ignore[no-untyped-call]
def test_grouper(self) -> None:
self.assertEqual(
[list(g) for g in utils.grouper(range(7), 3)],
[[0, 1, 2], [3, 4, 5], [6, None, None]])
# Returns an iterable of iterables, so we need to combine them into
# strings for easier comparison.
self.assertEqual(
[''.join(g) for g in utils.grouper('ABCDEFG', 3, fillvalue='x')],
['ABC', 'DEF', 'Gxx'])
def test_partition(self) -> None:
is_even = lambda n: (n % 2) == 0
evens, odds = (
utils.partition([10, 8, 1, 5, 6, 4, 3, 7], predicate=is_even))
self.assertEqual(list(evens), [10, 8, 6, 4])
self.assertEqual(list(odds), [1, 5, 3, 7])
def test_enumerated_partition(self) -> None:
logs = ['ERROR: foo', 'INFO: bar', 'INFO: fee', 'ERROR: fie']
is_error = lambda msg: msg.startswith('ERROR: ')
errors, others = (
utils.partition(logs, predicate=is_error, enumerated=True))
self.assertEqual(list(errors), [(0, 'ERROR: foo'), (3, 'ERROR: fie')])
self.assertEqual(list(others), [(1, 'INFO: bar'), (2, 'INFO: fee')])
def test_convert_png_data_url_to_binary(self) -> None:
image_data_url = '%s%s' % (
utils.PNG_DATA_URL_PREFIX,
python_utils.url_quote(base64.b64encode(b'test123'))) # type: ignore[no-untyped-call]
self.assertEqual(
utils.convert_png_data_url_to_binary(image_data_url), b'test123')
def test_convert_png_data_url_to_binary_raises_if_prefix_is_missing(
self
) -> None:
image_data_url = python_utils.url_quote(base64.b64encode(b'test123')) # type: ignore[no-untyped-call]
self.assertRaisesRegexp( # type: ignore[no-untyped-call]
Exception, 'The given string does not represent a PNG data URL.',
lambda: utils.convert_png_data_url_to_binary(image_data_url))
def test_quoted_string(self) -> None:
self.assertEqual(utils.quoted('a"b\'c'), '"a\\"b\'c"')
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# This program is dedicated to the public domain under the CC0 license.
"""
Simple Bot to reply to Telegram messages.
First, a few handler functions are defined. Then, those functions are passed to
the Dispatcher and registered at their respective places.
Then, the bot is started and runs until we press Ctrl-C on the command line.
Usage:
Basic Echobot example, repeats messages.
Press Ctrl-C on the command line or send a signal to the process to stop the
bot.
"""
import logging
import requests
import os
from dotenv import load_dotenv
load_dotenv()
TOKEN = os.getenv('TOKEN')
from telegram.ext import Updater, CommandHandler, MessageHandler, Filters
# Enable logging
logging.basicConfig(
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO
)
logger = logging.getLogger(__name__)
# Define a few command handlers. These usually take the two arguments update and
# context. Error handlers also receive the raised TelegramError object in error.
def start(update, context):
"""Send a message when the command /start is issued."""
update.message.reply_text('Hi!')
def help_command(update, context):
"""Send a message when the command /help is issued."""
update.message.reply_text('Help!')
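# The module docstring ("repeats messages") and the unused MessageHandler /
# Filters imports suggest the standard echo handler from the upstream
# python-telegram-bot v12 echobot example; a minimal sketch of it:
def echo(update, context):
    """Echo the user message."""
    update.message.reply_text(update.message.text)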
def main():
"""Start the bot."""
print("START")
# Create the Updater and pass it your bot's token.
# Make sure to set use_context=True to use the new context based callbacks
# Post version 12 this will no longer be necessary
updater = Updater(TOKEN, use_context=True)
# Get the dispatcher to register handlers
dp = updater.dispatcher
# on different commands - answer in Telegram
dp.add_handler(CommandHandler("start", start))
dp.add_handler(CommandHandler("help", help_command))
# Start the Bot
updater.start_polling()
# Run the bot until you press Ctrl-C or the process receives SIGINT,
# SIGTERM or SIGABRT. This should be used most of the time, since
# start_polling() is non-blocking and will stop the bot gracefully.
updater.idle()
if __name__ == '__main__':
main()
|
import os
import numpy as np
import src
from setuptools import setup, find_packages
from Cython.Build import cythonize
from Cython.Distutils import build_ext
from src.extra import utils
compiler_directives = {
'language_level': 3,
'cdivision': True,
'boundscheck': True,
'wraparound': False,
}
def readme():
path = os.path.join(
utils.get_abs_path('README.md')
)
with open(path) as f:
return f.read()
setup(
name='rin-bot',
author='mkbeh',
    description='Bitshares arbitrage bot.',
long_description=readme(),
long_description_content_type="text/markdown",
classifiers=[
'License :: MIT License',
'Programming Language :: Python :: 3.7',
],
version=src.__version__,
license='MIT',
platforms=['Linux'],
install_requires=[
'aiofiles==0.4.0',
'aiohttp==3.7.4',
'beautifulsoup4==4.7.1',
'Cython==0.29.3',
'lxml==4.6.3',
'uvloop==0.12.1',
'numpy==1.16.0',
'ujson==1.35',
'markdown',
],
include_package_data=True,
packages=find_packages(),
package_data={
'src': ['*.md', 'LICENSE']
},
entry_points={
'console_scripts':
['rin-bot = src.rin:main']
},
    zip_safe=False,
include_dirs=[np.get_include()],
# ext_modules=cythonize(['src/*/*.pyx'], compiler_directives=compiler_directives),
# cmdclass={'build_ext': build_ext},
)
|
import speech_recognition as sr
import datetime
import wikipedia
import pyttsx3
import webbrowser
import random
import os
import time
import smtplib
import wolframalpha
try:
app = wolframalpha.Client("X27662-QTV98PXR56")
except Exception:
print("NO found")
# Project: Personal Assistant Zen 406
engine = pyttsx3.init('sapi5')
voices = engine.getProperty('voices')
print(voices)
engine.setProperty('voice', voices[0].id)
def speak(audio):
engine.say(audio)
engine.runAndWait()
def sendEmail(to, content):
server = smtplib.SMTP('smtp.gmail.com', 587)
server.ehlo()
server.starttls()
server.login('youremail@gmail.com', 'your-password')
server.sendmail('youremail@gmail.com', to, content)
server.close()
def wish():
hour = int(datetime.datetime.now().hour)
    if hour >= 0 and hour < 12:
        speak("Good Morning Sir! I Am Your Personal Assistant Zen 406, How May I Help You!")
    elif hour >= 12 and hour < 18:
        speak("Good Afternoon Sir! I Am Your Personal Assistant Zen 406, How May I Help You!")
    else:
        speak("Good Evening Sir! I Am Your Personal Assistant Zen 406, How May I Help You!")
def takecom():
r = sr.Recognizer()
with sr.Microphone() as source:
print("I am Hear you!......")
audio = r.listen(source)
try:
print("Recognising Your Query Please Wait Sir......")
text = r.recognize_google(audio, language='en-in')
print(text)
except Exception:
speak("Sorry, Not Getting Your Voice!")
print("Network Problem, Ask Again")
return "none"
return text
# for main function
if __name__ == "__main__":
wish()
while True:
query = takecom().lower()
if "open wikipedia" in query or "please open wiki" in query or "please open wikipedia" \
in query:
speak("Searching Details Please Wait Sir......")
            query = query.replace("wikipedia", "")
results = wikipedia.summary(query, sentences=1)
print(results)
speak(results)
webbrowser.open("www.wikipedia.com")
speak("Opening Wikipedia Sir Please Wait")
elif 'open youtube' in query or "please open youtube" in query or "play song on youtube" \
in query or "play vedio on YouTube" in query:
webbrowser.open("www.youtube.com")
speak("Opening YouTube Sir Please Wait")
elif 'open github' in query or "please open github" in query or "open my github account"\
in query:
webbrowser.open("https://www.github.com")
speak("Opening Github Sir Please Wait")
elif 'open facebook' in query or "please open facebook" in query or "open my facebook account"\
in query:
webbrowser.open("https://www.facebook.com")
speak("Opening Facebook Sir Please Wait")
elif 'open instagram' in query or "please open instagram" in query or "open my instagram" \
in query:
webbrowser.open("https://www.instagram.com")
speak("Opening Instagram Sir Please Wait")
elif 'open google' in query or "please open google" in query or "ok google" in query or \
"hey google" in query or "search on google" in query:
webbrowser.open("https://www.google.com")
speak("Opening Google Sir Please Wait")
elif 'open yahoo' in query:
webbrowser.open("https://www.yahoo.com")
speak("opening yahoo")
elif 'open amazon' in query or 'please open amazon' in query or 'amazon' in query:
webbrowser.open("https://www.amazon.in/")
speak("Opening Amazon Sir Please Wait")
elif 'open notepad++' in query:
ll = "C:\\Program Files (x86)\\Notepad++\\notepad++.exe"
speak("opening notepad++ please wait sir")
os.startfile(ll)
elif "close notepad" in query:
speak("closing web browser")
os.system('taskkill /f /im notepad.exe')
elif 'open gmail' in query:
webbrowser.open("https://mail.google.com")
speak("opening google mail")
elif 'open snapdeal' in query:
webbrowser.open("https://www.snapdeal.com")
speak("opening snapdeal")
        elif 'shop online' in query:  # 'open amazon' is handled by the branch above
webbrowser.open("https://www.amazon.com")
speak("opening amazon")
elif 'open flipkart' in query:
webbrowser.open("https://www.flipkart.com")
speak("opening flipkart")
elif 'open ebay' in query:
webbrowser.open("https://www.ebay.com")
speak("opening ebay")
elif 'music from pc' in query or "music" in query:
speak("ok i am playing music")
music_dir = './music'
musics = os.listdir(music_dir)
os.startfile(os.path.join(music_dir, musics[0]))
elif 'video from pc' in query or "video" in query:
speak("ok i am playing videos")
video_dir = './video'
            videos = os.listdir(video_dir)
os.startfile(os.path.join(video_dir, videos[0]))
elif 'good bye' in query:
speak("good bye")
exit()
elif "shutdown" in query:
speak("shutting down")
os.system('shutdown -s')
elif 'email to anyone' in query:
try:
speak("What should I say?")
                content = takecom()
to = "yourEmail@gmail.com"
sendEmail(to, content)
speak("Email has been sent!")
except Exception as e:
print(e)
speak("Sorry! I am not able to send this email")
elif "what\'s up" in query or 'how are you' in query:
stMsgs = ['Just doing my thing!', 'I am fine!', 'Nice!', 'I am nice and full of energy',
'I am okay! How are you?']
ans_q = random.choice(stMsgs)
speak(ans_q)
ans_take_from_user_how_are_you = takecom()
if 'fine' in ans_take_from_user_how_are_you or 'happy' in ans_take_from_user_how_are_you or 'okay' in ans_take_from_user_how_are_you:
speak('okay..')
elif 'not' in ans_take_from_user_how_are_you or 'sad' in ans_take_from_user_how_are_you or 'upset' in ans_take_from_user_how_are_you:
speak('oh sorry..')
elif 'who make you' in query or 'who created you' in query or 'who develop you' in query:
ans_m = " For your information Amresh Mallick Created me ! I give Lot of Thannks to Him "
print(ans_m)
speak(ans_m)
elif "who are you" in query or "about you" in query or "your details" in query:
about = "I am Zen 406 an A I based computer program but i can help you lot like a your close friend ! i promise you ! Simple try me to give simple command ! like playing music or video from your directory i also play video and song from web or online ! i can also entain you i so think you Understand me ! ok Lets Start "
print(about)
speak(about)
elif "hello" in query or "hey Zen" in query:
hel = "Hello Amresh Sir ! How May i Help you.."
print(hel)
speak(hel)
elif "your name" in query or "sweat name" in query:
na_me = "Thanks for Asking my name my self ! Zen"
print(na_me)
speak(na_me)
elif "you feeling" in query:
print("feeling Very sweet after meeting with you")
speak("feeling Very sweet after meeting with you")
elif query == 'none':
continue
elif 'exit' in query or 'abort' in query or 'stop' in query or 'bye' in query or 'quit' in query:
ex_exit = 'I feel very sweet after meeting with you, but you are going! I am very sad.'
speak(ex_exit)
exit()
elif 'time please' in query or 'what is the time' in query or 'time batao' in query\
or 'current time' in query or 'time' in query or 'whats time' in query:
strTime = datetime.datetime.now().strftime("%H:%M:%S")
print(strTime)
speak(strTime)
elif 'wait' in query or 'wait zen' in query or 'wait for second' in query or 'just wait' \
in query or 'wait please' in query or 'please wait' in query or 'please wait zen'\
in query or 'wait a second' in query:
print("Ok! waiting sir")
speak("OK! waiting sir")
time.sleep(10)
speak("OK sir! How May I Help You")
print("OK sir! How May I Help You")
elif 'temperature' in query:
try:
res = app.query(query)
# read the result once; calling next() twice would consume two items
answer = next(res.results).text
speak("Temperature is")
print(answer)
speak(answer)
except Exception:
print("try again")
elif 'plus' in query or '+' in query or 'add' in query:
try:
res = app.query(query)
answer = next(res.results).text
speak("Answer is")
print(answer)
speak(answer)
except Exception:
print("Sorry! Speak again")
elif 'subtract' in query or '-' in query:
try:
res = app.query(query)
answer = next(res.results).text
speak("Answer is")
print(answer)
speak(answer)
except Exception:
print("Sorry! Speak again")
elif 'multiply' in query or ' x ' in query:
# ' x ' (with spaces) avoids matching words that merely contain the letter x
try:
res = app.query(query)
answer = next(res.results).text
speak("Answer is")
print(answer)
speak(answer)
except Exception:
print("Sorry! Speak again")
else:
temp = query.replace(' ', '+')
g_url = "https://www.google.com/search?q="
res_g = "Sorry! I can't understand, but I will search the internet for your answer, okay?"
print(res_g)
speak(res_g)
webbrowser.open(g_url + temp)
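# Hedged illustration of the fallback branch above: a spoken query such as
# "python list comprehension" is rewritten to "python+list+comprehension"
# and opened as https://www.google.com/search?q=python+list+comprehension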
|
# -*- coding: utf-8 -*-
# Copyright 2014, 2015 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from synapse.http.server import respond_with_json, request_handler
from synapse.util.stringutils import random_string
from synapse.api.errors import SynapseError
from twisted.web.server import NOT_DONE_YET
from twisted.internet import defer
from .base_resource import BaseMediaResource
import logging
logger = logging.getLogger(__name__)
class UploadResource(BaseMediaResource):
def render_POST(self, request):
self._async_render_POST(request)
return NOT_DONE_YET
def render_OPTIONS(self, request):
respond_with_json(request, 200, {}, send_cors=True)
return NOT_DONE_YET
@defer.inlineCallbacks
def create_content(self, media_type, upload_name, content, content_length,
auth_user):
media_id = random_string(24)
fname = self.filepaths.local_media_filepath(media_id)
self._makedirs(fname)
# This shouldn't block for very long because the content will have
# already been uploaded at this point.
with open(fname, "wb") as f:
f.write(content)
yield self.store.store_local_media(
media_id=media_id,
media_type=media_type,
time_now_ms=self.clock.time_msec(),
upload_name=upload_name,
media_length=content_length,
user_id=auth_user,
)
media_info = {
"media_type": media_type,
"media_length": content_length,
}
yield self._generate_local_thumbnails(media_id, media_info)
defer.returnValue("mxc://%s/%s" % (self.server_name, media_id))
@request_handler
@defer.inlineCallbacks
def _async_render_POST(self, request):
auth_user, client = yield self.auth.get_user_by_req(request)
# TODO: The checks here are a bit late. The content will have
# already been uploaded to a tmp file at this point
content_length = request.getHeader("Content-Length")
if content_length is None:
raise SynapseError(
msg="Request must specify a Content-Length", code=400
)
if int(content_length) > self.max_upload_size:
raise SynapseError(
msg="Upload request body is too large",
code=413,
)
upload_name = request.args.get("filename", None)
if upload_name:
try:
upload_name = upload_name[0].decode('UTF-8')
except UnicodeDecodeError:
raise SynapseError(
msg="Invalid UTF-8 filename parameter: %r" % (upload_name),
code=400,
)
headers = request.requestHeaders
if headers.hasHeader("Content-Type"):
media_type = headers.getRawHeaders("Content-Type")[0]
else:
raise SynapseError(
msg="Upload request missing 'Content-Type'",
code=400,
)
# if headers.hasHeader("Content-Disposition"):
# disposition = headers.getRawHeaders("Content-Disposition")[0]
# TODO(markjh): parse content-disposition
content_uri = yield self.create_content(
media_type, upload_name, request.content.read(),
content_length, auth_user
)
respond_with_json(
request, 200, {"content_uri": content_uri}, send_cors=True
)
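# Hedged usage sketch, not part of this module: the endpoint path and auth
# mechanism below are assumptions based on the Matrix media API of this era.
# A client POSTs the raw bytes with Content-Type and Content-Length set:
#
#   curl -X POST \
#     'https://homeserver/_matrix/media/v1/upload?filename=cat.png&access_token=TOKEN' \
#     -H 'Content-Type: image/png' --data-binary @cat.png
#
# On success the server replies 200 with
# {"content_uri": "mxc://<server_name>/<media_id>"}, the URI built by
# create_content() above.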
|
""" Code is generated by ucloud-model, DO NOT EDIT IT. """
from ucloud.core.typesystem import schema, fields
class CheckResultItemSchema(schema.ResponseSchema):
"""CheckResultItem - 预检查结果项"""
fields = {
"ErrMessage": fields.Str(required=True, load_from="ErrMessage"),
"State": fields.Str(required=True, load_from="State"),
}
class CheckResultSchema(schema.ResponseSchema):
"""CheckResult - 预检查结果"""
fields = {
"Config": CheckResultItemSchema(),
"Connection": CheckResultItemSchema(),
"Privileges": CheckResultItemSchema(),
}
class CheckUDTSTaskResultSchema(schema.ResponseSchema):
"""CheckUDTSTaskResult - 预检查返回的结果"""
fields = {
"Source": CheckResultSchema(),
"Target": CheckResultSchema(),
}
class TaskHistoryItemSchema(schema.ResponseSchema):
"""TaskHistoryItem - 任务历史记录中一条数据对应的 Model"""
fields = {
"AntID": fields.Str(required=False, load_from="AntID"),
"AntState": fields.Str(required=False, load_from="AntState"),
"CreateTime": fields.Int(required=False, load_from="CreateTime"),
"CreateTimeH": fields.Str(required=False, load_from="CreateTimeH"),
}
class SyncDataSchema(schema.ResponseSchema):
"""SyncData - 增量同步数据"""
fields = {
"BinlogGTID": fields.Str(required=False, load_from="BinlogGTID"),
"BinlogName": fields.Str(required=True, load_from="BinlogName"),
"BinlogPos": fields.Int(required=True, load_from="BinlogPos"),
"ServerId": fields.Int(required=True, load_from="ServerId"),
}
class ProgressSchema(schema.ResponseSchema):
"""Progress - 进度信息"""
fields = {
"CurCount": fields.Int(required=False, load_from="CurCount"),
"CurDuration": fields.Int(required=False, load_from="CurDuration"),
"Percentage": fields.Float(required=False, load_from="Percentage"),
"TotalCount": fields.Int(required=False, load_from="TotalCount"),
"TotalDuration": fields.Int(required=False, load_from="TotalDuration"),
}
class StatusDataSchema(schema.ResponseSchema):
"""StatusData - 动态状态信息"""
fields = {
"CurRetryCount": fields.Int(required=False, load_from="CurRetryCount"),
"FailedMessage": fields.Str(required=False, load_from="FailedMessage"),
"MaxRetryCount": fields.Int(required=False, load_from="MaxRetryCount"),
"Progress": ProgressSchema(),
"Status": fields.Str(required=False, load_from="Status"),
"Sync": SyncDataSchema(),
}
class ListDataItemSchema(schema.ResponseSchema):
"""ListDataItem - 返回列表的一个 Task 的信息"""
fields = {
"CreateTime": fields.Int(required=False, load_from="CreateTime"),
"CurRetryCount": fields.Int(required=False, load_from="CurRetryCount"),
"MaxRetryCount": fields.Int(required=False, load_from="MaxRetryCount"),
"Name": fields.Str(required=False, load_from="Name"),
"Progress": ProgressSchema(),
"Status": fields.Str(required=False, load_from="Status"),
"TaskId": fields.Str(required=False, load_from="TaskId"),
"Type": fields.Str(required=False, load_from="Type"),
}
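# Hedged illustration (not from the SDK docs): the raw API payload shape
# that CheckResultSchema is built to load. The keys come from the load_from
# values above; the concrete State/ErrMessage values are assumptions.
_EXAMPLE_CHECK_RESULT = {
"Config": {"ErrMessage": "", "State": "Success"},
"Connection": {"ErrMessage": "", "State": "Success"},
"Privileges": {"ErrMessage": "connect denied", "State": "Failed"},
}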
|
from flask import Flask
from flask_restplus import Resource,Api
from flask_cors import CORS
from gevent.pywsgi import WSGIServer
app = Flask(__name__)
#CORS(app, supports_credentials=True)
api = Api(app)
@api.route('/hack/<string:username>/<string:password>',methods=['OPTIONS'])
class Hacker(Resource):
def options(self, username, password):
# 'wa' is not a valid open() mode; append so earlier captures are kept
with open('password.txt', 'a') as f:
f.write("username: " + str(username) + " password: " + str(password) + "\n")
return {}, 200
if __name__ == '__main__':
# Debug/Development
app.run(debug=True, host="0.0.0.0", port=5000)
# Production
#http_server = WSGIServer(('0.0.0.0', 5000), api)
#http_server.serve_forever()
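# Hedged usage sketch: exercising the OPTIONS route above from a shell
# (host/port as configured in app.run):
#
#   curl -X OPTIONS http://localhost:5000/hack/alice/secret
#
# Each call appends "username: alice password: secret" to password.txt.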
|
"""
Classes for the ticks and x and y axis.
"""
import datetime
import functools
import logging
import numpy as np
import matplotlib as mpl
from matplotlib import _api
import matplotlib.artist as martist
import matplotlib.cbook as cbook
import matplotlib.lines as mlines
import matplotlib.scale as mscale
import matplotlib.text as mtext
import matplotlib.ticker as mticker
import matplotlib.transforms as mtransforms
import matplotlib.units as munits
_log = logging.getLogger(__name__)
GRIDLINE_INTERPOLATION_STEPS = 180
# This list is being used for compatibility with Axes.grid, which
# allows all Line2D kwargs.
_line_inspector = martist.ArtistInspector(mlines.Line2D)
_line_param_names = _line_inspector.get_setters()
_line_param_aliases = [list(d)[0] for d in _line_inspector.aliasd.values()]
_gridline_param_names = ['grid_' + name
for name in _line_param_names + _line_param_aliases]
class Tick(martist.Artist):
"""
Abstract base class for the axis ticks, grid lines and labels.
Ticks mark a position on an Axis. They contain two lines as markers and
two labels; one each for the bottom and top positions (in case of an
`.XAxis`) or for the left and right positions (in case of a `.YAxis`).
Attributes
----------
tick1line : `.Line2D`
The left/bottom tick marker.
tick2line : `.Line2D`
The right/top tick marker.
gridline : `.Line2D`
The grid line associated with the label position.
label1 : `.Text`
The left/bottom tick label.
label2 : `.Text`
The right/top tick label.
"""
@_api.delete_parameter("3.3", "label")
def __init__(self, axes, loc, label=None,
size=None, # points
width=None,
color=None,
tickdir=None,
pad=None,
labelsize=None,
labelcolor=None,
zorder=None,
gridOn=None, # defaults to axes.grid depending on
# axes.grid.which
tick1On=True,
tick2On=True,
label1On=True,
label2On=False,
major=True,
labelrotation=0,
grid_color=None,
grid_linestyle=None,
grid_linewidth=None,
grid_alpha=None,
**kw # Other Line2D kwargs applied to gridlines.
):
"""
bbox is the Bound2D bounding box in display coords of the Axes
loc is the tick location in data coords
size is the tick size in points
"""
super().__init__()
if gridOn is None:
if major and (mpl.rcParams['axes.grid.which']
in ('both', 'major')):
gridOn = mpl.rcParams['axes.grid']
elif (not major) and (mpl.rcParams['axes.grid.which']
in ('both', 'minor')):
gridOn = mpl.rcParams['axes.grid']
else:
gridOn = False
self.set_figure(axes.figure)
self.axes = axes
self._loc = loc
self._major = major
name = self.__name__
major_minor = "major" if major else "minor"
if size is None:
size = mpl.rcParams[f"{name}.{major_minor}.size"]
self._size = size
if width is None:
width = mpl.rcParams[f"{name}.{major_minor}.width"]
self._width = width
if color is None:
color = mpl.rcParams[f"{name}.color"]
if pad is None:
pad = mpl.rcParams[f"{name}.{major_minor}.pad"]
self._base_pad = pad
if labelcolor is None:
labelcolor = mpl.rcParams[f"{name}.labelcolor"]
if labelcolor == 'inherit':
# inherit from tick color
labelcolor = mpl.rcParams[f"{name}.color"]
if labelsize is None:
labelsize = mpl.rcParams[f"{name}.labelsize"]
self._set_labelrotation(labelrotation)
if zorder is None:
if major:
zorder = mlines.Line2D.zorder + 0.01
else:
zorder = mlines.Line2D.zorder
self._zorder = zorder
if grid_color is None:
grid_color = mpl.rcParams["grid.color"]
if grid_linestyle is None:
grid_linestyle = mpl.rcParams["grid.linestyle"]
if grid_linewidth is None:
grid_linewidth = mpl.rcParams["grid.linewidth"]
if grid_alpha is None:
grid_alpha = mpl.rcParams["grid.alpha"]
grid_kw = {k[5:]: v for k, v in kw.items()}
self.apply_tickdir(tickdir)
self.tick1line = mlines.Line2D(
[], [],
color=color, linestyle="none", zorder=zorder, visible=tick1On,
markeredgecolor=color, markersize=size, markeredgewidth=width,
)
self.tick2line = mlines.Line2D(
[], [],
color=color, linestyle="none", zorder=zorder, visible=tick2On,
markeredgecolor=color, markersize=size, markeredgewidth=width,
)
self.gridline = mlines.Line2D(
[], [],
color=grid_color, alpha=grid_alpha, visible=gridOn,
linestyle=grid_linestyle, linewidth=grid_linewidth, marker="",
**grid_kw,
)
self.gridline.get_path()._interpolation_steps = \
GRIDLINE_INTERPOLATION_STEPS
self.label1 = mtext.Text(
np.nan, np.nan,
fontsize=labelsize, color=labelcolor, visible=label1On)
self.label2 = mtext.Text(
np.nan, np.nan,
fontsize=labelsize, color=labelcolor, visible=label2On)
for meth, attr in [("_get_tick1line", "tick1line"),
("_get_tick2line", "tick2line"),
("_get_gridline", "gridline"),
("_get_text1", "label1"),
("_get_text2", "label2")]:
overridden_method = _api.deprecate_method_override(
getattr(__class__, meth), self, since="3.3", message="Relying "
f"on {meth} to initialize Tick.{attr} is deprecated since "
f"%(since)s and will not work %(removal)s; please directly "
f"set the attribute in the subclass' __init__ instead.")
if overridden_method:
setattr(self, attr, overridden_method())
for artist in [self.tick1line, self.tick2line, self.gridline,
self.label1, self.label2]:
self._set_artist_props(artist)
self.update_position(loc)
@property
@_api.deprecated("3.1", alternative="Tick.label1", pending=True)
def label(self):
return self.label1
def _set_labelrotation(self, labelrotation):
if isinstance(labelrotation, str):
mode = labelrotation
angle = 0
elif isinstance(labelrotation, (tuple, list)):
mode, angle = labelrotation
else:
mode = 'default'
angle = labelrotation
_api.check_in_list(['auto', 'default'], labelrotation=mode)
self._labelrotation = (mode, angle)
def apply_tickdir(self, tickdir):
"""Set tick direction. Valid values are 'out', 'in', 'inout'."""
if tickdir is None:
tickdir = mpl.rcParams[f'{self.__name__}.direction']
_api.check_in_list(['in', 'out', 'inout'], tickdir=tickdir)
self._tickdir = tickdir
self._pad = self._base_pad + self.get_tick_padding()
self.stale = True
# Subclass overrides should compute _tickmarkers as appropriate here.
def get_tickdir(self):
return self._tickdir
def get_tick_padding(self):
"""Get the length of the tick outside of the axes."""
padding = {
'in': 0.0,
'inout': 0.5,
'out': 1.0
}
return self._size * padding[self._tickdir]
def get_children(self):
children = [self.tick1line, self.tick2line,
self.gridline, self.label1, self.label2]
return children
def set_clip_path(self, clippath, transform=None):
# docstring inherited
super().set_clip_path(clippath, transform)
self.gridline.set_clip_path(clippath, transform)
self.stale = True
def get_pad_pixels(self):
return self.figure.dpi * self._base_pad / 72
def contains(self, mouseevent):
"""
Test whether the mouse event occurred in the Tick marks.
This function always returns false. It is more useful to test if the
axis as a whole contains the mouse rather than the set of tick marks.
"""
inside, info = self._default_contains(mouseevent)
if inside is not None:
return inside, info
return False, {}
def set_pad(self, val):
"""
Set the tick label pad in points
Parameters
----------
val : float
"""
self._apply_params(pad=val)
self.stale = True
def get_pad(self):
"""Get the value of the tick label pad in points."""
return self._base_pad
def _get_text1(self):
"""Get the default Text 1 instance."""
def _get_text2(self):
"""Get the default Text 2 instance."""
def _get_tick1line(self):
"""Get the default line2D instance for tick1."""
def _get_tick2line(self):
"""Get the default line2D instance for tick2."""
def _get_gridline(self):
"""Get the default grid Line2d instance for this tick."""
def get_loc(self):
"""Return the tick location (data coords) as a scalar."""
return self._loc
@martist.allow_rasterization
def draw(self, renderer):
if not self.get_visible():
self.stale = False
return
renderer.open_group(self.__name__, gid=self.get_gid())
for artist in [self.gridline, self.tick1line, self.tick2line,
self.label1, self.label2]:
artist.draw(renderer)
renderer.close_group(self.__name__)
self.stale = False
def set_label1(self, s):
"""
Set the label1 text.
Parameters
----------
s : str
"""
self.label1.set_text(s)
self.stale = True
set_label = set_label1
def set_label2(self, s):
"""
Set the label2 text.
Parameters
----------
s : str
"""
self.label2.set_text(s)
self.stale = True
def set_url(self, url):
"""
Set the url of label1 and label2.
Parameters
----------
url : str
"""
super().set_url(url)
self.label1.set_url(url)
self.label2.set_url(url)
self.stale = True
def _set_artist_props(self, a):
a.set_figure(self.figure)
def get_view_interval(self):
"""
Return the view limits ``(min, max)`` of the axis the tick belongs to.
"""
raise NotImplementedError('Derived must override')
def _apply_params(self, **kw):
for name, target in [("gridOn", self.gridline),
("tick1On", self.tick1line),
("tick2On", self.tick2line),
("label1On", self.label1),
("label2On", self.label2)]:
if name in kw:
target.set_visible(kw.pop(name))
if any(k in kw for k in ['size', 'width', 'pad', 'tickdir']):
self._size = kw.pop('size', self._size)
# Width could be handled outside this block, but it is
# convenient to leave it here.
self._width = kw.pop('width', self._width)
self._base_pad = kw.pop('pad', self._base_pad)
# apply_tickdir uses _size and _base_pad to make _pad,
# and also makes _tickmarkers.
self.apply_tickdir(kw.pop('tickdir', self._tickdir))
self.tick1line.set_marker(self._tickmarkers[0])
self.tick2line.set_marker(self._tickmarkers[1])
for line in (self.tick1line, self.tick2line):
line.set_markersize(self._size)
line.set_markeredgewidth(self._width)
# _get_text1_transform uses _pad from apply_tickdir.
trans = self._get_text1_transform()[0]
self.label1.set_transform(trans)
trans = self._get_text2_transform()[0]
self.label2.set_transform(trans)
tick_kw = {k: v for k, v in kw.items() if k in ['color', 'zorder']}
if 'color' in kw:
tick_kw['markeredgecolor'] = kw['color']
self.tick1line.set(**tick_kw)
self.tick2line.set(**tick_kw)
for k, v in tick_kw.items():
setattr(self, '_' + k, v)
if 'labelrotation' in kw:
self._set_labelrotation(kw.pop('labelrotation'))
self.label1.set(rotation=self._labelrotation[1])
self.label2.set(rotation=self._labelrotation[1])
label_kw = {k[5:]: v for k, v in kw.items()
if k in ['labelsize', 'labelcolor']}
self.label1.set(**label_kw)
self.label2.set(**label_kw)
grid_kw = {k[5:]: v for k, v in kw.items()
if k in _gridline_param_names}
self.gridline.set(**grid_kw)
def update_position(self, loc):
"""Set the location of tick in data coords with scalar *loc*."""
raise NotImplementedError('Derived must override')
def _get_text1_transform(self):
raise NotImplementedError('Derived must override')
def _get_text2_transform(self):
raise NotImplementedError('Derived must override')
class XTick(Tick):
"""
Contains all the Artists needed to make an x tick - the tick line,
the label text and the grid line
"""
__name__ = 'xtick'
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# x in data coords, y in axes coords
self.tick1line.set(
xdata=[0], ydata=[0],
transform=self.axes.get_xaxis_transform(which="tick1"),
marker=self._tickmarkers[0],
)
self.tick2line.set(
xdata=[0], ydata=[1],
transform=self.axes.get_xaxis_transform(which="tick2"),
marker=self._tickmarkers[1],
)
self.gridline.set(
xdata=[0, 0], ydata=[0, 1],
transform=self.axes.get_xaxis_transform(which="grid"),
)
# the y loc is 3 points below the min of y axis
trans, va, ha = self._get_text1_transform()
self.label1.set(
x=0, y=0,
verticalalignment=va, horizontalalignment=ha, transform=trans,
)
trans, va, ha = self._get_text2_transform()
self.label2.set(
x=0, y=1,
verticalalignment=va, horizontalalignment=ha, transform=trans,
)
def _get_text1_transform(self):
return self.axes.get_xaxis_text1_transform(self._pad)
def _get_text2_transform(self):
return self.axes.get_xaxis_text2_transform(self._pad)
def apply_tickdir(self, tickdir):
# docstring inherited
super().apply_tickdir(tickdir)
self._tickmarkers = {
'out': (mlines.TICKDOWN, mlines.TICKUP),
'in': (mlines.TICKUP, mlines.TICKDOWN),
'inout': ('|', '|'),
}[self._tickdir]
self.stale = True
def update_position(self, loc):
"""Set the location of tick in data coords with scalar *loc*."""
self.tick1line.set_xdata((loc,))
self.tick2line.set_xdata((loc,))
self.gridline.set_xdata((loc,))
self.label1.set_x(loc)
self.label2.set_x(loc)
self._loc = loc
self.stale = True
def get_view_interval(self):
# docstring inherited
return self.axes.viewLim.intervalx
class YTick(Tick):
"""
Contains all the Artists needed to make a Y tick - the tick line,
the label text and the grid line
"""
__name__ = 'ytick'
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# x in axes coords, y in data coords
self.tick1line.set(
xdata=[0], ydata=[0],
transform=self.axes.get_yaxis_transform(which="tick1"),
marker=self._tickmarkers[0],
)
self.tick2line.set(
xdata=[1], ydata=[0],
transform=self.axes.get_yaxis_transform(which="tick2"),
marker=self._tickmarkers[1],
)
self.gridline.set(
xdata=[0, 1], ydata=[0, 0],
transform=self.axes.get_yaxis_transform(which="grid"),
)
# the x loc is 3 points to the left of the min of the x axis
trans, va, ha = self._get_text1_transform()
self.label1.set(
x=0, y=0,
verticalalignment=va, horizontalalignment=ha, transform=trans,
)
trans, va, ha = self._get_text2_transform()
self.label2.set(
x=1, y=0,
verticalalignment=va, horizontalalignment=ha, transform=trans,
)
def _get_text1_transform(self):
return self.axes.get_yaxis_text1_transform(self._pad)
def _get_text2_transform(self):
return self.axes.get_yaxis_text2_transform(self._pad)
def apply_tickdir(self, tickdir):
# docstring inherited
super().apply_tickdir(tickdir)
self._tickmarkers = {
'out': (mlines.TICKLEFT, mlines.TICKRIGHT),
'in': (mlines.TICKRIGHT, mlines.TICKLEFT),
'inout': ('_', '_'),
}[self._tickdir]
self.stale = True
def update_position(self, loc):
"""Set the location of tick in data coords with scalar *loc*."""
self.tick1line.set_ydata((loc,))
self.tick2line.set_ydata((loc,))
self.gridline.set_ydata((loc,))
self.label1.set_y(loc)
self.label2.set_y(loc)
self._loc = loc
self.stale = True
def get_view_interval(self):
# docstring inherited
return self.axes.viewLim.intervaly
class Ticker:
"""
A container for the objects defining tick position and format.
Attributes
----------
locator : `matplotlib.ticker.Locator` subclass
Determines the positions of the ticks.
formatter : `matplotlib.ticker.Formatter` subclass
Determines the format of the tick labels.
"""
def __init__(self):
self._locator = None
self._formatter = None
@property
def locator(self):
return self._locator
@locator.setter
def locator(self, locator):
if not isinstance(locator, mticker.Locator):
raise TypeError('locator must be a subclass of '
'matplotlib.ticker.Locator')
self._locator = locator
@property
def formatter(self):
return self._formatter
@formatter.setter
def formatter(self, formatter):
if not isinstance(formatter, mticker.Formatter):
raise TypeError('formatter must be a subclass of '
'matplotlib.ticker.Formatter')
self._formatter = formatter
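# Hedged illustration of the validation above: assigning anything that is
# not a Locator/Formatter subclass raises TypeError.
#
#   t = Ticker()
#   t.locator = mticker.MultipleLocator(1.0)   # OK
#   t.locator = mticker.ScalarFormatter()      # TypeError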
class _LazyTickList:
"""
A descriptor for lazy instantiation of tick lists.
See comment above definition of the ``majorTicks`` and ``minorTicks``
attributes.
"""
def __init__(self, major):
self._major = major
def __get__(self, instance, cls):
if instance is None:
return self
else:
# instance._get_tick() can itself try to access the majorTicks
# attribute (e.g. in certain projection classes which override
# e.g. get_xaxis_text1_transform). In order to avoid infinite
# recursion, first set the majorTicks on the instance to an empty
# list, then create the tick and append it.
if self._major:
instance.majorTicks = []
tick = instance._get_tick(major=True)
instance.majorTicks.append(tick)
return instance.majorTicks
else:
instance.minorTicks = []
tick = instance._get_tick(major=False)
instance.minorTicks.append(tick)
return instance.minorTicks
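# Hedged sketch of the descriptor mechanics above: the first attribute
# access replaces the class-level descriptor with a real per-instance list.
#
#   axis = fig.add_subplot().xaxis
#   "majorTicks" in vars(axis)   # False until first access
#   axis.majorTicks              # triggers __get__, creates [Tick]
#   "majorTicks" in vars(axis)   # True afterwards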
class Axis(martist.Artist):
"""
Base class for `.XAxis` and `.YAxis`.
Attributes
----------
isDefault_label : bool
axes : `matplotlib.axes.Axes`
The `~.axes.Axes` to which the Axis belongs.
major : `matplotlib.axis.Ticker`
Determines the major tick positions and their label format.
minor : `matplotlib.axis.Ticker`
Determines the minor tick positions and their label format.
callbacks : `matplotlib.cbook.CallbackRegistry`
label : `.Text`
The axis label.
labelpad : float
The distance between the axis label and the tick labels.
Defaults to :rc:`axes.labelpad` = 4.
offsetText : `.Text`
A `.Text` object containing the data offset of the ticks (if any).
pickradius : float
The acceptance radius for containment tests. See also `.Axis.contains`.
majorTicks : list of `.Tick`
The major ticks.
minorTicks : list of `.Tick`
The minor ticks.
"""
OFFSETTEXTPAD = 3
def __str__(self):
return "{}({},{})".format(
type(self).__name__, *self.axes.transAxes.transform((0, 0)))
def __init__(self, axes, pickradius=15):
"""
Parameters
----------
axes : `matplotlib.axes.Axes`
The `~.axes.Axes` to which the created Axis belongs.
pickradius : float
The acceptance radius for containment tests. See also
`.Axis.contains`.
"""
super().__init__()
self._remove_overlapping_locs = True
self.set_figure(axes.figure)
self.isDefault_label = True
self.axes = axes
self.major = Ticker()
self.minor = Ticker()
self.callbacks = cbook.CallbackRegistry()
self._autolabelpos = True
self.label = mtext.Text(
np.nan, np.nan,
fontsize=mpl.rcParams['axes.labelsize'],
fontweight=mpl.rcParams['axes.labelweight'],
color=mpl.rcParams['axes.labelcolor'],
)
self._set_artist_props(self.label)
self.offsetText = mtext.Text(np.nan, np.nan)
self._set_artist_props(self.offsetText)
self.labelpad = mpl.rcParams['axes.labelpad']
self.pickradius = pickradius
# Initialize here for testing; later add API
self._major_tick_kw = dict()
self._minor_tick_kw = dict()
self.clear()
self._set_scale('linear')
# During initialization, Axis objects often create ticks that are later
# unused; this turns out to be a very slow step. Instead, use a custom
# descriptor to make the tick lists lazy and instantiate them as needed.
majorTicks = _LazyTickList(major=True)
minorTicks = _LazyTickList(major=False)
def get_remove_overlapping_locs(self):
return self._remove_overlapping_locs
def set_remove_overlapping_locs(self, val):
self._remove_overlapping_locs = bool(val)
remove_overlapping_locs = property(
get_remove_overlapping_locs, set_remove_overlapping_locs,
doc=('Whether minor ticker locations that overlap with major '
'ticker locations should be trimmed.'))
def set_label_coords(self, x, y, transform=None):
"""
Set the coordinates of the label.
By default, the x coordinate of the y label and the y coordinate of the
x label are determined by the tick label bounding boxes, but this can
lead to poor alignment of multiple labels if there are multiple axes.
You can also specify the coordinate system of the label with the
transform. If None, the default coordinate system will be the axes
coordinate system: (0, 0) is bottom left, (0.5, 0.5) is center, etc.
"""
self._autolabelpos = False
if transform is None:
transform = self.axes.transAxes
self.label.set_transform(transform)
self.label.set_position((x, y))
self.stale = True
def get_transform(self):
return self._scale.get_transform()
def get_scale(self):
"""Return this Axis' scale (as a str)."""
return self._scale.name
def _set_scale(self, value, **kwargs):
if not isinstance(value, mscale.ScaleBase):
self._scale = mscale.scale_factory(value, self, **kwargs)
else:
self._scale = value
self._scale.set_default_locators_and_formatters(self)
self.isDefault_majloc = True
self.isDefault_minloc = True
self.isDefault_majfmt = True
self.isDefault_minfmt = True
def limit_range_for_scale(self, vmin, vmax):
return self._scale.limit_range_for_scale(vmin, vmax, self.get_minpos())
def get_children(self):
return [self.label, self.offsetText,
*self.get_major_ticks(), *self.get_minor_ticks()]
def _reset_major_tick_kw(self):
self._major_tick_kw.clear()
self._major_tick_kw['gridOn'] = (
mpl.rcParams['axes.grid'] and
mpl.rcParams['axes.grid.which'] in ('both', 'major'))
def _reset_minor_tick_kw(self):
self._minor_tick_kw.clear()
self._minor_tick_kw['gridOn'] = (
mpl.rcParams['axes.grid'] and
mpl.rcParams['axes.grid.which'] in ('both', 'minor'))
def clear(self):
"""
Clear the axis.
This resets axis properties to their default values:
- the label
- the scale
- locators, formatters and ticks
- major and minor grid
- units
- registered callbacks
"""
self.label.set_text('') # self.set_label_text would change isDefault_
self._set_scale('linear')
# Clear the callback registry for this axis, or it may "leak"
self.callbacks = cbook.CallbackRegistry()
# whether the grids are on
self._major_tick_kw['gridOn'] = (
mpl.rcParams['axes.grid'] and
mpl.rcParams['axes.grid.which'] in ('both', 'major'))
self._minor_tick_kw['gridOn'] = (
mpl.rcParams['axes.grid'] and
mpl.rcParams['axes.grid.which'] in ('both', 'minor'))
self.reset_ticks()
self.converter = None
self.units = None
self.set_units(None)
self.stale = True
@_api.deprecated("3.4", alternative="Axis.clear()")
def cla(self):
"""Clear this axis."""
return self.clear()
def reset_ticks(self):
"""
Re-initialize the major and minor Tick lists.
Each list starts with a single fresh Tick.
"""
# Restore the lazy tick lists.
try:
del self.majorTicks
except AttributeError:
pass
try:
del self.minorTicks
except AttributeError:
pass
try:
self.set_clip_path(self.axes.patch)
except AttributeError:
pass
def set_tick_params(self, which='major', reset=False, **kw):
"""
Set appearance parameters for ticks, ticklabels, and gridlines.
For documentation of keyword arguments, see
:meth:`matplotlib.axes.Axes.tick_params`.
"""
_api.check_in_list(['major', 'minor', 'both'], which=which)
kwtrans = self._translate_tick_kw(kw)
# the kwargs are stored in self._major/minor_tick_kw so that any
# future new ticks will automatically get them
if reset:
if which in ['major', 'both']:
self._reset_major_tick_kw()
self._major_tick_kw.update(kwtrans)
if which in ['minor', 'both']:
self._reset_minor_tick_kw()
self._minor_tick_kw.update(kwtrans)
self.reset_ticks()
else:
if which in ['major', 'both']:
self._major_tick_kw.update(kwtrans)
for tick in self.majorTicks:
tick._apply_params(**kwtrans)
if which in ['minor', 'both']:
self._minor_tick_kw.update(kwtrans)
for tick in self.minorTicks:
tick._apply_params(**kwtrans)
# labelOn and labelcolor also apply to the offset text.
if 'label1On' in kwtrans or 'label2On' in kwtrans:
self.offsetText.set_visible(
self._major_tick_kw.get('label1On', False)
or self._major_tick_kw.get('label2On', False))
if 'labelcolor' in kwtrans:
self.offsetText.set_color(kwtrans['labelcolor'])
self.stale = True
@staticmethod
def _translate_tick_kw(kw):
# The following lists may be moved to a more accessible location.
kwkeys = ['size', 'width', 'color', 'tickdir', 'pad',
'labelsize', 'labelcolor', 'zorder', 'gridOn',
'tick1On', 'tick2On', 'label1On', 'label2On',
'length', 'direction', 'left', 'bottom', 'right', 'top',
'labelleft', 'labelbottom', 'labelright', 'labeltop',
'labelrotation'] + _gridline_param_names
kwtrans = {}
if 'length' in kw:
kwtrans['size'] = kw.pop('length')
if 'direction' in kw:
kwtrans['tickdir'] = kw.pop('direction')
if 'rotation' in kw:
kwtrans['labelrotation'] = kw.pop('rotation')
if 'left' in kw:
kwtrans['tick1On'] = kw.pop('left')
if 'bottom' in kw:
kwtrans['tick1On'] = kw.pop('bottom')
if 'right' in kw:
kwtrans['tick2On'] = kw.pop('right')
if 'top' in kw:
kwtrans['tick2On'] = kw.pop('top')
if 'labelleft' in kw:
kwtrans['label1On'] = kw.pop('labelleft')
if 'labelbottom' in kw:
kwtrans['label1On'] = kw.pop('labelbottom')
if 'labelright' in kw:
kwtrans['label2On'] = kw.pop('labelright')
if 'labeltop' in kw:
kwtrans['label2On'] = kw.pop('labeltop')
if 'colors' in kw:
c = kw.pop('colors')
kwtrans['color'] = c
kwtrans['labelcolor'] = c
# Maybe move the checking up to the caller of this method.
for key in kw:
if key not in kwkeys:
raise ValueError(
"keyword %s is not recognized; valid keywords are %s"
% (key, kwkeys))
kwtrans.update(kw)
return kwtrans
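# Hedged illustration of the translation above, e.g. for an x-axis:
#
#   Axis._translate_tick_kw({'length': 6, 'direction': 'in', 'labelbottom': False})
#   # -> {'size': 6, 'tickdir': 'in', 'label1On': False}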
def set_clip_path(self, clippath, transform=None):
super().set_clip_path(clippath, transform)
for child in self.majorTicks + self.minorTicks:
child.set_clip_path(clippath, transform)
self.stale = True
def get_view_interval(self):
"""Return the view limits ``(min, max)`` of this axis."""
raise NotImplementedError('Derived must override')
def set_view_interval(self, vmin, vmax, ignore=False):
"""
Set the axis view limits. This method is for internal use; Matplotlib
users should typically use e.g. `~.Axes.set_xlim` or `~.Axes.set_ylim`.
If *ignore* is False (the default), this method will never reduce the
preexisting view limits, only expand them if *vmin* or *vmax* are not
within them. Moreover, the order of *vmin* and *vmax* does not matter;
the orientation of the axis will not change.
If *ignore* is True, the view limits will be set exactly to ``(vmin,
vmax)`` in that order.
"""
raise NotImplementedError('Derived must override')
def get_data_interval(self):
"""Return the Interval instance for this axis data limits."""
raise NotImplementedError('Derived must override')
def set_data_interval(self, vmin, vmax, ignore=False):
"""
Set the axis data limits. This method is for internal use.
If *ignore* is False (the default), this method will never reduce the
preexisting data limits, only expand them if *vmin* or *vmax* are not
within them. Moreover, the order of *vmin* and *vmax* does not matter;
the orientation of the axis will not change.
If *ignore* is True, the data limits will be set exactly to ``(vmin,
vmax)`` in that order.
"""
raise NotImplementedError('Derived must override')
def get_inverted(self):
"""
Return whether this Axis is oriented in the "inverse" direction.
The "normal" direction is increasing to the right for the x-axis and to
the top for the y-axis; the "inverse" direction is increasing to the
left for the x-axis and to the bottom for the y-axis.
"""
low, high = self.get_view_interval()
return high < low
def set_inverted(self, inverted):
"""
Set whether this Axis is oriented in the "inverse" direction.
The "normal" direction is increasing to the right for the x-axis and to
the top for the y-axis; the "inverse" direction is increasing to the
left for the x-axis and to the bottom for the y-axis.
"""
# Currently, must be implemented in subclasses using set_xlim/set_ylim
# rather than generically using set_view_interval, so that shared
# axes get updated as well.
raise NotImplementedError('Derived must override')
def set_default_intervals(self):
"""
Set the default limits for the axis data and view interval if they
have not been mutated yet.
"""
# this is mainly in support of custom object plotting. For
# example, if someone passes in a datetime object, we do not
# know automagically how to set the default min/max of the
# data and view limits. The unit conversion AxisInfo
# interface provides a hook for custom types to register
# default limits through the AxisInfo.default_limits
# attribute, and the derived code below will check for that
# and use it if is available (else just use 0..1)
def _set_artist_props(self, a):
if a is None:
return
a.set_figure(self.figure)
def get_ticklabel_extents(self, renderer):
"""
Get the extents of the tick labels on either side
of the axes.
"""
ticks_to_draw = self._update_ticks()
ticklabelBoxes, ticklabelBoxes2 = self._get_tick_bboxes(ticks_to_draw,
renderer)
if len(ticklabelBoxes):
bbox = mtransforms.Bbox.union(ticklabelBoxes)
else:
bbox = mtransforms.Bbox.from_extents(0, 0, 0, 0)
if len(ticklabelBoxes2):
bbox2 = mtransforms.Bbox.union(ticklabelBoxes2)
else:
bbox2 = mtransforms.Bbox.from_extents(0, 0, 0, 0)
return bbox, bbox2
def _update_ticks(self):
"""
Update ticks (position and labels) using the current data interval of
the axes. Return the list of ticks that will be drawn.
"""
major_locs = self.get_majorticklocs()
major_labels = self.major.formatter.format_ticks(major_locs)
major_ticks = self.get_major_ticks(len(major_locs))
self.major.formatter.set_locs(major_locs)
for tick, loc, label in zip(major_ticks, major_locs, major_labels):
tick.update_position(loc)
tick.set_label1(label)
tick.set_label2(label)
minor_locs = self.get_minorticklocs()
minor_labels = self.minor.formatter.format_ticks(minor_locs)
minor_ticks = self.get_minor_ticks(len(minor_locs))
self.minor.formatter.set_locs(minor_locs)
for tick, loc, label in zip(minor_ticks, minor_locs, minor_labels):
tick.update_position(loc)
tick.set_label1(label)
tick.set_label2(label)
ticks = [*major_ticks, *minor_ticks]
view_low, view_high = self.get_view_interval()
if view_low > view_high:
view_low, view_high = view_high, view_low
interval_t = self.get_transform().transform([view_low, view_high])
ticks_to_draw = []
for tick in ticks:
try:
loc_t = self.get_transform().transform(tick.get_loc())
except AssertionError:
# transforms.transform doesn't allow masked values but
# some scales might make them, so we need this try/except.
pass
else:
if mtransforms._interval_contains_close(interval_t, loc_t):
ticks_to_draw.append(tick)
return ticks_to_draw
def _get_tick_bboxes(self, ticks, renderer):
"""Return lists of bboxes for ticks' label1's and label2's."""
return ([tick.label1.get_window_extent(renderer)
for tick in ticks if tick.label1.get_visible()],
[tick.label2.get_window_extent(renderer)
for tick in ticks if tick.label2.get_visible()])
def get_tightbbox(self, renderer, *, for_layout_only=False):
"""
Return a bounding box that encloses the axis. It only accounts for
the tick labels, axis label, and offsetText.
If *for_layout_only* is True, then the width of the label (if this
is an x-axis) or the height of the label (if this is a y-axis) is
collapsed to near zero. This allows tight/constrained_layout to ignore
too-long labels when doing their layout.
"""
if not self.get_visible():
return
ticks_to_draw = self._update_ticks()
self._update_label_position(renderer)
# go back to just this axis's tick labels
ticklabelBoxes, ticklabelBoxes2 = self._get_tick_bboxes(
ticks_to_draw, renderer)
self._update_offset_text_position(ticklabelBoxes, ticklabelBoxes2)
self.offsetText.set_text(self.major.formatter.get_offset())
bboxes = [
*(a.get_window_extent(renderer)
for a in [self.offsetText]
if a.get_visible()),
*ticklabelBoxes,
*ticklabelBoxes2,
]
# take care of label
if self.label.get_visible():
bb = self.label.get_window_extent(renderer)
# for constrained/tight_layout, we want to ignore the label's
# width/height because the adjustments they make can't be improved.
# this code collapses the relevant direction
if for_layout_only:
if self.axis_name == "x" and bb.width > 0:
bb.x0 = (bb.x0 + bb.x1) / 2 - 0.5
bb.x1 = bb.x0 + 1.0
if self.axis_name == "y" and bb.height > 0:
bb.y0 = (bb.y0 + bb.y1) / 2 - 0.5
bb.y1 = bb.y0 + 1.0
bboxes.append(bb)
bboxes = [b for b in bboxes
if 0 < b.width < np.inf and 0 < b.height < np.inf]
if bboxes:
return mtransforms.Bbox.union(bboxes)
else:
return None
def get_tick_padding(self):
values = []
if len(self.majorTicks):
values.append(self.majorTicks[0].get_tick_padding())
if len(self.minorTicks):
values.append(self.minorTicks[0].get_tick_padding())
return max(values, default=0)
@martist.allow_rasterization
def draw(self, renderer, *args, **kwargs):
# docstring inherited
if not self.get_visible():
return
renderer.open_group(__name__, gid=self.get_gid())
ticks_to_draw = self._update_ticks()
ticklabelBoxes, ticklabelBoxes2 = self._get_tick_bboxes(ticks_to_draw,
renderer)
for tick in ticks_to_draw:
tick.draw(renderer)
# scale up the axis label box to also find the neighbors, not
# just the tick labels that actually overlap note we need a
# *copy* of the axis label box because we don't want to scale
# the actual bbox
self._update_label_position(renderer)
self.label.draw(renderer)
self._update_offset_text_position(ticklabelBoxes, ticklabelBoxes2)
self.offsetText.set_text(self.major.formatter.get_offset())
self.offsetText.draw(renderer)
renderer.close_group(__name__)
self.stale = False
def get_gridlines(self):
r"""Return this Axis' grid lines as a list of `.Line2D`\s."""
ticks = self.get_major_ticks()
return cbook.silent_list('Line2D gridline',
[tick.gridline for tick in ticks])
def get_label(self):
"""Return the axis label as a Text instance."""
return self.label
def get_offset_text(self):
"""Return the axis offsetText as a Text instance."""
return self.offsetText
def get_pickradius(self):
"""Return the depth of the axis used by the picker."""
return self.pickradius
def get_majorticklabels(self):
"""Return this Axis' major tick labels, as a list of `~.text.Text`."""
ticks = self.get_major_ticks()
labels1 = [tick.label1 for tick in ticks if tick.label1.get_visible()]
labels2 = [tick.label2 for tick in ticks if tick.label2.get_visible()]
return labels1 + labels2
def get_minorticklabels(self):
"""Return this Axis' minor tick labels, as a list of `~.text.Text`."""
ticks = self.get_minor_ticks()
labels1 = [tick.label1 for tick in ticks if tick.label1.get_visible()]
labels2 = [tick.label2 for tick in ticks if tick.label2.get_visible()]
return labels1 + labels2
def get_ticklabels(self, minor=False, which=None):
"""
Get this Axis' tick labels.
Parameters
----------
minor : bool
Whether to return the minor or the major ticklabels.
which : None, ('minor', 'major', 'both')
Overrides *minor*.
Selects which ticklabels to return
Returns
-------
list of `~matplotlib.text.Text`
Notes
-----
The tick label strings are not populated until a ``draw`` method has
been called.
See also: `~.pyplot.draw` and `~.FigureCanvasBase.draw`.
"""
if which is not None:
if which == 'minor':
return self.get_minorticklabels()
elif which == 'major':
return self.get_majorticklabels()
elif which == 'both':
return self.get_majorticklabels() + self.get_minorticklabels()
else:
_api.check_in_list(['major', 'minor', 'both'], which=which)
if minor:
return self.get_minorticklabels()
return self.get_majorticklabels()
def get_majorticklines(self):
r"""Return this Axis' major tick lines as a list of `.Line2D`\s."""
lines = []
ticks = self.get_major_ticks()
for tick in ticks:
lines.append(tick.tick1line)
lines.append(tick.tick2line)
return cbook.silent_list('Line2D ticklines', lines)
def get_minorticklines(self):
r"""Return this Axis' minor tick lines as a list of `.Line2D`\s."""
lines = []
ticks = self.get_minor_ticks()
for tick in ticks:
lines.append(tick.tick1line)
lines.append(tick.tick2line)
return cbook.silent_list('Line2D ticklines', lines)
def get_ticklines(self, minor=False):
r"""Return this Axis' tick lines as a list of `.Line2D`\s."""
if minor:
return self.get_minorticklines()
return self.get_majorticklines()
def get_majorticklocs(self):
"""Return this Axis' major tick locations in data coordinates."""
return self.major.locator()
def get_minorticklocs(self):
"""Return this Axis' minor tick locations in data coordinates."""
# Remove minor ticks duplicating major ticks.
major_locs = self.major.locator()
minor_locs = self.minor.locator()
transform = self._scale.get_transform()
tr_minor_locs = transform.transform(minor_locs)
tr_major_locs = transform.transform(major_locs)
lo, hi = sorted(transform.transform(self.get_view_interval()))
# Use the transformed view limits as scale. 1e-5 is the default rtol
# for np.isclose.
tol = (hi - lo) * 1e-5
if self.remove_overlapping_locs:
minor_locs = [
loc for loc, tr_loc in zip(minor_locs, tr_minor_locs)
if ~np.isclose(tr_loc, tr_major_locs, atol=tol, rtol=0).any()]
return minor_locs
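# Hedged worked example for the overlap test above: with transformed view
# limits (0, 10) the tolerance is (10 - 0) * 1e-5 = 1e-4, so a minor tick
# at 5.00005 is dropped as a duplicate of a major tick at 5.0, while one
# at 5.001 is kept.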
@_api.make_keyword_only("3.3", "minor")
def get_ticklocs(self, minor=False):
"""Return this Axis' tick locations in data coordinates."""
return self.get_minorticklocs() if minor else self.get_majorticklocs()
def get_ticks_direction(self, minor=False):
"""
Get the tick directions as a numpy array
Parameters
----------
minor : bool, default: False
True to return the minor tick directions,
False to return the major tick directions.
Returns
-------
numpy array of tick directions
"""
if minor:
return np.array(
[tick._tickdir for tick in self.get_minor_ticks()])
else:
return np.array(
[tick._tickdir for tick in self.get_major_ticks()])
def _get_tick(self, major):
"""Return the default tick instance."""
raise NotImplementedError('derived must override')
def _get_tick_label_size(self, axis_name):
"""
Return the text size of tick labels for this Axis.
This is a convenience function to avoid having to create a `Tick` in
`.get_tick_space`, since it is expensive.
"""
tick_kw = self._major_tick_kw
size = tick_kw.get('labelsize',
mpl.rcParams[f'{axis_name}tick.labelsize'])
return mtext.FontProperties(size=size).get_size_in_points()
def _copy_tick_props(self, src, dest):
"""Copy the properties from *src* tick to *dest* tick."""
if src is None or dest is None:
return
dest.label1.update_from(src.label1)
dest.label2.update_from(src.label2)
dest.tick1line.update_from(src.tick1line)
dest.tick2line.update_from(src.tick2line)
dest.gridline.update_from(src.gridline)
def get_label_text(self):
"""Get the text of the label."""
return self.label.get_text()
def get_major_locator(self):
"""Get the locator of the major ticker."""
return self.major.locator
def get_minor_locator(self):
"""Get the locator of the minor ticker."""
return self.minor.locator
def get_major_formatter(self):
"""Get the formatter of the major ticker."""
return self.major.formatter
def get_minor_formatter(self):
"""Get the formatter of the minor ticker."""
return self.minor.formatter
def get_major_ticks(self, numticks=None):
r"""Return the list of major `.Tick`\s."""
if numticks is None:
numticks = len(self.get_majorticklocs())
while len(self.majorTicks) < numticks:
# Update the new tick label properties from the old.
tick = self._get_tick(major=True)
self.majorTicks.append(tick)
self._copy_tick_props(self.majorTicks[0], tick)
return self.majorTicks[:numticks]
def get_minor_ticks(self, numticks=None):
r"""Return the list of minor `.Tick`\s."""
if numticks is None:
numticks = len(self.get_minorticklocs())
while len(self.minorTicks) < numticks:
# Update the new tick label properties from the old.
tick = self._get_tick(major=False)
self.minorTicks.append(tick)
self._copy_tick_props(self.minorTicks[0], tick)
return self.minorTicks[:numticks]
def grid(self, b=None, which='major', **kwargs):
"""
Configure the grid lines.
Parameters
----------
b : bool or None
Whether to show the grid lines. If any *kwargs* are supplied,
it is assumed you want the grid on and *b* will be set to True.
If *b* is *None* and there are no *kwargs*, this toggles the
visibility of the lines.
which : {'major', 'minor', 'both'}
The grid lines to apply the changes on.
**kwargs : `.Line2D` properties
Define the line properties of the grid, e.g.::
grid(color='r', linestyle='-', linewidth=2)
"""
TOGGLE = object()
UNSET = object()
visible = kwargs.pop('visible', UNSET)
if b is None:
if visible is UNSET:
if kwargs: # grid(color='r')
b = True
else: # grid()
b = TOGGLE
else: # grid(visible=v)
b = visible
else:
if visible is not UNSET and bool(b) != bool(visible):
# grid(True, visible=False), grid(False, visible=True)
raise ValueError(
"'b' and 'visible' specify inconsistent grid visibilities")
if kwargs and not b: # something false-like but not None
# grid(0, visible=True)
_api.warn_external('First parameter to grid() is false, '
'but line properties are supplied. The '
'grid will be enabled.')
b = True
which = which.lower()
_api.check_in_list(['major', 'minor', 'both'], which=which)
gridkw = {'grid_' + item[0]: item[1] for item in kwargs.items()}
if which in ['minor', 'both']:
gridkw['gridOn'] = (not self._minor_tick_kw['gridOn']
if b is TOGGLE else b)
self.set_tick_params(which='minor', **gridkw)
if which in ['major', 'both']:
gridkw['gridOn'] = (not self._major_tick_kw['gridOn']
if b is TOGGLE else b)
self.set_tick_params(which='major', **gridkw)
self.stale = True
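# Hedged usage sketch for grid() above:
#
#   ax.xaxis.grid(True, which='major', color='0.8', linestyle='--')
#   ax.xaxis.grid()   # no arguments: toggles grid visibility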
def update_units(self, data):
"""
Introspect *data* for units converter and update the
axis.converter instance if necessary. Return *True*
if *data* is registered for unit conversion.
"""
converter = munits.registry.get_converter(data)
if converter is None:
return False
neednew = self.converter != converter
self.converter = converter
default = self.converter.default_units(data, self)
if default is not None and self.units is None:
self.set_units(default)
if neednew:
self._update_axisinfo()
self.stale = True
return True
def _update_axisinfo(self):
"""
Check the axis converter for the stored units to see if the
axis info needs to be updated.
"""
if self.converter is None:
return
info = self.converter.axisinfo(self.units, self)
if info is None:
return
if info.majloc is not None and \
self.major.locator != info.majloc and self.isDefault_majloc:
self.set_major_locator(info.majloc)
self.isDefault_majloc = True
if info.minloc is not None and \
self.minor.locator != info.minloc and self.isDefault_minloc:
self.set_minor_locator(info.minloc)
self.isDefault_minloc = True
if info.majfmt is not None and \
self.major.formatter != info.majfmt and self.isDefault_majfmt:
self.set_major_formatter(info.majfmt)
self.isDefault_majfmt = True
if info.minfmt is not None and \
self.minor.formatter != info.minfmt and self.isDefault_minfmt:
self.set_minor_formatter(info.minfmt)
self.isDefault_minfmt = True
if info.label is not None and self.isDefault_label:
self.set_label_text(info.label)
self.isDefault_label = True
self.set_default_intervals()
def have_units(self):
return self.converter is not None or self.units is not None
def convert_units(self, x):
# If x is natively supported by Matplotlib, doesn't need converting
if munits._is_natively_supported(x):
return x
if self.converter is None:
self.converter = munits.registry.get_converter(x)
if self.converter is None:
return x
try:
ret = self.converter.convert(x, self.units, self)
except Exception as e:
raise munits.ConversionError('Failed to convert value(s) to axis '
f'units: {x!r}') from e
return ret
def set_units(self, u):
"""
Set the units for axis.
Parameters
----------
u : units tag
Notes
-----
The units of any shared axis will also be updated.
"""
if u == self.units:
return
if self is self.axes.xaxis:
shared = [
ax.xaxis
for ax in self.axes.get_shared_x_axes().get_siblings(self.axes)
]
elif self is self.axes.yaxis:
shared = [
ax.yaxis
for ax in self.axes.get_shared_y_axes().get_siblings(self.axes)
]
else:
shared = [self]
for axis in shared:
axis.units = u
axis._update_axisinfo()
axis.callbacks.process('units')
axis.callbacks.process('units finalize')
axis.stale = True
def get_units(self):
"""Return the units for axis."""
return self.units
def set_label_text(self, label, fontdict=None, **kwargs):
"""
Set the text value of the axis label.
Parameters
----------
label : str
Text string.
fontdict : dict
Text properties.
**kwargs
Merged into fontdict.
"""
self.isDefault_label = False
self.label.set_text(label)
if fontdict is not None:
self.label.update(fontdict)
self.label.update(kwargs)
self.stale = True
return self.label
def set_major_formatter(self, formatter):
"""
Set the formatter of the major ticker.
In addition to a `~matplotlib.ticker.Formatter` instance,
this also accepts a ``str`` or function.
For a ``str`` a `~matplotlib.ticker.StrMethodFormatter` is used.
The field used for the value must be labeled ``'x'`` and the field used
for the position must be labeled ``'pos'``.
See the `~matplotlib.ticker.StrMethodFormatter` documentation for
more information.
For a function, a `~matplotlib.ticker.FuncFormatter` is used.
The function must take two inputs (a tick value ``x`` and a
position ``pos``), and return a string containing the corresponding
tick label.
See the `~matplotlib.ticker.FuncFormatter` documentation for
more information.
Parameters
----------
formatter : `~matplotlib.ticker.Formatter`, ``str``, or function
"""
self._set_formatter(formatter, self.major)
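# Hedged usage sketch for the three accepted formatter kinds above:
#
#   ax.xaxis.set_major_formatter(mticker.PercentFormatter())  # Formatter
#   ax.xaxis.set_major_formatter('{x:.2f}')       # str -> StrMethodFormatter
#   ax.xaxis.set_major_formatter(lambda x, pos: f'#{pos}')  # -> FuncFormatter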
def set_minor_formatter(self, formatter):
"""
Set the formatter of the minor ticker.
In addition to a `~matplotlib.ticker.Formatter` instance,
this also accepts a ``str`` or function.
See `.Axis.set_major_formatter` for more information.
Parameters
----------
formatter : `~matplotlib.ticker.Formatter`, ``str``, or function
"""
self._set_formatter(formatter, self.minor)
def _set_formatter(self, formatter, level):
if isinstance(formatter, str):
formatter = mticker.StrMethodFormatter(formatter)
# Don't allow any other TickHelper to avoid easy-to-make errors,
# like using a Locator instead of a Formatter.
elif (callable(formatter) and
not isinstance(formatter, mticker.TickHelper)):
formatter = mticker.FuncFormatter(formatter)
else:
_api.check_isinstance(mticker.Formatter, formatter=formatter)
if (isinstance(formatter, mticker.FixedFormatter)
and len(formatter.seq) > 0
and not isinstance(level.locator, mticker.FixedLocator)):
_api.warn_external('FixedFormatter should only be used together '
'with FixedLocator')
if level == self.major:
self.isDefault_majfmt = False
else:
self.isDefault_minfmt = False
level.formatter = formatter
formatter.set_axis(self)
self.stale = True
def set_major_locator(self, locator):
"""
Set the locator of the major ticker.
Parameters
----------
locator : `~matplotlib.ticker.Locator`
"""
_api.check_isinstance(mticker.Locator, locator=locator)
self.isDefault_majloc = False
self.major.locator = locator
if self.major.formatter:
self.major.formatter._set_locator(locator)
locator.set_axis(self)
self.stale = True
def set_minor_locator(self, locator):
"""
Set the locator of the minor ticker.
Parameters
----------
locator : `~matplotlib.ticker.Locator`
"""
_api.check_isinstance(mticker.Locator, locator=locator)
self.isDefault_minloc = False
self.minor.locator = locator
if self.minor.formatter:
self.minor.formatter._set_locator(locator)
locator.set_axis(self)
self.stale = True
def set_pickradius(self, pickradius):
"""
Set the depth of the axis used by the picker.
Parameters
----------
pickradius : float
"""
self.pickradius = pickradius
# Helper for set_ticklabels. Defining it here makes it pickleable.
@staticmethod
def _format_with_dict(tickd, x, pos):
return tickd.get(x, "")
def set_ticklabels(self, ticklabels, *, minor=False, **kwargs):
r"""
Set the text values of the tick labels.
.. warning::
This method should only be used after fixing the tick positions
using `.Axis.set_ticks`. Otherwise, the labels may end up in
unexpected positions.
Parameters
----------
ticklabels : sequence of str or of `.Text`\s
Texts for labeling each tick location in the sequence set by
`.Axis.set_ticks`; the number of labels must match the number of
locations.
minor : bool
If True, set minor ticks instead of major ticks.
**kwargs
Text properties.
Returns
-------
list of `.Text`\s
For each tick, includes ``tick.label1`` if it is visible, then
``tick.label2`` if it is visible, in that order.
"""
ticklabels = [t.get_text() if hasattr(t, 'get_text') else t
for t in ticklabels]
locator = (self.get_minor_locator() if minor
else self.get_major_locator())
if isinstance(locator, mticker.FixedLocator):
# Passing [] as a list of ticklabels is often used as a way to
# remove all tick labels, so only error for > 0 ticklabels
if len(locator.locs) != len(ticklabels) and len(ticklabels) != 0:
raise ValueError(
"The number of FixedLocator locations"
f" ({len(locator.locs)}), usually from a call to"
" set_ticks, does not match"
f" the number of ticklabels ({len(ticklabels)}).")
tickd = {loc: lab for loc, lab in zip(locator.locs, ticklabels)}
func = functools.partial(self._format_with_dict, tickd)
formatter = mticker.FuncFormatter(func)
else:
formatter = mticker.FixedFormatter(ticklabels)
if minor:
self.set_minor_formatter(formatter)
locs = self.get_minorticklocs()
ticks = self.get_minor_ticks(len(locs))
else:
self.set_major_formatter(formatter)
locs = self.get_majorticklocs()
ticks = self.get_major_ticks(len(locs))
ret = []
for pos, (loc, tick) in enumerate(zip(locs, ticks)):
tick.update_position(loc)
tick_label = formatter(loc, pos)
# deal with label1
tick.label1.set_text(tick_label)
tick.label1.update(kwargs)
# deal with label2
tick.label2.set_text(tick_label)
tick.label2.update(kwargs)
# only return visible tick labels
if tick.label1.get_visible():
ret.append(tick.label1)
if tick.label2.get_visible():
ret.append(tick.label2)
self.stale = True
return ret
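# Usage sketch (hypothetical Axes `ax`): fix the tick positions first, then
# label them, so the labels stay attached to the intended locations.
#     ax.xaxis.set_ticks([0, 1, 2])
#     ax.xaxis.set_ticklabels(['low', 'mid', 'high'])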
# Wrapper around set_ticklabels used to generate Axes.set_x/yticklabels; can
# go away once the API of Axes.set_x/yticklabels becomes consistent.
@_api.make_keyword_only("3.3", "fontdict")
def _set_ticklabels(self, labels, fontdict=None, minor=False, **kwargs):
"""
Set this Axis' labels with list of string labels.
.. warning::
This method should only be used after fixing the tick positions
using `.Axis.set_ticks`. Otherwise, the labels may end up in
unexpected positions.
Parameters
----------
labels : list of str
The label texts.
fontdict : dict, optional
A dictionary controlling the appearance of the ticklabels.
The default *fontdict* is::
{'fontsize': rcParams['axes.titlesize'],
'fontweight': rcParams['axes.titleweight'],
'verticalalignment': 'baseline',
'horizontalalignment': loc}
minor : bool, default: False
Whether to set the minor ticklabels rather than the major ones.
Returns
-------
list of `~.Text`
The labels.
Other Parameters
----------------
**kwargs : `~.text.Text` properties.
"""
if fontdict is not None:
kwargs.update(fontdict)
return self.set_ticklabels(labels, minor=minor, **kwargs)
def set_ticks(self, ticks, *, minor=False):
"""
Set this Axis' tick locations.
If necessary, the view limits of the Axis are expanded so that all
given ticks are visible.
Parameters
----------
ticks : list of floats
List of tick locations.
minor : bool, default: False
If ``False``, set the major ticks; if ``True``, the minor ticks.
Notes
-----
The mandatory expansion of the view limits is an intentional design
choice to prevent the surprise of a non-visible tick. If you need
other limits, you should set the limits explicitly after setting the
ticks.
"""
# XXX if the user changes units, the information will be lost here
ticks = self.convert_units(ticks)
if self is self.axes.xaxis:
shared = [
ax.xaxis
for ax in self.axes.get_shared_x_axes().get_siblings(self.axes)
]
elif self is self.axes.yaxis:
shared = [
ax.yaxis
for ax in self.axes.get_shared_y_axes().get_siblings(self.axes)
]
elif hasattr(self.axes, "zaxis") and self is self.axes.zaxis:
shared = [
ax.zaxis
for ax in self.axes._shared_z_axes.get_siblings(self.axes)
]
else:
shared = [self]
for axis in shared:
if len(ticks) > 1:
xleft, xright = axis.get_view_interval()
if xright > xleft:
axis.set_view_interval(min(ticks), max(ticks))
else:
axis.set_view_interval(max(ticks), min(ticks))
self.axes.stale = True
if minor:
self.set_minor_locator(mticker.FixedLocator(ticks))
return self.get_minor_ticks(len(ticks))
else:
self.set_major_locator(mticker.FixedLocator(ticks))
return self.get_major_ticks(len(ticks))
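# Sketch of the view-limit expansion described above (hypothetical Axes `ax`
# with xlim (0, 1)): after ax.xaxis.set_ticks([0, 5, 10]) the x view interval
# is widened to cover (0, 10); call ax.set_xlim(...) afterwards if other
# limits are wanted.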
def _get_tick_boxes_siblings(self, renderer):
"""
Get the bounding boxes for this `.axis` and its siblings
as set by `.Figure.align_xlabels` or `.Figure.align_ylabels`.
By default it just gets bboxes for self.
"""
# Get the Grouper keeping track of x or y label groups for this figure.
axis_names = [
name for name, axis in self.axes._get_axis_map().items()
if name in self.figure._align_label_groups and axis is self]
if len(axis_names) != 1:
return [], []
axis_name, = axis_names
grouper = self.figure._align_label_groups[axis_name]
bboxes = []
bboxes2 = []
# If we want to align labels from other axes:
for ax in grouper.get_siblings(self.axes):
axis = ax._get_axis_map()[axis_name]
ticks_to_draw = axis._update_ticks()
tlb, tlb2 = axis._get_tick_bboxes(ticks_to_draw, renderer)
bboxes.extend(tlb)
bboxes2.extend(tlb2)
return bboxes, bboxes2
def _update_label_position(self, renderer):
"""
Update the label position based on the bounding box enclosing
all the ticklabels and axis spine.
"""
raise NotImplementedError('Derived must override')
def _update_offset_text_position(self, bboxes, bboxes2):
"""
Update the offset text position based on the sequence of bounding
boxes of all the ticklabels.
"""
raise NotImplementedError('Derived must override')
@_api.deprecated("3.3")
def pan(self, numsteps):
"""Pan by *numsteps* (can be positive or negative)."""
self.major.locator.pan(numsteps)
@_api.deprecated("3.3")
def zoom(self, direction):
"""Zoom in/out on axis; if *direction* is >0 zoom in, else zoom out."""
self.major.locator.zoom(direction)
def axis_date(self, tz=None):
"""
Set up axis ticks and labels to treat data along this Axis as dates.
Parameters
----------
tz : str or `datetime.tzinfo`, default: :rc:`timezone`
The timezone used to create date labels.
"""
# By providing a sample datetime instance with the desired timezone,
# the registered converter can be selected, and the "units" attribute,
# which is the timezone, can be set.
if isinstance(tz, str):
import dateutil.tz
tz = dateutil.tz.gettz(tz)
self.update_units(datetime.datetime(2009, 1, 1, 0, 0, 0, 0, tz))
def get_tick_space(self):
"""Return the estimated number of ticks that can fit on the axis."""
# Must be overridden in the subclass
raise NotImplementedError()
def _get_ticks_position(self):
"""
Helper for `XAxis.get_ticks_position` and `YAxis.get_ticks_position`.
Check the visibility of tick1line, label1, tick2line, and label2 on
the first major and the first minor ticks, and return
- 1 if only tick1line and label1 are visible (which corresponds to
"bottom" for the x-axis and "left" for the y-axis);
- 2 if only tick2line and label2 are visible (which corresponds to
"top" for the x-axis and "right" for the y-axis);
- "default" if only tick1line, tick2line and label1 are visible;
- "unknown" otherwise.
"""
major = self.majorTicks[0]
minor = self.minorTicks[0]
if all(tick.tick1line.get_visible()
and not tick.tick2line.get_visible()
and tick.label1.get_visible()
and not tick.label2.get_visible()
for tick in [major, minor]):
return 1
elif all(tick.tick2line.get_visible()
and not tick.tick1line.get_visible()
and tick.label2.get_visible()
and not tick.label1.get_visible()
for tick in [major, minor]):
return 2
elif all(tick.tick1line.get_visible()
and tick.tick2line.get_visible()
and tick.label1.get_visible()
and not tick.label2.get_visible()
for tick in [major, minor]):
return "default"
else:
return "unknown"
def get_label_position(self):
"""
Return the label position (top or bottom)
"""
return self.label_position
def set_label_position(self, position):
"""
Set the label position (top or bottom)
Parameters
----------
position : {'top', 'bottom'}
"""
raise NotImplementedError()
def get_minpos(self):
raise NotImplementedError()
def _make_getset_interval(method_name, lim_name, attr_name):
"""
Helper to generate ``get_{data,view}_interval`` and
``set_{data,view}_interval`` implementations.
"""
def getter(self):
# docstring inherited.
return getattr(getattr(self.axes, lim_name), attr_name)
def setter(self, vmin, vmax, ignore=False):
# docstring inherited.
if ignore:
setattr(getattr(self.axes, lim_name), attr_name, (vmin, vmax))
else:
oldmin, oldmax = getter(self)
if oldmin < oldmax:
setter(self, min(vmin, vmax, oldmin), max(vmin, vmax, oldmax),
ignore=True)
else:
setter(self, max(vmin, vmax, oldmin), min(vmin, vmax, oldmax),
ignore=True)
self.stale = True
getter.__name__ = f"get_{method_name}_interval"
setter.__name__ = f"set_{method_name}_interval"
return getter, setter
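# The generated pair is bound on each subclass, e.g. for XAxis below:
#     get_view_interval, set_view_interval = _make_getset_interval(
#         "view", "viewLim", "intervalx")
# which yields methods named get_view_interval / set_view_interval.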
class XAxis(Axis):
__name__ = 'xaxis'
axis_name = 'x' #: Read-only name identifying the axis.
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# x in axes coords, y in display coords (to be updated at draw time by
# _update_label_positions and _update_offset_text_position).
self.label.set(
x=0.5, y=0,
verticalalignment='top', horizontalalignment='center',
transform=mtransforms.blended_transform_factory(
self.axes.transAxes, mtransforms.IdentityTransform()),
)
self.label_position = 'bottom'
self.offsetText.set(
x=1, y=0,
verticalalignment='top', horizontalalignment='right',
transform=mtransforms.blended_transform_factory(
self.axes.transAxes, mtransforms.IdentityTransform()),
fontsize=mpl.rcParams['xtick.labelsize'],
color=mpl.rcParams['xtick.color'],
)
self.offset_text_position = 'bottom'
def contains(self, mouseevent):
"""Test whether the mouse event occurred in the x axis."""
inside, info = self._default_contains(mouseevent)
if inside is not None:
return inside, info
x, y = mouseevent.x, mouseevent.y
try:
trans = self.axes.transAxes.inverted()
xaxes, yaxes = trans.transform((x, y))
except ValueError:
return False, {}
(l, b), (r, t) = self.axes.transAxes.transform([(0, 0), (1, 1)])
inaxis = 0 <= xaxes <= 1 and (
b - self.pickradius < y < b or
t < y < t + self.pickradius)
return inaxis, {}
def _get_tick(self, major):
if major:
tick_kw = self._major_tick_kw
else:
tick_kw = self._minor_tick_kw
return XTick(self.axes, 0, major=major, **tick_kw)
def set_label_position(self, position):
"""
Set the label position (top or bottom)
Parameters
----------
position : {'top', 'bottom'}
"""
self.label.set_verticalalignment(_api.check_getitem({
'top': 'baseline', 'bottom': 'top',
}, position=position))
self.label_position = position
self.stale = True
def _update_label_position(self, renderer):
"""
Update the label position based on the bounding box enclosing
all the ticklabels and axis spine
"""
if not self._autolabelpos:
return
# get bounding boxes for this axis and any siblings
# that have been set by `fig.align_xlabels()`
bboxes, bboxes2 = self._get_tick_boxes_siblings(renderer=renderer)
x, y = self.label.get_position()
if self.label_position == 'bottom':
try:
spine = self.axes.spines['bottom']
spinebbox = spine.get_transform().transform_path(
spine.get_path()).get_extents()
except KeyError:
# use axes if spine doesn't exist
spinebbox = self.axes.bbox
bbox = mtransforms.Bbox.union(bboxes + [spinebbox])
bottom = bbox.y0
self.label.set_position(
(x, bottom - self.labelpad * self.figure.dpi / 72)
)
else:
try:
spine = self.axes.spines['top']
spinebbox = spine.get_transform().transform_path(
spine.get_path()).get_extents()
except KeyError:
# use axes if spine doesn't exist
spinebbox = self.axes.bbox
bbox = mtransforms.Bbox.union(bboxes2 + [spinebbox])
top = bbox.y1
self.label.set_position(
(x, top + self.labelpad * self.figure.dpi / 72)
)
def _update_offset_text_position(self, bboxes, bboxes2):
"""
Update the offset_text position based on the sequence of bounding
boxes of all the ticklabels
"""
x, y = self.offsetText.get_position()
if not hasattr(self, '_tick_position'):
self._tick_position = 'bottom'
if self._tick_position == 'bottom':
if not len(bboxes):
bottom = self.axes.bbox.ymin
else:
bbox = mtransforms.Bbox.union(bboxes)
bottom = bbox.y0
y = bottom - self.OFFSETTEXTPAD * self.figure.dpi / 72
else:
if not len(bboxes2):
top = self.axes.bbox.ymax
else:
bbox = mtransforms.Bbox.union(bboxes2)
top = bbox.y1
y = top + self.OFFSETTEXTPAD * self.figure.dpi / 72
self.offsetText.set_position((x, y))
def get_text_heights(self, renderer):
"""
Return how much space should be reserved for text above and below the
axes, as a pair of floats.
"""
bbox, bbox2 = self.get_ticklabel_extents(renderer)
# MGDTODO: Need a better way to get the pad
padPixels = self.majorTicks[0].get_pad_pixels()
above = 0.0
if bbox2.height:
above += bbox2.height + padPixels
below = 0.0
if bbox.height:
below += bbox.height + padPixels
if self.get_label_position() == 'top':
above += self.label.get_window_extent(renderer).height + padPixels
else:
below += self.label.get_window_extent(renderer).height + padPixels
return above, below
def set_ticks_position(self, position):
"""
Set the ticks position.
Parameters
----------
position : {'top', 'bottom', 'both', 'default', 'none'}
'both' sets the ticks to appear on both positions, but does not
change the tick labels. 'default' resets the tick positions to
the default: ticks on both positions, labels at bottom. 'none'
can be used if you don't want any ticks. 'none' and 'both'
affect only the ticks, not the labels.
"""
_api.check_in_list(['top', 'bottom', 'both', 'default', 'none'],
position=position)
if position == 'top':
self.set_tick_params(which='both', top=True, labeltop=True,
bottom=False, labelbottom=False)
self._tick_position = 'top'
self.offsetText.set_verticalalignment('bottom')
elif position == 'bottom':
self.set_tick_params(which='both', top=False, labeltop=False,
bottom=True, labelbottom=True)
self._tick_position = 'bottom'
self.offsetText.set_verticalalignment('top')
elif position == 'both':
self.set_tick_params(which='both', top=True,
bottom=True)
elif position == 'none':
self.set_tick_params(which='both', top=False,
bottom=False)
elif position == 'default':
self.set_tick_params(which='both', top=True, labeltop=False,
bottom=True, labelbottom=True)
self._tick_position = 'bottom'
self.offsetText.set_verticalalignment('top')
else:
assert False, "unhandled parameter not caught by _check_in_list"
self.stale = True
def tick_top(self):
"""
Move ticks and ticklabels (if present) to the top of the axes.
"""
label = True
if 'label1On' in self._major_tick_kw:
label = (self._major_tick_kw['label1On']
or self._major_tick_kw['label2On'])
self.set_ticks_position('top')
# If labels were turned off before this was called, leave them off.
self.set_tick_params(which='both', labeltop=label)
def tick_bottom(self):
"""
Move ticks and ticklabels (if present) to the bottom of the axes.
"""
label = True
if 'label1On' in self._major_tick_kw:
label = (self._major_tick_kw['label1On']
or self._major_tick_kw['label2On'])
self.set_ticks_position('bottom')
# If labels were turned off before this was called, leave them off.
self.set_tick_params(which='both', labelbottom=label)
def get_ticks_position(self):
"""
Return the ticks position ("top", "bottom", "default", or "unknown").
"""
return {1: "bottom", 2: "top",
"default": "default", "unknown": "unknown"}[
self._get_ticks_position()]
get_view_interval, set_view_interval = _make_getset_interval(
"view", "viewLim", "intervalx")
get_data_interval, set_data_interval = _make_getset_interval(
"data", "dataLim", "intervalx")
def get_minpos(self):
return self.axes.dataLim.minposx
def set_inverted(self, inverted):
# docstring inherited
a, b = self.get_view_interval()
# cast to bool to avoid bad interaction between python 3.8 and np.bool_
self.axes.set_xlim(sorted((a, b), reverse=bool(inverted)), auto=None)
def set_default_intervals(self):
# docstring inherited
xmin, xmax = 0., 1.
dataMutated = self.axes.dataLim.mutatedx()
viewMutated = self.axes.viewLim.mutatedx()
if not dataMutated or not viewMutated:
if self.converter is not None:
info = self.converter.axisinfo(self.units, self)
if info.default_limits is not None:
valmin, valmax = info.default_limits
xmin = self.converter.convert(valmin, self.units, self)
xmax = self.converter.convert(valmax, self.units, self)
if not dataMutated:
self.axes.dataLim.intervalx = xmin, xmax
if not viewMutated:
self.axes.viewLim.intervalx = xmin, xmax
self.stale = True
def get_tick_space(self):
ends = mtransforms.Bbox.from_bounds(0, 0, 1, 1)
ends = ends.transformed(self.axes.transAxes -
self.figure.dpi_scale_trans)
length = ends.width * 72
# There is a heuristic here that the aspect ratio of tick text
# is no more than 3:1
size = self._get_tick_label_size('x') * 3
if size > 0:
return int(np.floor(length / size))
else:
return 2**31 - 1
class YAxis(Axis):
__name__ = 'yaxis'
axis_name = 'y' #: Read-only name identifying the axis.
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# x in display coords, y in axes coords (to be updated at draw time by
# _update_label_positions and _update_offset_text_position).
self.label.set(
x=0, y=0.5,
verticalalignment='bottom', horizontalalignment='center',
rotation='vertical', rotation_mode='anchor',
transform=mtransforms.blended_transform_factory(
mtransforms.IdentityTransform(), self.axes.transAxes),
)
self.label_position = 'left'
# x in axes coords, y in display coords(!).
self.offsetText.set(
x=0, y=0.5,
verticalalignment='baseline', horizontalalignment='left',
transform=mtransforms.blended_transform_factory(
self.axes.transAxes, mtransforms.IdentityTransform()),
fontsize=mpl.rcParams['ytick.labelsize'],
color=mpl.rcParams['ytick.color'],
)
self.offset_text_position = 'left'
def contains(self, mouseevent):
# docstring inherited
inside, info = self._default_contains(mouseevent)
if inside is not None:
return inside, info
x, y = mouseevent.x, mouseevent.y
try:
trans = self.axes.transAxes.inverted()
xaxes, yaxes = trans.transform((x, y))
except ValueError:
return False, {}
(l, b), (r, t) = self.axes.transAxes.transform([(0, 0), (1, 1)])
inaxis = 0 <= yaxes <= 1 and (
l - self.pickradius < x < l or
r < x < r + self.pickradius)
return inaxis, {}
def _get_tick(self, major):
if major:
tick_kw = self._major_tick_kw
else:
tick_kw = self._minor_tick_kw
return YTick(self.axes, 0, major=major, **tick_kw)
def set_label_position(self, position):
"""
Set the label position (left or right)
Parameters
----------
position : {'left', 'right'}
"""
self.label.set_rotation_mode('anchor')
self.label.set_verticalalignment(_api.check_getitem({
'left': 'bottom', 'right': 'top',
}, position=position))
self.label_position = position
self.stale = True
def _update_label_position(self, renderer):
"""
Update the label position based on the bounding box enclosing
all the ticklabels and axis spine
"""
if not self._autolabelpos:
return
# get bounding boxes for this axis and any siblings
# that have been set by `fig.align_ylabels()`
bboxes, bboxes2 = self._get_tick_boxes_siblings(renderer=renderer)
x, y = self.label.get_position()
if self.label_position == 'left':
try:
spine = self.axes.spines['left']
spinebbox = spine.get_transform().transform_path(
spine.get_path()).get_extents()
except KeyError:
# use axes if spine doesn't exist
spinebbox = self.axes.bbox
bbox = mtransforms.Bbox.union(bboxes + [spinebbox])
left = bbox.x0
self.label.set_position(
(left - self.labelpad * self.figure.dpi / 72, y)
)
else:
try:
spine = self.axes.spines['right']
spinebbox = spine.get_transform().transform_path(
spine.get_path()).get_extents()
except KeyError:
# use axes if spine doesn't exist
spinebbox = self.axes.bbox
bbox = mtransforms.Bbox.union(bboxes2 + [spinebbox])
right = bbox.x1
self.label.set_position(
(right + self.labelpad * self.figure.dpi / 72, y)
)
def _update_offset_text_position(self, bboxes, bboxes2):
"""
Update the offset_text position based on the sequence of bounding
boxes of all the ticklabels
"""
x, y = self.offsetText.get_position()
top = self.axes.bbox.ymax
self.offsetText.set_position(
(x, top + self.OFFSETTEXTPAD * self.figure.dpi / 72)
)
def set_offset_position(self, position):
"""
Parameters
----------
position : {'left', 'right'}
"""
x, y = self.offsetText.get_position()
x = _api.check_getitem({'left': 0, 'right': 1}, position=position)
self.offsetText.set_ha(position)
self.offsetText.set_position((x, y))
self.stale = True
def get_text_widths(self, renderer):
bbox, bbox2 = self.get_ticklabel_extents(renderer)
# MGDTODO: Need a better way to get the pad
padPixels = self.majorTicks[0].get_pad_pixels()
left = 0.0
if bbox.width:
left += bbox.width + padPixels
right = 0.0
if bbox2.width:
right += bbox2.width + padPixels
if self.get_label_position() == 'left':
left += self.label.get_window_extent(renderer).width + padPixels
else:
right += self.label.get_window_extent(renderer).width + padPixels
return left, right
def set_ticks_position(self, position):
"""
Set the ticks position.
Parameters
----------
position : {'left', 'right', 'both', 'default', 'none'}
'both' sets the ticks to appear on both positions, but does not
change the tick labels. 'default' resets the tick positions to
the default: ticks on both positions, labels at left. 'none'
can be used if you don't want any ticks. 'none' and 'both'
affect only the ticks, not the labels.
"""
_api.check_in_list(['left', 'right', 'both', 'default', 'none'],
position=position)
if position == 'right':
self.set_tick_params(which='both', right=True, labelright=True,
left=False, labelleft=False)
self.set_offset_position(position)
elif position == 'left':
self.set_tick_params(which='both', right=False, labelright=False,
left=True, labelleft=True)
self.set_offset_position(position)
elif position == 'both':
self.set_tick_params(which='both', right=True,
left=True)
elif position == 'none':
self.set_tick_params(which='both', right=False,
left=False)
elif position == 'default':
self.set_tick_params(which='both', right=True, labelright=False,
left=True, labelleft=True)
else:
assert False, "unhandled parameter not caught by _check_in_list"
self.stale = True
def tick_right(self):
"""
Move ticks and ticklabels (if present) to the right of the axes.
"""
label = True
if 'label1On' in self._major_tick_kw:
label = (self._major_tick_kw['label1On']
or self._major_tick_kw['label2On'])
self.set_ticks_position('right')
# if labels were turned off before this was called
# leave them off
self.set_tick_params(which='both', labelright=label)
def tick_left(self):
"""
Move ticks and ticklabels (if present) to the left of the axes.
"""
label = True
if 'label1On' in self._major_tick_kw:
label = (self._major_tick_kw['label1On']
or self._major_tick_kw['label2On'])
self.set_ticks_position('left')
# if labels were turned off before this was called
# leave them off
self.set_tick_params(which='both', labelleft=label)
def get_ticks_position(self):
"""
Return the ticks position ("left", "right", "default", or "unknown").
"""
return {1: "left", 2: "right",
"default": "default", "unknown": "unknown"}[
self._get_ticks_position()]
get_view_interval, set_view_interval = _make_getset_interval(
"view", "viewLim", "intervaly")
get_data_interval, set_data_interval = _make_getset_interval(
"data", "dataLim", "intervaly")
def get_minpos(self):
return self.axes.dataLim.minposy
def set_inverted(self, inverted):
# docstring inherited
a, b = self.get_view_interval()
# cast to bool to avoid bad interaction between python 3.8 and np.bool_
self.axes.set_ylim(sorted((a, b), reverse=bool(inverted)), auto=None)
def set_default_intervals(self):
# docstring inherited
ymin, ymax = 0., 1.
dataMutated = self.axes.dataLim.mutatedy()
viewMutated = self.axes.viewLim.mutatedy()
if not dataMutated or not viewMutated:
if self.converter is not None:
info = self.converter.axisinfo(self.units, self)
if info.default_limits is not None:
valmin, valmax = info.default_limits
ymin = self.converter.convert(valmin, self.units, self)
ymax = self.converter.convert(valmax, self.units, self)
if not dataMutated:
self.axes.dataLim.intervaly = ymin, ymax
if not viewMutated:
self.axes.viewLim.intervaly = ymin, ymax
self.stale = True
def get_tick_space(self):
ends = mtransforms.Bbox.from_bounds(0, 0, 1, 1)
ends = ends.transformed(self.axes.transAxes -
self.figure.dpi_scale_trans)
length = ends.height * 72
# Having a spacing of at least 2 just looks good.
size = self._get_tick_label_size('y') * 2
if size > 0:
return int(np.floor(length / size))
else:
return 2**31 - 1
|
import tensorflow as tf
import matplotlib.pyplot as plt
import numpy as np
import cv2
import time
import os
'''
writer.write(example.SerializeToString()) is what finally produced the data.
To read the data back from a TFRecords file, first build a parsing queue with
tf.train.string_input_producer, then read from it with tf.TFRecordReader and
parse each record with tf.parse_single_example, as follows:
'''
IMAGE_SIZE = 128
NUM_CHANNELS = 3
batch_size=50
# CONV1_DEEP = 10
CONV1_DEEP = 10
CONV1_SIZE = 5
# CONV2_DEEP = 16
CONV2_DEEP = 16
CONV2_SIZE = 5
#FC1
FC_SIZE = 1024
#FC_OUTPUT
NUM_LABELS = 2
LEARNING_RATE_BASE = 0.001
LEARNING_RATE_DECAY = 0.99
num_train = 5001
MOVING_AVERAGE_DECAY = 0.99
MODEL_SAVE_PATH = "./model/"  # directory for saved checkpoints
MODEL_NAME = "model.ckpt"  # tf.train.Saver checkpoints use the .ckpt suffix
def distort_color(image,color_ordering=0):
if color_ordering == 0:
image = tf.image.random_brightness(image, max_delta=32 / 255)
image = tf.image.random_saturation(image, lower=0.5,upper=1.5)
image = tf.image.random_hue(image, max_delta=0.2)
image = tf.image.random_contrast(image, lower = 0.5,upper = 1.5)
elif color_ordering == 1:
image = tf.image.random_saturation(image,lower=0.5,upper=1.5)
image = tf.image.random_brightness(image,max_delta=32/255)
image = tf.image.random_contrast(image, lower = 0.5,upper = 1.5)
image = tf.image.random_hue(image,max_delta=0.2)
elif color_ordering == 2:
image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
image = tf.image.random_brightness(image, max_delta=32 / 255)
image = tf.image.random_hue(image, max_delta=0.2)
return tf.clip_by_value(image,0.0,1.0)
def preprocess_for_train(image):
if image.dtype != tf.float32:
image = tf.image.convert_image_dtype(image,dtype=tf.float32)
distorted_image = tf.image.random_flip_left_right(image)
distorted_image = distort_color(distorted_image, np.random.randint(2))  # picks color ordering 0 or 1 at random (ordering 2 is unused here)
return distorted_image
def read_and_decode(filename,batch_size):
files = tf.train.match_filenames_once(filename)
filename_queue = tf.train.string_input_producer(files, shuffle=True)
'''
Hand the filename list to tf.train.string_input_producer, which builds a
first-in-first-out queue; the file reader pulls filenames from it to read data.
'''
reader = tf.TFRecordReader()
_, serialized_example = reader.read(filename_queue)  # returns the filename and the serialized record
features = tf.parse_single_example(serialized_example,
features={
'height': tf.FixedLenFeature([], tf.int64),
'width': tf.FixedLenFeature([], tf.int64),
'channels': tf.FixedLenFeature([], tf.int64),
'label': tf.FixedLenFeature([], tf.int64),
'img_raw': tf.FixedLenFeature([], tf.string)
})
'''
Pull the image and label feature objects out of the parsed result; because
the features were packed into a dictionary when written, extraction is simple.
'''
image, label = features['img_raw'], features['label']
height, width = features['height'], features['width']
channels = features['channels']
# height = tf.cast(height, tf.uint8)
# width = tf.cast(width, tf.uint8)
decoded_image = tf.decode_raw(image, tf.uint8)  # uint8 spans 0..2^8-1 = 255, the usual type for image data
decoded_image = tf.reshape(decoded_image, [IMAGE_SIZE,IMAGE_SIZE,3])
decoded_image = preprocess_for_train(decoded_image)
min_after_dequeue = 100
capacity = 1000 + 3 * batch_size
'''
capacity is the maximum queue length; min_after_dequeue is the minimum number
of elements left in the queue after a dequeue, which controls shuffle quality.
'''
image_batch, label_batch = tf.train.shuffle_batch([decoded_image, label],
batch_size=batch_size,
capacity=capacity,
min_after_dequeue=min_after_dequeue)
image_batch = tf.cast(image_batch, tf.float32)
return image_batch, label_batch
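# Writer-side sketch matching the features parsed above (hypothetical; the
# records are assumed to have been produced roughly like this, with
# _int64_feature/_bytes_feature as the usual tf.train.Feature helpers):
#     example = tf.train.Example(features=tf.train.Features(feature={
#         'height': _int64_feature(128), 'width': _int64_feature(128),
#         'channels': _int64_feature(3), 'label': _int64_feature(label),
#         'img_raw': _bytes_feature(img.tobytes())}))
#     writer.write(example.SerializeToString())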
def inference(input_img):  # network structure
with tf.variable_scope('layer1-conv1'):
conv1_weights = tf.get_variable("weight", [CONV1_SIZE, CONV1_SIZE, NUM_CHANNELS, CONV1_DEEP],
# CONV1_SIZE x CONV1_SIZE kernels map NUM_CHANNELS input channels to CONV1_DEEP new feature maps
initializer=tf.truncated_normal_initializer(stddev=0.1))  # truncated-normal init discards extreme values
conv1_biases = tf.get_variable("bias", [CONV1_DEEP], initializer=tf.constant_initializer(0.0))
conv1 = tf.nn.conv2d(input_img, conv1_weights, strides=[1, 1, 1, 1], padding='SAME')  # strides[1] and strides[2] move one cell at a time
relu1 = tf.nn.relu(tf.nn.bias_add(conv1, conv1_biases))
#print(relu1.get_shape())
with tf.variable_scope('layer2-pool1'):
pool1 = tf.nn.max_pool(relu1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')  # ksize gives 2x2 max pooling; strides set the step size
#print(pool1.get_shape())
with tf.variable_scope('layer3-conv2'):
conv2_weights = tf.get_variable("weight", [CONV2_SIZE, CONV2_SIZE, CONV1_DEEP, CONV2_DEEP],
initializer=tf.truncated_normal_initializer(stddev=0.1))
conv2_biases = tf.get_variable("bias", [CONV2_DEEP], initializer=tf.constant_initializer(0.0))
conv2 = tf.nn.conv2d(pool1, conv2_weights, strides=[1, 1, 1, 1], padding='SAME')
relu2 = tf.nn.relu(tf.nn.bias_add(conv2, conv2_biases))
#print(relu2.get_shape())
with tf.variable_scope('layer4-pool2'):
pool2 = tf.nn.max_pool(relu2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
#print(pool2.get_shape())
pool_shape = pool2.get_shape().as_list()  # get_shape() returns a TensorShape, not a tensor or string, so convert it to a plain list
nodes = pool_shape[1] * pool_shape[2] * pool_shape[3]
#print(nodes)
reshaped = tf.reshape(pool2, [-1, nodes])  # flatten for the FC layer; -1 keeps the batch size free, e.g. [n, 32, 32, 16] -> [n, 32*32*16]
with tf.variable_scope('layer5-fc1'):
fc1_weights = tf.get_variable("weight", [nodes, FC_SIZE],
initializer=tf.truncated_normal_initializer(stddev=0.1))
# tf.add_to_collection('losses', tf.contrib.layers.l2_regularizer(0.0001)(fc1_weights))
fc1_biases = tf.get_variable("bias", [FC_SIZE], initializer=tf.constant_initializer(0.1))
fc1 = tf.nn.relu(tf.matmul(reshaped, fc1_weights) + fc1_biases)
with tf.variable_scope('layer6-fc2'):
fc2_weights = tf.get_variable("weight", [FC_SIZE, NUM_LABELS],
initializer=tf.truncated_normal_initializer(stddev=0.1))
# tf.add_to_collection('losses', tf.contrib.layers.l2_regularizer(0.0001)(fc2_weights))
fc2_biases = tf.get_variable("bias", [NUM_LABELS], initializer=tf.constant_initializer(0.1))
logit = tf.matmul(fc1, fc2_weights) + fc2_biases
return logit
def component(input_img):  # returns the final pooling layer's feature maps as an image-like tensor
with tf.variable_scope('layer1-conv1'):
conv1_weights = tf.get_variable("weight", [CONV1_SIZE, CONV1_SIZE, NUM_CHANNELS, CONV1_DEEP],
# CONV1_SIZE x CONV1_SIZE kernels map NUM_CHANNELS input channels to CONV1_DEEP new feature maps
initializer=tf.truncated_normal_initializer(stddev=0.1))  # truncated-normal init discards extreme values
conv1_biases = tf.get_variable("bias", [CONV1_DEEP], initializer=tf.constant_initializer(0.0))
conv1 = tf.nn.conv2d(input_img, conv1_weights, strides=[1, 1, 1, 1], padding='SAME')  # strides[1] and strides[2] move one cell at a time
relu1 = tf.nn.relu(tf.nn.bias_add(conv1, conv1_biases))
#print(relu1.get_shape())
with tf.variable_scope('layer2-pool1'):
pool1 = tf.nn.max_pool(relu1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')  # ksize gives 2x2 max pooling; strides set the step size
#print(pool1.get_shape())
with tf.variable_scope('layer3-conv2'):
conv2_weights = tf.get_variable("weight", [CONV2_SIZE, CONV2_SIZE, CONV1_DEEP, CONV2_DEEP],
initializer=tf.truncated_normal_initializer(stddev=0.1))
conv2_biases = tf.get_variable("bias", [CONV2_DEEP], initializer=tf.constant_initializer(0.0))
conv2 = tf.nn.conv2d(pool1, conv2_weights, strides=[1, 1, 1, 1], padding='SAME')
relu2 = tf.nn.relu(tf.nn.bias_add(conv2, conv2_biases))
#print(relu2.get_shape())
with tf.variable_scope('layer4-pool2'):
pool2 = tf.nn.max_pool(relu2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
#print(pool2.get_shape())
return pool2
#%%
def train(img,label):
logit = inference(img)
cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logit, labels=label)  # fuses softmax and cross-entropy into one op
# tf.nn.sigmoid_cross_entropy_with_logits -> binary / multi-label targets
# tf.nn.softmax_cross_entropy_with_logits -> mutually exclusive multi-class targets
cross_entropy_mean = tf.reduce_mean(cross_entropy)
# L2-regularized variant:
# loss = cross_entropy_mean + tf.add_n(tf.get_collection("losses"))
loss = cross_entropy_mean
global_step = tf.Variable(0, trainable=False)
variable_averages = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY, global_step)  # exponential moving average, with num_updates = global_step
variable_averages_op = variable_averages.apply(tf.trainable_variables())  # apply the moving average to make variable updates more robust
learning_rate = tf.train.exponential_decay(LEARNING_RATE_BASE, global_step, 3500 / batch_size, LEARNING_RATE_DECAY)
#train_step = tf.train.FtrlOptimizer(learning_rate).minimize(loss,global_step=global_step)
train_step = tf.train.AdamOptimizer(learning_rate).minimize(loss,global_step=global_step)
# correct_prediction = tf.equal(tf.argmax(logit,1), label_batch)
# accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
with tf.control_dependencies([train_step,variable_averages_op]):
train_op = tf.no_op(name='train')
saver = tf.train.Saver()
with tf.Session() as sess:  # start a session
tf.local_variables_initializer().run()
tf.global_variables_initializer().run()
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
for i in range(num_train):
_, loss_Value, _, step = sess.run([train_op, loss, train_step, global_step])  # global_step increments by one per run
print("----------------------")
print("After %d training step(s),loss on training batch is %g." % (step, loss_Value))
if i%1000 == 0:
print("After %d training step(s),loss on training batch is %g." % (step, loss_Value))
saver.save(sess,os.path.join(MODEL_SAVE_PATH,MODEL_NAME),global_step=global_step)
print("---------------------")
coord.request_stop()
coord.join(threads)
def main(argv=None):
image_batch, label_batch = read_and_decode("train.tfrecords", batch_size=batch_size)
train(image_batch,label_batch)
# test_batch, testlabel_batch = read_and_decode("train2.tfrecords", batch_size=10*batch_size)
# evaluate(test_batch,testlabel_batch)
if __name__ == '__main__':
tf.app.run()
|
from aiohttp_security import AbstractAuthorizationPolicy
from .users import user_map
class AuthorizationPolicy(AbstractAuthorizationPolicy):
"""
This class implement access policy to admin interface.
"""
async def permits(self, identity, permission, context=None) -> bool:
user = user_map.get(identity)
if user is None:  # unknown identity: deny access
return False
return permission in user.permission
async def authorized_userid(self, identity) -> int:
return identity
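# A minimal wiring sketch (hypothetical app setup; the identity policy shown
# here is one of several options provided by aiohttp_security):
#     from aiohttp_security import setup as setup_security, SessionIdentityPolicy
#     setup_security(app, SessionIdentityPolicy(), AuthorizationPolicy())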
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
from .. import _utilities
import typing
# Export this package's modules as members:
from .game_server_cluster import *
from .game_server_config import *
from .game_server_deployment import *
from .game_server_deployment_rollout import *
from .get_game_server_deployment_rollout import *
from .realm import *
from ._inputs import *
from . import outputs
|
"""Gotify notification service"""
import logging
import json
from typing import List, Optional, Tuple
from django.conf import settings
from moni.utils.requests_proxy import requests_post
from notifiers.services import NotifierService
logger = logging.getLogger(__name__)
class Gotify(NotifierService):
"""Gotify notifiers"""
def __init__(self) -> None:
self.payload = json.dumps({
"title": "Moni: Test notification",
"message": "Test Message"
}).encode("utf-8")
self.HEADERS = {
"Content-type": "application/json"
}
self.SERVICE_DOWN_TEMPLATE = settings.BASE_DIR / \
"notifiers/services/gotify/template_service_down.json"
self.SERVICE_UP_TEMPLATE = settings.BASE_DIR / \
"notifiers/services/gotify/template_service_up.json"
def prep_payload(self, title: str, health_check_url: str, success: bool, expected_status: List, received_status: int, error: Optional[str] = None) -> None:
TEMPLATE = self.SERVICE_UP_TEMPLATE if success else self.SERVICE_DOWN_TEMPLATE
with open(TEMPLATE) as ft:
template_data = ft.read()
template_data = template_data % (
title, health_check_url, expected_status, received_status, error)
self.payload = template_data.encode("utf-8")
def send(self, webhook: str) -> Tuple[bool, Optional[int], Optional[str]]:
try:
response = requests_post(webhook, self.payload, self.HEADERS)
logger.info("Response from Gotify, status_code=%s, response=%s",
response.status, response.data)
if response.status == 200:
return True, response.status, None
return False, response.status, None
except Exception as err:
logger.exception("Gotify notification exception")
return False, None, repr(err)
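# A minimal usage sketch (hypothetical URLs; a Gotify webhook carries the app
# token as a query parameter):
#     notifier = Gotify()
#     notifier.prep_payload("API", "https://example.com/health", False,
#                           [200], 500, error="timeout")
#     ok, status, err = notifier.send("https://gotify.example.com/message?token=...")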
|
import argparse
import logging
# Need to import there for pickle
from debias.datasets.dataset_utils import QuantileBatcher
from debias.datasets.squad import AnnotatedSquadLoader
from debias.experiments.eval_debiased_squad import compute_all_scores
from debias.models.text_pair_qa_model import TextPairQaDebiasingModel
from debias.modules.attention_layers import WeightedDot, BiAttention
from debias.modules.cudnn_recurrent_dropout import CudnnLSTMRecurrentDropout
from debias.modules.layers import VariationalDropout, seq, FullyConnected, MaxPooler, Conv1d
from debias.modules.word_and_char_encoder import WordAndCharEncoder
from debias.training.evaluator import Evaluator
from debias.training.trainer import Trainer, AdamOptimizer
from debias.utils import py_utils, cli_utils
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--stratify", type=int, default=None)
parser.add_argument("--bias", choices=["tfidf", "tfidf_filtered"], default="tfidf_filtered")
cli_utils.add_general_args(parser)
cli_utils.add_loss_args(parser, default_penalty=2.0)
args = parser.parse_args()
if args.stratify is None:
if args.mode == "learned_mixin":
# Not sure if this actually makes a difference, but it was turned on
# for the learned_mixin case, so we do the same here for exactness
args.stratify = 6
dbg = args.debug
if dbg:
epoch_size = 50
else:
epoch_size = 1341
opt = AdamOptimizer(max_grad_norm=5.0)
batcher = QuantileBatcher(45, 10, 300, 4, 12)
evaluator = Evaluator("squad")
trainer = Trainer(
batcher, opt, evaluator,
eval_batch_size=90,
num_epochs=30, epoch_size=epoch_size,
log_period=100,
prefetch=5, loss_ema=0.999,
n_processes=args.n_processes
)
filtered_bias = args.bias == "tfidf_filtered"
if dbg:
dataset = AnnotatedSquadLoader(
sample_train=1000, sample_dev=500, stratify=args.stratify, filtered_bias=filtered_bias)
else:
dataset = AnnotatedSquadLoader(
sample_train_eval=10000, stratify=args.stratify, filtered_bias=filtered_bias)
dim = 100
recurrent_layer = CudnnLSTMRecurrentDropout(dim, 0.0)
model = TextPairQaDebiasingModel(
None, # Assume pre-tokenized data
text_encoder=WordAndCharEncoder(
"glove.6B.50d" if dbg else "crawl-300d-2M",
first_n=None,
char_embed_dim=24,
character_mapper=Conv1d(100, 5, None),
character_pooler=MaxPooler(),
word_length=30
),
map_embed=seq(
VariationalDropout(0.2),
recurrent_layer,
VariationalDropout(0.2)
),
fuse_layer=BiAttention(WeightedDot()),
post_process_layer=seq(
FullyConnected(dim * 2, activation="glu"),
VariationalDropout(0.2),
recurrent_layer,
VariationalDropout(0.2),
recurrent_layer,
VariationalDropout(0.2),
),
debias_loss_fn=cli_utils.get_qa_loss_fn(args)
)
with open(__file__) as f:
notes = f.read()
py_utils.add_stdout_logger()
trainer.train(dataset, model, args.output_dir, notes)
if args.output_dir:
logging.info("Evaluating")
compute_all_scores(args.output_dir, ["dev", "add_sent", "add_one_sent"])
if __name__ == '__main__':
main()
|
"""
Defines a functions for training a NN.
"""
from data_generator import AudioGenerator
import _pickle as pickle
from keras import backend as K
from keras.models import Model
from keras.layers import (Input, Lambda)
from keras.optimizers import SGD
from keras.callbacks import ModelCheckpoint
import os
def ctc_lambda_func(args):
y_pred, labels, input_length, label_length = args
return K.ctc_batch_cost(labels, y_pred, input_length, label_length)
def add_ctc_loss(input_to_softmax):
the_labels = Input(name='the_labels', shape=(None,), dtype='float32')
input_lengths = Input(name='input_length', shape=(1,), dtype='int64')
label_lengths = Input(name='label_length', shape=(1,), dtype='int64')
output_lengths = Lambda(input_to_softmax.output_length)(input_lengths)
# CTC loss is implemented in a lambda layer
loss_out = Lambda(ctc_lambda_func, output_shape=(1,), name='ctc')(
[input_to_softmax.output, the_labels, output_lengths, label_lengths])
model = Model(
inputs=[input_to_softmax.input, the_labels, input_lengths, label_lengths],
outputs=loss_out)
return model
def train_model(input_to_softmax,
pickle_path,
save_model_path,
train_json='train_corpus.json',
valid_json='valid_corpus.json',
minibatch_size=20,
spectrogram=True,
mfcc_dim=13,
optimizer=SGD(lr=0.02, decay=1e-6, momentum=0.9, nesterov=True, clipnorm=5),
epochs=30,
verbose=1,
sort_by_duration=False,
max_duration=10.0):
# create a class instance for obtaining batches of data
audio_gen = AudioGenerator(minibatch_size=minibatch_size,
spectrogram=spectrogram, mfcc_dim=mfcc_dim, max_duration=max_duration,
sort_by_duration=sort_by_duration)
# add the training data to the generator
audio_gen.load_train_data(train_json)
audio_gen.load_validation_data(valid_json)
# calculate steps_per_epoch
num_train_examples=len(audio_gen.train_audio_paths)
steps_per_epoch = num_train_examples//minibatch_size
# calculate validation_steps
num_valid_samples = len(audio_gen.valid_audio_paths)
validation_steps = num_valid_samples//minibatch_size
# add CTC loss to the NN specified in input_to_softmax
model = add_ctc_loss(input_to_softmax)
# CTC loss is implemented elsewhere, so use a dummy lambda function for the loss
model.compile(loss={'ctc': lambda y_true, y_pred: y_pred}, optimizer=optimizer)
# make results/ directory, if necessary
if not os.path.exists('results'):
os.makedirs('results')
# add checkpointer
checkpointer = ModelCheckpoint(filepath='results/'+save_model_path, verbose=0)
# train the model
hist = model.fit_generator(generator=audio_gen.next_train(), steps_per_epoch=steps_per_epoch,
epochs=epochs, validation_data=audio_gen.next_valid(), validation_steps=validation_steps,
callbacks=[checkpointer], verbose=verbose)
# save model loss
with open('results/'+pickle_path, 'wb') as f:
pickle.dump(hist.history, f)
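# A minimal call sketch (hypothetical `model_fn` returning a Keras model that
# also exposes the `output_length` attribute train_model expects):
#     train_model(input_to_softmax=model_fn(),
#                 pickle_path='model_0.pickle',
#                 save_model_path='model_0.h5',
#                 spectrogram=True)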
|
#!/usr/bin/env python
"""The setup script."""
from setuptools import setup, find_packages
with open('README.rst') as readme_file:
readme = readme_file.read()
with open('HISTORY.rst') as history_file:
history = history_file.read()
requirements = [s.strip() for s in open('requirements.txt', 'r').readlines()]
setup_requirements = []
test_requirements = []
setup(
author="Guillermo E. Blanco",
author_email='geblanco@lsi.uned.es',
python_requires='>=3.5',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.8',
],
description="Multiple Choice Evaluation utilities",
entry_points={
'console_scripts': [
'mcqa_utils=mcqa_utils.mcqa_utils:main',
],
},
install_requires=requirements,
license="MIT license",
long_description=readme + '\n\n' + history,
include_package_data=True,
keywords='mcqa_utils',
name='mcqa_utils',
packages=find_packages(include=['mcqa_utils', 'mcqa_utils.*']),
setup_requires=setup_requirements,
test_suite='tests',
tests_require=test_requirements,
url='https://github.com/geblanco/mcqa_utils',
version='0.7.0',
zip_safe=False,
)
|
import argparse
import os
import random
import misc_utils as utils
def parse_args():
# experiment specifics
parser = argparse.ArgumentParser()
parser.add_argument('--tag', type=str, default='cache',
help='folder name to clear')
parser.add_argument('--rm', action='store_true', help='permanently remove instead of moving to trash/')
return parser.parse_args()
opt = parse_args()
paths = ['checkpoints', 'logs', 'results']
utils.color_print("Directory '%s' cleared." % opt.tag, 1)
if opt.rm:
for path in paths:
p = os.path.join(path, opt.tag)
if os.path.isdir(p):
command = 'rm -r ' + p
print(command)
os.system(command)
else:
tmp = os.path.join('trash', str(random.randint(1000000000, 9999999999)))
utils.try_make_dir(tmp)
for path in paths:
p = os.path.join(path, opt.tag)
if os.path.isdir(p):
command = 'mv %s %s' % (p, tmp)
print(command)
os.system(command)
|
#!"E:\zip my django projects\basic Django\crud operations\venv\Scripts\python.exe"
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip'
__requires__ = 'pip==10.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==10.0.1', 'console_scripts', 'pip')()
)
|
import xmnlp
xmnlp.set_model('../xmnlp-onnx-models')
with open("text.txt", "r", encoding='utf8') as f:
text = f.read()
result = xmnlp.keyphrase(text, k=1)
sentiment = xmnlp.sentiment(result[0])
print(result, sentiment)
|
from math import sin, cos, radians
for i in range(int(input())):
v, theta, x, h1, h2 = list(map(float, input().split()))
t = x / v / cos(radians(theta))  # time for the projectile to cover horizontal distance x
yt = v * t * sin(radians(theta)) - (1 / 2 * 9.8 * t**2)  # height at time t under gravity g = 9.8
print("Safe" if h2 - yt >= 1 and yt - h1 >= 1 else "Not safe")  # need at least 1 unit of clearance above and below
|
style = '''
/* /////////////////////////////////////////////////////////////////////////////////////////////////
QTableWidget */
QTableWidget {{
background-color: {_bg_color};
outline: 0;
padding: 5px;
border-radius: {_radius}px;
gridline-color: {_grid_line_color};
}}
/*
QTableWidget::item:hover {{
background-color: {_selection_color};
}}*/
QTableWidget::horizontalHeader {{
background-color: rgb(33, 37, 43);
}}
QTableWidget QTableCornerButton::section {{
border: none;
background-color: {_header_horizontal_color};
padding: 3px;
border-top-left-radius: {_radius}px;
}}
QHeaderView::section:horizontal
{{
border: none;
background-color: {_header_horizontal_color};
padding: 3px;
}}
QHeaderView::section:vertical
{{
border: none;
background-color: {_header_vertical_color};
padding-left: 5px;
padding-right: 5px;
border-bottom: 1px solid {_bottom_line_color};
margin-bottom: 1px;
}}
/* /////////////////////////////////////////////////////////////////////////////////////////////////
ScrollBars */
QScrollBar:horizontal {{
border: none;
background: {_scroll_bar_bg_color};
height: 8px;
margin: 0px 21px 0 21px;
border-radius: 0px;
}}
QScrollBar::handle:horizontal {{
background: {_context_color};
min-width: 25px;
border-radius: 4px
}}
QScrollBar::add-line:horizontal {{
border: none;
background: {_scroll_bar_btn_color};
width: 20px;
border-top-right-radius: 4px;
border-bottom-right-radius: 4px;
subcontrol-position: right;
subcontrol-origin: margin;
}}
QScrollBar::sub-line:horizontal {{
border: none;
background: {_scroll_bar_btn_color};
width: 20px;
border-top-left-radius: 4px;
border-bottom-left-radius: 4px;
subcontrol-position: left;
subcontrol-origin: margin;
}}
QScrollBar::up-arrow:horizontal, QScrollBar::down-arrow:horizontal
{{
background: none;
}}
QScrollBar::add-page:horizontal, QScrollBar::sub-page:horizontal
{{
background: none;
}}
QScrollBar:vertical {{
border: none;
background: {_scroll_bar_bg_color};
width: 8px;
margin: 21px 0 21px 0;
border-radius: 0px;
}}
QScrollBar::handle:vertical {{
background: {_context_color};
min-height: 25px;
border-radius: 4px
}}
QScrollBar::add-line:vertical {{
border: none;
background: {_scroll_bar_btn_color};
height: 20px;
border-bottom-left-radius: 4px;
border-bottom-right-radius: 4px;
subcontrol-position: bottom;
subcontrol-origin: margin;
}}
QScrollBar::sub-line:vertical {{
border: none;
background: {_scroll_bar_btn_color};
height: 20px;
border-top-left-radius: 4px;
border-top-right-radius: 4px;
subcontrol-position: top;
subcontrol-origin: margin;
}}
QScrollBar::up-arrow:vertical, QScrollBar::down-arrow:vertical {{
background: none;
}}
QScrollBar::add-page:vertical, QScrollBar::sub-page:vertical {{
background: none;
}}
'''
|
""" IO Handler for LAS (and compressed LAZ) file format """
import pylas
from laserchicken import keys
from laserchicken.io.base_io_handler import IOHandler
from laserchicken.io.utils import convert_to_short_type, select_valid_attributes
DEFAULT_LAS_ATTRIBUTES = {
'x',
'y',
'z',
'intensity',
'gps_time',
'raw_classification',
}
class LASHandler(IOHandler):
""" Class for IO of point-cloud data in LAS file format """
def read(self, attributes=DEFAULT_LAS_ATTRIBUTES):
"""
Load the points from a LAS(LAZ) file into memory.
:param attributes: list of attributes to read ('all' for all attributes in file)
:return: point cloud data structure
"""
file = pylas.read(self.path)
attributes_available = [el if el not in ['X', 'Y', 'Z'] else el.lower()
for el in file.points.dtype.names]
attributes = select_valid_attributes(attributes_available, attributes)
points = {}
for name in attributes:
if hasattr(file, name):
data = getattr(file, name)
points[name] = _get_attribute(data, data.dtype.name)
return {keys.point: points}
def write(self, point_cloud, attributes='all', file_version='1.2', point_format_id=3):
"""
Write point cloud to a LAS(LAZ) file.
:param point_cloud:
:param attributes: list of attributes to write ('all' for all attributes in point_cloud)
:param file_version:
:param point_format_id:
:return:
"""
file = pylas.create(point_format_id=point_format_id,
file_version=file_version)
points = point_cloud[keys.point]
attributes = select_valid_attributes([attr for attr in points.keys()], attributes)
# NOTE: adding extra dims and assignment should be done in two steps,
# some fields (e.g. raw_classification) are otherwise overwritten
for attribute in attributes:
data, type = _get_data_and_type(points[attribute])
type_short = convert_to_short_type(type)
if attribute not in 'xyz':
# x, y, z are not added as extra dims; pylas handles them natively, converting coordinates to scaled 4-byte ints
if attribute not in file.points.dtype.names:
file.add_extra_dim(name=attribute, type=type_short)
file_type_short = convert_to_short_type(getattr(file, attribute).dtype.name)
if not file_type_short == type_short:
raise TypeError('Data type in file does not match the one in point cloud: '
'for {}, {} vs {}'.format(attribute, file_type_short, type_short))
for attribute in attributes:
data, _ = _get_data_and_type(points[attribute])
if data.size == 0:
raise ValueError('Cannot write empty point-cloud!')
else:
setattr(file, attribute, data)
try:
file.write(self.path)
except ValueError as err:
raise ValueError('Error in writing LAS file (file_version {}, point_format_id {}). '
'pylas error below:\n{}'.format(file_version, point_format_id, err))
def _get_attribute(data, data_type):
return {'type': data_type, 'data': data}
def _get_data_and_type(attribute):
return attribute['data'], attribute['type']
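# A minimal round-trip sketch (hypothetical paths; assumes the base IOHandler
# stores the target path on `self.path` at construction time):
#     pc = LASHandler('points.las').read(attributes=['x', 'y', 'z', 'intensity'])
#     LASHandler('points_copy.laz').write(pc, attributes='all')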
|
import zmq
ctx = zmq.Context.instance()
# The PULL socket is the fan-in end; the inproc transport requires that all
# peers share one context, which Context.instance() guarantees.
server = ctx.socket(zmq.PULL)
server.bind('inproc://foo')
# Ten PUSH clients connect and each sends a single message.
clients = [ctx.socket(zmq.PUSH) for i in range(10)]
for client in clients:
client.connect('inproc://foo')
client.send(b'DATA')
# Drain all ten messages on the server side.
for i in range(10):
print(repr(server.recv()))
|
from django.core.urlresolvers import resolve
from django.template.loader import render_to_string
from django.test import TestCase
from django.http import HttpRequest
from lists.views import home_page
from lists.models import Item, List
class HomePageTest(TestCase):
def test_root_url_resolves_to_home_page_view(self):
found = resolve('/')
self.assertEqual(found.func, home_page)
def test_home_page_returns_correct_html(self):
request = HttpRequest()
response = home_page(request)
# passing request= to render_to_string works only on Django 1.9.5
expected_html = render_to_string('home.html', request=request)
self.assertEqual(response.content.decode(), expected_html)
class ListAndItemModelsTests(TestCase):
def test_saving_and_retrieving_items(self):
list_ = List()
list_.save()
first_item = Item()
first_item.text = 'The first (ever) list item'
first_item.list = list_
first_item.save()
second_item = Item()
second_item.text = 'Item the second'
second_item.list = list_
second_item.save()
saved_list = List.objects.first()
self.assertEqual(saved_list, list_)
saved_items = Item.objects.all()
self.assertEqual(saved_items.count(), 2)
first_saved_item = saved_items[0]
second_saved_item = saved_items[1]
self.assertEqual(first_saved_item.text, 'The first (ever) list item')
self.assertEqual(first_saved_item.list, list_)
self.assertEqual(second_saved_item.text, 'Item the second')
self.assertEqual(second_saved_item.list, list_)
class ListViewTest(TestCase):
def test_displays_only_items_for_that_list(self):
correct_list = List.objects.create()
Item.objects.create(text='itemey 1', list=correct_list)
Item.objects.create(text='itemey 2', list=correct_list)
other_list = List.objects.create()
Item.objects.create(text='other list item 1', list=other_list)
Item.objects.create(text='other list item 2', list=other_list)
response = self.client.get('/lists/%d/' % (correct_list.id,))
self.assertContains(response, 'itemey 1')
self.assertContains(response, 'itemey 2')
self.assertNotContains(response, 'other list item 1')
self.assertNotContains(response, 'other list item 2')
def test_uses_list_template(self):
list_ = List.objects.create()
response = self.client.get('/lists/%d/' % (list_.id,))
self.assertTemplateUsed(response, 'list.html')
def test_passes_correct_list_to_template(self):
other_list = List.objects.create()
correct_list = List.objects.create()
response = self.client.get('/lists/%d/' % (correct_list.id,))
self.assertEqual(response.context['list'], correct_list)
class NewListTest(TestCase):
def test_saving_a_POST_request(self):
self.client.post(
'/lists/new',
data={'item_text': 'A new list item'}
)
self.assertEqual(Item.objects.count(),1)
new_item = Item.objects.first()
self.assertEqual(new_item.text, 'A new list item')
def test_redirects_after_POST(self):
response = self.client.post(
'/lists/new',
data={'item_text': 'A new list item'}
)
new_list = List.objects.first()
self.assertRedirects(response, '/lists/%d/' % (new_list.id,))
class NewItemTest(TestCase):
def test_can_save_a_POST_request_to_an_existing_list(self):
other_list = List.objects.create()
correct_list = List.objects.create()
self.client.post(
'/lists/%d/add_item' % (correct_list.id,),
data={'item_text': 'A new item for an existing list'}
)
self.assertEqual(Item.objects.count(), 1)
new_item = Item.objects.first()
self.assertEqual(new_item.text, 'A new item for an existing list')
self.assertEqual(new_item.list, correct_list)
def test_redirects_to_list_view(self):
other_list = List.objects.create()
correct_list = List.objects.create()
response = self.client.post(
'/lists/%d/add_item' % (correct_list.id,),
data={'item_text': 'A new item for an existing list'}
)
self.assertRedirects(response, '/lists/%d/' % (correct_list.id,))
|
from .alert_model import *
|